Restructure Vulkan bootstrapping (#10721) 9c66afdd2a This redoes the Vulkan bootstrapping used by our test apps to use our own internal bootstrapping (rather than VkBootstrap), as well as fixes swapchain issues with a subtly incorrect use of fences. Co-authored-by: Josh Jersild <joshua@rive.app>
diff --git a/.rive_head b/.rive_head index 4845ed7..2338ff4 100644 --- a/.rive_head +++ b/.rive_head
@@ -1 +1 @@ -36a2a4b4db1a2cfd0612955697b21d8c7e440217 +9c66afdd2a7e9ee1a08b2da8da4679cfa3e4545d
diff --git a/renderer/path_fiddle/fiddle_context_vulkan.cpp b/renderer/path_fiddle/fiddle_context_vulkan.cpp index 40bf222..ceda390 100644 --- a/renderer/path_fiddle/fiddle_context_vulkan.cpp +++ b/renderer/path_fiddle/fiddle_context_vulkan.cpp
@@ -14,7 +14,9 @@ #else -#include "rive_vk_bootstrap/rive_vk_bootstrap.hpp" +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "rive_vk_bootstrap/vulkan_swapchain.hpp" #include "rive/renderer/rive_renderer.hpp" #include "rive/renderer/vulkan/render_context_vulkan_impl.hpp" #include "rive/renderer/vulkan/render_target_vulkan.hpp" @@ -22,8 +24,6 @@ #include <GLFW/glfw3.h> #include <GLFW/glfw3native.h> #include <vulkan/vulkan.h> -#include <vulkan/vulkan_beta.h> -#include <vk_mem_alloc.h> using namespace rive; using namespace rive::gpu; @@ -33,103 +33,40 @@ public: FiddleContextVulkanPLS(FiddleContextOptions options) : m_options(options) { - rive_vkb::load_vulkan(); + using namespace rive_vkb; uint32_t glfwExtensionCount = 0; const char** glfwExtensions; glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); - int minorVersionRequested = options.coreFeaturesOnly ? 0 : 3; - bool enableVulkanValidationLayers = - m_options.enableVulkanValidationLayers; - bool disableDebugCallbacks = m_options.disableDebugCallbacks; - - while (true) - { - vkb::InstanceBuilder instanceBuilder; - instanceBuilder.set_app_name("path_fiddle") - .set_engine_name("Rive Renderer") - .enable_extensions(glfwExtensionCount, glfwExtensions) - .require_api_version(1, minorVersionRequested, 0) - .set_minimum_instance_version(1, 0, 0); -#ifdef DEBUG - instanceBuilder.enable_validation_layers( - enableVulkanValidationLayers); - if (!disableDebugCallbacks) - { - instanceBuilder.set_debug_callback( - rive_vkb::default_debug_callback); - } + m_instance = std::make_unique<VulkanInstance>(VulkanInstance::Options{ + .appName = "path_fiddle", + .idealAPIVersion = options.coreFeaturesOnly ? 
VK_API_VERSION_1_0 + : VK_API_VERSION_1_3, + .requiredExtensions = make_span(glfwExtensions, glfwExtensionCount), +#ifndef NDEBUG + .wantValidationLayers = options.enableVulkanValidationLayers, + .wantDebugCallbacks = !options.disableDebugCallbacks, #endif + }); - auto instanceResult = instanceBuilder.build(); - if (!instanceResult) - { - auto error = static_cast<vkb::InstanceError>( - instanceResult.error().value()); + m_vkDestroySurfaceKHR = + m_instance->loadInstanceFunc<PFN_vkDestroySurfaceKHR>( + "vkDestroySurfaceKHR"); + assert(m_vkDestroySurfaceKHR != nullptr); - if (error == - vkb::InstanceError::vulkan_version_1_1_unavailable && - minorVersionRequested != 0) - { - // There's a bug in VkBootstrap (due to not properly - // handling Vulkan 1.0 not having the - // vkEnumerateInstanceVersion function) where it can give a - // vulkan_version_1_1_unavailable error even though we've - // specified a minimum of 1.0. If we get that error, - // request 1.0 directly and try again. - fprintf(stderr, "Falling back on Vulkan 1.0.\n"); - minorVersionRequested = 0; - continue; - } + m_device = std::make_unique<VulkanDevice>( + *m_instance, + VulkanDevice::Options{ + .coreFeaturesOnly = options.coreFeaturesOnly, + }); -#ifdef DEBUG - if (enableVulkanValidationLayers && - error == vkb::InstanceError::requested_layers_not_present) - { - fprintf(stderr, - "WARNING: Validation layers not found. Attempting " - "to create a Vulkan context again without " - "validation layers.\n"); - enableVulkanValidationLayers = false; - continue; - } - - if (!disableDebugCallbacks && - error == vkb::InstanceError::failed_create_debug_messenger) - { - fprintf(stderr, - "WARNING: Debug callbacks not supported. 
" - "Attempting to create a Vulkan context again " - "without debug callbacks."); - disableDebugCallbacks = true; - continue; - } -#endif - fprintf(stderr, - "ERROR: %s: Failed to build Vulkan instance.", - instanceResult.error().message().c_str()); - abort(); - } - - m_instance = *instanceResult; - break; - } - m_instanceDispatchTable = m_instance.make_table(); - - VulkanFeatures vulkanFeatures; - std::tie(m_device, vulkanFeatures) = rive_vkb::select_device( - vkb::PhysicalDeviceSelector(m_instance) - .defer_surface_initialization(), - m_options.coreFeaturesOnly ? rive_vkb::FeatureSet::coreOnly - : rive_vkb::FeatureSet::allAvailable, - m_options.gpuNameFilter); m_renderContext = RenderContextVulkanImpl::MakeContext( - m_instance, - m_device.physical_device, - m_device, - vulkanFeatures, - m_instance.fp_vkGetInstanceProcAddr, + m_instance->vkInstance(), + m_device->vkPhysicalDevice(), + m_device->vkDevice(), + m_device->vulkanFeatures(), + m_instance->getVkGetInstanceProcAddrPtr(), { .forceAtomicMode = options.disableRasterOrdering, .shaderCompilationMode = m_options.shaderCompilationMode, @@ -147,11 +84,10 @@ if (m_windowSurface != VK_NULL_HANDLE) { - m_instanceDispatchTable.destroySurfaceKHR(m_windowSurface, nullptr); + m_vkDestroySurfaceKHR(m_instance->vkInstance(), + m_windowSurface, + nullptr); } - - vkb::destroy_device(m_device); - vkb::destroy_instance(m_instance); } float dpiScale(GLFWwindow* window) const final @@ -194,66 +130,79 @@ if (m_windowSurface != VK_NULL_HANDLE) { - m_instanceDispatchTable.destroySurfaceKHR(m_windowSurface, nullptr); + m_vkDestroySurfaceKHR(m_instance->vkInstance(), + m_windowSurface, + nullptr); } - VK_CHECK(glfwCreateWindowSurface(m_instance, + VK_CHECK(glfwCreateWindowSurface(m_instance->vkInstance(), window, nullptr, &m_windowSurface)); - VkSurfaceCapabilitiesKHR windowCapabilities; - VK_CHECK(m_instanceDispatchTable - .fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR( - m_device.physical_device, - m_windowSurface, - 
&windowCapabilities)); + auto vkGetPhysicalDeviceSurfaceCapabilitiesKHR = + m_instance->loadInstanceFunc< + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>( + "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); + VkSurfaceCapabilitiesKHR windowCapabilities{}; + VK_CHECK(vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + m_device->vkPhysicalDevice(), + m_windowSurface, + &windowCapabilities)); - vkb::SwapchainBuilder swapchainBuilder(m_device, m_windowSurface); - swapchainBuilder - .set_desired_format({ - // Swap the target format in "vkcore" mode, just for fun so we - // test both configurations. - .format = m_options.srgb ? VK_FORMAT_R8G8B8A8_SRGB - : m_options.coreFeaturesOnly - ? VK_FORMAT_R8G8B8A8_UNORM - : VK_FORMAT_B8G8R8A8_UNORM, - .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, - }) - .add_fallback_format({ - .format = m_options.coreFeaturesOnly ? VK_FORMAT_B8G8R8A8_UNORM - : VK_FORMAT_R8G8B8A8_UNORM, - .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, - }) - .set_desired_present_mode(VK_PRESENT_MODE_IMMEDIATE_KHR) - .add_fallback_present_mode(VK_PRESENT_MODE_MAILBOX_KHR) - .add_fallback_present_mode(VK_PRESENT_MODE_FIFO_RELAXED_KHR) - .add_fallback_present_mode(VK_PRESENT_MODE_FIFO_KHR); + auto swapOpts = rive_vkb::VulkanSwapchain::Options{ + .formatPreferences = + { + { + .format = m_options.srgb ? VK_FORMAT_R8G8B8A8_SRGB + : m_options.coreFeaturesOnly + ? 
VK_FORMAT_R8G8B8A8_UNORM + : VK_FORMAT_B8G8R8A8_UNORM, + .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + }, + + // Fall back to either ordering of ARGB + { + .format = VK_FORMAT_R8G8B8A8_UNORM, + .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + }, + { + .format = VK_FORMAT_B8G8R8A8_UNORM, + .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + }, + }, + .presentModePreferences = + { + VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_MAILBOX_KHR, + VK_PRESENT_MODE_FIFO_RELAXED_KHR, + VK_PRESENT_MODE_FIFO_KHR, + }, + .initialFrameNumber = currentFrameNumber, + }; + if (!m_options.coreFeaturesOnly && (windowCapabilities.supportedUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) { - swapchainBuilder.add_image_usage_flags( - VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT); + swapOpts.imageUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; if (m_options.enableReadPixels) { - swapchainBuilder.add_image_usage_flags( - VK_IMAGE_USAGE_TRANSFER_SRC_BIT); + swapOpts.imageUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; } } else { - swapchainBuilder - .add_image_usage_flags(VK_IMAGE_USAGE_TRANSFER_SRC_BIT) - .add_image_usage_flags(VK_IMAGE_USAGE_TRANSFER_DST_BIT); + swapOpts.imageUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | + VK_IMAGE_USAGE_TRANSFER_DST_BIT; } - m_swapchain = std::make_unique<rive_vkb::Swapchain>( - m_device, - ref_rcp(vk()), - width, - height, - VKB_CHECK(swapchainBuilder.build()), - currentFrameNumber); + + m_swapchain = + std::make_unique<rive_vkb::VulkanSwapchain>(*m_instance, + *m_device, + ref_rcp(vk()), + m_windowSurface, + swapOpts); m_renderTarget = renderContextVulkanImpl()->makeRenderTarget( width, @@ -266,7 +215,7 @@ void hotloadShaders() final { - m_swapchain->dispatchTable().deviceWaitIdle(); + m_device->waitUntilIdle(); rive::Span<const uint32_t> newShaderBytecodeData = loadNewShaderFileData(); if (newShaderBytecodeData.size() > 0) @@ -282,35 +231,46 @@ void begin(const RenderContext::FrameDescriptor& frameDescriptor) final { - 
m_renderContext->beginFrame(std::move(frameDescriptor)); + m_renderContext->beginFrame(frameDescriptor); } void flushPLSContext(RenderTarget* offscreenRenderTarget) final { - const rive_vkb::SwapchainImage* swapchainImage = - m_swapchain->currentImage(); - if (swapchainImage == nullptr) + if (!m_swapchain->isFrameStarted()) { - swapchainImage = m_swapchain->acquireNextImage(); - m_renderTarget->setTargetImageView(swapchainImage->imageView, - swapchainImage->image, - swapchainImage->imageLastAccess); + m_swapchain->beginFrame(); + + m_renderTarget->setTargetImageView( + m_swapchain->currentVkImageView(), + m_swapchain->currentVkImage(), + m_swapchain->currentLastAccess()); } m_renderContext->flush({ .renderTarget = offscreenRenderTarget != nullptr ? offscreenRenderTarget : m_renderTarget.get(), - .externalCommandBuffer = swapchainImage->commandBuffer, - .currentFrameNumber = swapchainImage->currentFrameNumber, - .safeFrameNumber = swapchainImage->safeFrameNumber, + .externalCommandBuffer = m_swapchain->currentCommandBuffer(), + .currentFrameNumber = m_swapchain->currentFrameNumber(), + .safeFrameNumber = m_swapchain->safeFrameNumber(), }); } - void end(GLFWwindow* window, std::vector<uint8_t>* pixelData) final + void end(GLFWwindow*, std::vector<uint8_t>* pixelData) final { flushPLSContext(nullptr); - m_swapchain->submit(m_renderTarget->targetLastAccess(), pixelData); + + auto lastAccess = m_renderTarget->targetLastAccess(); + if (pixelData != nullptr) + { + m_swapchain->queueImageCopy(&lastAccess); + } + m_swapchain->endFrame(lastAccess); + + if (pixelData != nullptr) + { + m_swapchain->getPixelsFromLastImageCopy(pixelData); + } } private: @@ -320,15 +280,16 @@ } const FiddleContextOptions m_options; - vkb::Instance m_instance; - vkb::InstanceDispatchTable m_instanceDispatchTable; - vkb::Device m_device; + std::unique_ptr<rive_vkb::VulkanInstance> m_instance; + std::unique_ptr<rive_vkb::VulkanDevice> m_device; + std::unique_ptr<rive_vkb::VulkanSwapchain> 
m_swapchain; VkSurfaceKHR m_windowSurface = VK_NULL_HANDLE; - std::unique_ptr<rive_vkb::Swapchain> m_swapchain; std::unique_ptr<RenderContext> m_renderContext; rcp<RenderTargetVulkanImpl> m_renderTarget; + + PFN_vkDestroySurfaceKHR m_vkDestroySurfaceKHR = nullptr; }; std::unique_ptr<FiddleContext> FiddleContext::MakeVulkanPLS(
diff --git a/renderer/premake5_pls_renderer.lua b/renderer/premake5_pls_renderer.lua index 7778666..c735925 100644 --- a/renderer/premake5_pls_renderer.lua +++ b/renderer/premake5_pls_renderer.lua
@@ -195,7 +195,7 @@ externalincludedirs({'glad/include'}) fatalwarnings({ 'All' }) - files({ 'src/*.cpp', 'src/shaders/*.glsl', 'include/**.hpp', 'include/**.h' }) + files({ 'src/*.cpp', 'src/*.hpp', 'src/*.h', 'src/shaders/*.glsl', 'include/**.hpp', 'include/**.h' }) if _OPTIONS['with_optick'] then
diff --git a/renderer/rive_vk_bootstrap/bootstrap_project.lua b/renderer/rive_vk_bootstrap/bootstrap_project.lua index 18497b8..30cb7e7 100644 --- a/renderer/rive_vk_bootstrap/bootstrap_project.lua +++ b/renderer/rive_vk_bootstrap/bootstrap_project.lua
@@ -8,17 +8,16 @@ end local dependency = require('dependency') -vk_bootstrap = dependency.github('charles-lunarg/vk-bootstrap', 'v1.4.307') includedirs({ 'include' }) externalincludedirs({ vulkan_headers .. '/include', vulkan_memory_allocator .. '/include', - vk_bootstrap .. '/src', }) files({ - 'rive_vk_bootstrap.cpp', - vk_bootstrap .. '/src/VkBootstrap.cpp', + 'include/**.hpp', + 'src/*.cpp', + 'src/*.hpp', })
diff --git a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp deleted file mode 100644 index ab7bd88..0000000 --- a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp +++ /dev/null
@@ -1,144 +0,0 @@ -/* - * Copyright 2024 Rive - */ - -#include <VkBootstrap.h> -#include "rive/renderer/vulkan/vulkan_context.hpp" - -namespace rive_vkb -{ -template <typename T> -T vkb_check(vkb::Result<T> result, const char* code, int line, const char* file) -{ - if (!result) - { - fprintf(stderr, - "%s:%u: Error: %s: %s\n", - file, - line, - code, - result.error().message().c_str()); - abort(); - } - return *result; -} - -#define VKB_CHECK(RESULT) \ - ::rive_vkb::vkb_check(RESULT, #RESULT, __LINE__, __FILE__) - -vkb::SystemInfo load_vulkan(); - -#ifdef DEBUG -VKAPI_ATTR VkBool32 VKAPI_CALL -default_debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT, - VkDebugUtilsMessageTypeFlagsEXT, - const VkDebugUtilsMessengerCallbackDataEXT*, - void* pUserData); -#endif - -enum class FeatureSet -{ - coreOnly, - allAvailable, -}; - -// Select a GPU, using 'gpuNameFilter' or 'getenv("RIVE_GPU")', otherwise -// preferring discrete. Abort if the filter matches more than one name. -std::tuple<vkb::Device, rive::gpu::VulkanFeatures> select_device( - vkb::PhysicalDeviceSelector& selector, - FeatureSet, - const char* gpuNameFilter = nullptr); - -inline std::tuple<vkb::Device, rive::gpu::VulkanFeatures> select_device( - vkb::Instance instance, - FeatureSet featureSet, - const char* gpuNameFilter = nullptr) -{ - vkb::PhysicalDeviceSelector selector(instance); - return select_device(selector, featureSet, gpuNameFilter); -} - -struct SwapchainImage -{ - VkImage image; - VkImageView imageView; - rive::gpu::vkutil::ImageAccess imageLastAccess; - VkFence fence; - VkSemaphore frameBeginSemaphore; - VkSemaphore frameCompleteSemaphore; - VkCommandBuffer commandBuffer; - // Resource lifetime counters. Resources last used on or before - // 'safeFrameNumber' are safe to be released or recycled. - uint64_t currentFrameNumber = 0; - uint64_t safeFrameNumber = 0; -}; - -class Swapchain -{ -public: - // Vulkan native swapchain. 
- Swapchain(const vkb::Device&, - rive::rcp<rive::gpu::VulkanContext>, - uint32_t width, - uint32_t height, - vkb::Swapchain&&, - uint64_t currentFrameNumber = 0); - - // Offscreen texture. - Swapchain(const vkb::Device&, - rive::rcp<rive::gpu::VulkanContext>, - uint32_t width, - uint32_t height, - VkFormat imageFormat, - VkImageUsageFlags additionalUsageFlags = - VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, - uint64_t currentFrameNumber = 0); - - ~Swapchain(); - - uint32_t width() const { return m_width; } - uint32_t height() const { return m_height; } - VkFormat imageFormat() const { return m_imageFormat; } - VkImageUsageFlags imageUsageFlags() const { return m_imageUsageFlags; } - const vkb::DispatchTable& dispatchTable() const { return m_dispatchTable; } - uint64_t currentFrameNumber() const { return m_currentFrameNumber; } - - const SwapchainImage* acquireNextImage(); - const SwapchainImage* currentImage() const - { - return m_currentImageIndex == INVALID_IMAGE_INDEX - ? nullptr - : &m_swapchainImages[m_currentImageIndex]; - } - - // Submits and presents the current swapchain image. - // 'lastAccess' lets us know know how to barrier the swapchain image. - // 'pixelData', if not null, reads the swapchain image being presented. 
- void submit(rive::gpu::vkutil::ImageAccess lastAccess, - std::vector<uint8_t>* pixelData = nullptr, - rive::IAABB pixelReadBounds = {}, - rive::gpu::vkutil::Texture2D* pixelReadTexture = nullptr); - -private: - constexpr static uint32_t INVALID_IMAGE_INDEX = -1; - - void init(const vkb::Device&, const std::vector<VkImage>& images); - - const vkb::DispatchTable m_dispatchTable; - const VkQueue m_queue; - const rive::rcp<rive::gpu::VulkanContext> m_vk; - const uint32_t m_width; - const uint32_t m_height; - const VkFormat m_imageFormat; - const VkImageUsageFlags m_imageUsageFlags; - const rive::rcp<rive::gpu::vkutil::Image> m_offscreenImage; - vkb::Swapchain m_vkbSwapchain; - VkCommandPool m_commandPool; - VkSemaphore m_nextAcquireSemaphore; - std::vector<SwapchainImage> m_swapchainImages; - uint32_t m_currentImageIndex = INVALID_IMAGE_INDEX; - uint64_t m_currentFrameNumber = 0; - rive::rcp<rive::gpu::vkutil::Buffer> m_pixelReadBuffer; -}; - -} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_device.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_device.hpp new file mode 100644 index 0000000..3427106 --- /dev/null +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_device.hpp
@@ -0,0 +1,104 @@ +/* + * Copyright 2025 Rive + */ + +#include <optional> +#include <string> +#include <vector> +#include <vulkan/vulkan.h> + +#include "rive/renderer/vulkan/vulkan_context.hpp" + +namespace rive_vkb +{ +class VulkanInstance; + +class VulkanDevice +{ +public: + struct Options + { + bool coreFeaturesOnly = false; + const char* gpuNameFilter = nullptr; + bool headless = false; + + // If this is set to a valid surface (and not a headless device), device + // discovery will test for present compatibility to this surface + VkSurfaceKHR presentationSurfaceForDeviceSelection = VK_NULL_HANDLE; + }; + + VulkanDevice(VulkanInstance&, const Options&); + ~VulkanDevice(); + + VulkanDevice(const VulkanDevice&) = delete; + VulkanDevice& operator=(const VulkanDevice&) = delete; + + template <typename T> T loadDeviceFunc(const char* name) const + { + return reinterpret_cast<T>(m_vkGetDeviceProcAddr(m_device, name)); + } + + VkDevice vkDevice() const { return m_device; } + + VkPhysicalDevice vkPhysicalDevice() const { return m_physicalDevice; } + + std::vector<VkSurfaceFormatKHR> getSurfaceFormats(VkSurfaceKHR surface); + std::vector<VkPresentModeKHR> getSurfacePresentModes(VkSurfaceKHR surface); + + VkSurfaceCapabilitiesKHR getSurfaceCapabilities(VkSurfaceKHR) const; + + rive::gpu::VulkanFeatures vulkanFeatures() const + { + return m_riveVulkanFeatures; + } + + void waitUntilIdle() const; + + uint32_t graphicsQueueFamilyIndex() const + { + return m_graphicsQueueFamilyIndex; + } + +private: + struct FindDeviceResult + { + VkPhysicalDevice physicalDevice; + std::string deviceName; + VkPhysicalDeviceType deviceType; + }; + + FindDeviceResult findCompatiblePhysicalDevice( + VulkanInstance&, + const char* nameFilter, + VkSurfaceKHR optionalSurfaceForValidation); + + std::optional<VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT> + tryEnableRasterOrderFeatures( + VulkanInstance&, + const std::vector<VkExtensionProperties>& supportedExtensions, + 
std::vector<const char*>& extensions); + + bool addExtensionIfSupported( + const char* name, + const std::vector<VkExtensionProperties>& supportedExtensions, + std::vector<const char*>& extensions); + + std::vector<VkQueueFamilyProperties> m_queueFamilyProperties; + + VkPhysicalDevice m_physicalDevice; + VkDevice m_device; + PFN_vkGetDeviceProcAddr m_vkGetDeviceProcAddr; + PFN_vkGetDeviceQueue m_vkGetDeviceQueue; + PFN_vkDestroyDevice m_vkDestroyDevice; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR + m_vkGetPhysicalDeviceSurfaceCapabilitiesKHR; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR + m_vkGetPhysicalDeviceSurfaceFormatsKHR; + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR + m_vkGetPhysicalDeviceSurfacePresentModesKHR; + PFN_vkDeviceWaitIdle m_vkDeviceWaitIdle; + + rive::gpu::VulkanFeatures m_riveVulkanFeatures; + uint32_t m_graphicsQueueFamilyIndex; +}; +} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_frame_synchronizer.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_frame_synchronizer.hpp new file mode 100644 index 0000000..e63f046 --- /dev/null +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_frame_synchronizer.hpp
@@ -0,0 +1,144 @@ +/* + * Copyright 2025 Rive + */ + +#include <vulkan/vulkan.h> +#include <vector> + +#include "rive/renderer/vulkan/vulkan_context.hpp" + +namespace rive_vkb +{ +class VulkanDevice; +class VulkanInstance; + +class VulkanFrameSynchronizer +{ +public: + ~VulkanFrameSynchronizer(); + + VulkanFrameSynchronizer(const VulkanFrameSynchronizer&) = delete; + VulkanFrameSynchronizer& operator=(const VulkanFrameSynchronizer&) = delete; + VkCommandBuffer currentCommandBuffer() const + { + return current().commandBuffer; + } + uint64_t safeFrameNumber() const { return current().safeFrameNumber; } + uint64_t currentFrameNumber() const { return m_monotonicFrameNumber; } + + // Queue a copy of the specified image with optional bounds. Must be done + // before endFrame. + void queueImageCopy(VkImage, + VkFormat, + rive::gpu::vkutil::ImageAccess* inOutLastAccess, + rive::IAABB pixelReadBounds); + + // This gets the pixels from the last image copy requested. Must be called + // after the endFrame of the frame the request occurred during. + void getPixelsFromLastImageCopy(std::vector<uint8_t>* outPixels); + +protected: + struct Options + { + uint64_t initialFrameNumber = 0; + + // In effectively all cases it's best to have 2 in-flight frames (rather + // than one per swapchain image), to keep latency down. 
+ uint32_t inFlightFrameCount = 2; + + // This should be true for swapchains, since the acquire/present calls + // need to use semaphores + bool externalGPUSynchronization = false; + }; + + struct InFlightFrame + { + VkFence fence = VK_NULL_HANDLE; + VkCommandBuffer commandBuffer = VK_NULL_HANDLE; + VkSemaphore frameEndSemaphore = VK_NULL_HANDLE; + VkSemaphore externallySignaledSemaphore = VK_NULL_HANDLE; + uint64_t safeFrameNumber = 0; + }; + + VulkanFrameSynchronizer(VulkanInstance&, + VulkanDevice&, + rive::rcp<rive::gpu::VulkanContext>, + const Options&); + + VkDevice vkDevice() const { return m_device; } + + VkSemaphore waitForFenceAndBeginFrame(); + VkSemaphore endFrame(); + + const InFlightFrame& current() const + { + return m_inFlightFrames[m_renderFrameIndex]; + } + InFlightFrame& current() { return m_inFlightFrames[m_renderFrameIndex]; } + + // Get the previous in-flight frame + InFlightFrame& prev() + { + return m_inFlightFrames[(m_renderFrameIndex + m_inFlightFrames.size() - + 1) % + m_inFlightFrames.size()]; + } + + rive::gpu::VulkanContext* context() const { return m_vk.get(); } + + VkQueue graphicsQueue() const { return m_graphicsQueue; } + +private: + rive::rcp<rive::gpu::VulkanContext> m_vk; + VkDevice m_device; + + enum class PixelReadState + { + None, + Queued, + Ready, + }; + + rive::rcp<rive::gpu::vkutil::Buffer> m_pixelReadBuffer; + uint32_t m_pixelReadWidth; + uint32_t m_pixelReadHeight; + VkFormat m_pixelReadFormat; + PixelReadState m_pixelReadState = PixelReadState::None; + + VkQueue m_graphicsQueue; + VkCommandPool m_commandPool; + + uint64_t m_monotonicFrameNumber = 0; + uint32_t m_renderFrameIndex = 0; + + // Best practice, regardless of how many swapchain images there are, is to + // have exactly two sets of frame objects, one for the previously-completed + // frame, and one for the currently-building frame. 
+ std::vector<InFlightFrame> m_inFlightFrames; + + // These are all the commands the swapchain needs to do its work - this + // macro is also used to load them in the .cpp +#define RIVE_VK_FRAME_SYNC_INSTANCE_COMMANDS(F) \ + F(vkCreateCommandPool) \ + F(vkDestroyCommandPool) \ + F(vkCreateFence) \ + F(vkDestroyFence) \ + F(vkCreateSemaphore) \ + F(vkDestroySemaphore) \ + F(vkAllocateCommandBuffers) \ + F(vkFreeCommandBuffers) \ + F(vkWaitForFences) \ + F(vkCmdPipelineBarrier) \ + F(vkQueueSubmit) \ + F(vkGetDeviceQueue) \ + F(vkResetFences) \ + F(vkResetCommandBuffer) \ + F(vkBeginCommandBuffer) \ + F(vkEndCommandBuffer) \ + F(vkCmdCopyImageToBuffer) + +#define DECLARE_VULKAN_COMMAND(name) PFN_##name m_##name = nullptr; + RIVE_VK_FRAME_SYNC_INSTANCE_COMMANDS(DECLARE_VULKAN_COMMAND) +#undef DECLARE_VULKAN_COMMAND +}; +} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_headless_frame_synchronizer.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_headless_frame_synchronizer.hpp new file mode 100644 index 0000000..ce8237b --- /dev/null +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_headless_frame_synchronizer.hpp
@@ -0,0 +1,89 @@ +/* + * Copyright 2025 Rive + */ + +#include <vulkan/vulkan.h> +#include <vector> + +#include "rive/renderer/vulkan/vulkan_context.hpp" +#include "rive_vk_bootstrap/vulkan_frame_synchronizer.hpp" + +namespace rive_vkb +{ +class VulkanDevice; +class VulkanInstance; + +// This is similar to a swapchain, but instead renders to an offscreen image. +class VulkanHeadlessFrameSynchronizer : public VulkanFrameSynchronizer +{ + using Super = VulkanFrameSynchronizer; + +public: + struct Options + { + uint32_t width = 0; + uint32_t height = 0; + VkFormat imageFormat = VK_FORMAT_R8G8B8A8_UNORM; + VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + uint64_t initialFrameNumber = 0; + }; + + VulkanHeadlessFrameSynchronizer(VulkanInstance&, + VulkanDevice&, + rive::rcp<rive::gpu::VulkanContext>, + const Options&); + ~VulkanHeadlessFrameSynchronizer(); + + VulkanHeadlessFrameSynchronizer(const VulkanHeadlessFrameSynchronizer&) = + delete; + VulkanHeadlessFrameSynchronizer& operator=( + const VulkanHeadlessFrameSynchronizer&) = delete; + + VkImage vkImage() const { return *m_image; } + VkImageView vkImageView() const { return *m_imageView; } + rive::gpu::vkutil::ImageAccess lastAccess() const + { + return m_lastImageAccess; + } + + uint32_t width() const { return m_width; } + uint32_t height() const { return m_height; } + + VkFormat imageFormat() const { return m_imageFormat; } + VkImageUsageFlags imageUsageFlags() const { return m_imageUsageFlags; } + + bool isFrameStarted() const; + void beginFrame(); + + void queueImageCopy(rive::gpu::vkutil::ImageAccess* inOutLastAccess, + rive::IAABB optPixelReadBounds = {}); + + void endFrame(const rive::gpu::vkutil::ImageAccess&); + +private: + bool m_isInFrame = false; + + VkFormat m_imageFormat; + VkImageUsageFlags m_imageUsageFlags; + uint32_t m_width; + uint32_t m_height; + + rive::gpu::vkutil::ImageAccess m_lastImageAccess; + + const rive::rcp<rive::gpu::vkutil::Image> m_image; + const 
rive::rcp<rive::gpu::vkutil::ImageView> m_imageView; + + // These are all the commands the swapchain needs to do its work - this + // macro is also used to load them in the .cpp +#define RIVE_VK_OFFSCREEN_FRAME_INSTANCE_COMMANDS(F) \ + F(vkAcquireNextImageKHR) \ + F(vkDestroySwapchainKHR) \ + F(vkDestroyImageView) \ + F(vkDeviceWaitIdle) \ + F(vkQueuePresentKHR) + +#define DECLARE_VULKAN_COMMAND(name) PFN_##name m_##name = nullptr; + RIVE_VK_OFFSCREEN_FRAME_INSTANCE_COMMANDS(DECLARE_VULKAN_COMMAND) +#undef DECLARE_VULKAN_COMMAND +}; +} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_instance.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_instance.hpp new file mode 100644 index 0000000..f61700d --- /dev/null +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_instance.hpp
@@ -0,0 +1,76 @@ +#include <vulkan/vulkan.h> + +#include <vector> +#include "rive/span.hpp" + +namespace rive_vkb +{ +#ifndef NDEBUG +constexpr bool RIVE_DEFAULT_VULKAN_DEBUG_PREFERENCE = true; +#else +constexpr bool RIVE_DEFAULT_VULKAN_DEBUG_PREFERENCE = false; +#endif + +class VulkanLibrary; + +class VulkanInstance +{ +public: + struct Options + { + const char* appName = "fiddle_context app"; + const char* engineName = "Rive Renderer"; + + uint32_t idealAPIVersion = VK_API_VERSION_1_3; + uint32_t minimumSupportedInstanceVersion = VK_API_VERSION_1_0; + + rive::Span<const char*> requiredExtensions; + rive::Span<const char*> optionalExtensions; + + bool wantValidationLayers = RIVE_DEFAULT_VULKAN_DEBUG_PREFERENCE; + bool wantDebugCallbacks = RIVE_DEFAULT_VULKAN_DEBUG_PREFERENCE; + + bool logExtendedCreationInfo = false; + }; + + VulkanInstance(const Options&); + ~VulkanInstance(); + + VulkanInstance(const VulkanInstance&) = delete; + VulkanInstance& operator=(const VulkanInstance&) = delete; + + template <typename T> T loadInstanceFunc(const char* name) const + { + return reinterpret_cast<T>(loadInstanceFunc(name)); + } + + VkInstance vkInstance() const { return m_instance; } + uint32_t instanceVersion() const { return m_instanceVersion; } + uint32_t apiVersion() const { return m_apiVersion; } + + PFN_vkGetInstanceProcAddr getVkGetInstanceProcAddrPtr() const; + + bool tryGetPhysicalDeviceFeatures2( + VkPhysicalDevice, + VkPhysicalDeviceFeatures2* inoutFeatures); + +private: + PFN_vkVoidFunction loadInstanceFunc(const char* name) const; + + std::unique_ptr<VulkanLibrary> m_library; + std::vector<const char*> m_enabledExtensions; + std::vector<const char*> m_enabledLayers; + uint32_t m_instanceVersion; + uint32_t m_apiVersion; + VkInstance m_instance; + PFN_vkDestroyInstance m_vkDestroyInstance; + + // These two are optional, at most one of them will be set + PFN_vkGetPhysicalDeviceFeatures2 m_vkGetPhysicalDeviceFeatures2 = nullptr; + 
PFN_vkGetPhysicalDeviceFeatures2KHR m_vkGetPhysicalDeviceFeatures2KHR = + nullptr; + VkDebugUtilsMessengerEXT m_debugUtilsMessenger = VK_NULL_HANDLE; + VkDebugReportCallbackEXT m_debugReportCallback = VK_NULL_HANDLE; +}; + +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_swapchain.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_swapchain.hpp new file mode 100644 index 0000000..d850c6f --- /dev/null +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_swapchain.hpp
@@ -0,0 +1,119 @@ +/* + * Copyright 2025 Rive + */ + +#include <vulkan/vulkan.h> +#include <vector> + +#include "rive/renderer/vulkan/vulkan_context.hpp" +#include "rive_vk_bootstrap/vulkan_frame_synchronizer.hpp" + +namespace rive_vkb +{ +class VulkanDevice; +class VulkanInstance; + +class VulkanSwapchain : public VulkanFrameSynchronizer +{ + using Super = VulkanFrameSynchronizer; + +public: + struct Options + { + // Span of desired formats, ordered by preference. + std::vector<VkSurfaceFormatKHR> formatPreferences; + std::vector<VkPresentModeKHR> presentModePreferences; + + VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + uint64_t initialFrameNumber = 0; + uint32_t preferredImageCount = 2; + }; + + VulkanSwapchain(VulkanInstance&, + VulkanDevice&, + rive::rcp<rive::gpu::VulkanContext>, + VkSurfaceKHR, + const Options&); + ~VulkanSwapchain(); + + VulkanSwapchain(const VulkanSwapchain&) = delete; + VulkanSwapchain& operator=(const VulkanSwapchain&) = delete; + + VkImage currentVkImage() const { return current().image; } + VkImageView currentVkImageView() const { return current().view; } + rive::gpu::vkutil::ImageAccess currentLastAccess() const + { + return current().lastAccess; + } + + VkFormat imageFormat() const { return m_imageFormat; } + VkImageUsageFlags imageUsageFlags() const { return m_imageUsageFlags; } + + bool isFrameStarted() const; + void beginFrame(); + + // Queue a copy of the swapchain image for this frame with optional bounds. + // Must be done before endFrame. 
+ void queueImageCopy(rive::gpu::vkutil::ImageAccess* inOutLastAccess, + rive::IAABB optPixelReadBounds = {}); + + using Super::queueImageCopy; + + void endFrame(const rive::gpu::vkutil::ImageAccess&); + + uint32_t width() const { return m_width; } + uint32_t height() const { return m_height; } + +private: + struct SwapchainImage + { + VkImage image; + VkImageView view; + rive::gpu::vkutil::ImageAccess lastAccess; + }; + + SwapchainImage& current() + { + return m_swapchainImages.at(m_currentImageIndex); + } + + const SwapchainImage& current() const + { + return m_swapchainImages.at(m_currentImageIndex); + } + + VkSurfaceFormatKHR findBestFormat( + VulkanDevice& device, + VkSurfaceKHR surface, + const std::vector<VkSurfaceFormatKHR>& preferences); + + VkPresentModeKHR findBestPresentMode( + VulkanDevice& device, + VkSurfaceKHR surface, + const std::vector<VkPresentModeKHR>& presentModePreferences); + + VkSwapchainKHR m_swapchain; + + VkFormat m_imageFormat; + VkImageUsageFlags m_imageUsageFlags; + uint32_t m_width; + uint32_t m_height; + + uint32_t m_currentImageIndex = std::numeric_limits<uint32_t>::max(); + + std::vector<SwapchainImage> m_swapchainImages; + + // These are all the commands the swapchain needs to do its work - this + // macro is also used to load them in the .cpp +#define RIVE_VK_SWAPCHAIN_INSTANCE_COMMANDS(F) \ + F(vkAcquireNextImageKHR) \ + F(vkDestroySwapchainKHR) \ + F(vkDestroyImageView) \ + F(vkDeviceWaitIdle) \ + F(vkQueuePresentKHR) + +#define DECLARE_VULKAN_COMMAND(name) PFN_##name m_##name = nullptr; + RIVE_VK_SWAPCHAIN_INSTANCE_COMMANDS(DECLARE_VULKAN_COMMAND) +#undef DECLARE_VULKAN_COMMAND +}; +} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/rive_vk_bootstrap.cpp b/renderer/rive_vk_bootstrap/rive_vk_bootstrap.cpp deleted file mode 100644 index 2cf7626..0000000 --- a/renderer/rive_vk_bootstrap/rive_vk_bootstrap.cpp +++ /dev/null
@@ -1,638 +0,0 @@ -/* - * Copyright 2024 Rive - */ - -#include "rive_vk_bootstrap/rive_vk_bootstrap.hpp" - -#ifdef __APPLE__ -#include <dlfcn.h> -#endif - -namespace rive_vkb -{ -vkb::SystemInfo load_vulkan() -{ - PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr = nullptr; -#ifdef __APPLE__ - // The Vulkan SDK on Mac gets installed to /usr/local/lib, which is no - // longer on the library search path after Sonoma. - if (void* vulkanLib = - dlopen("/usr/local/lib/libvulkan.1.dylib", RTLD_NOW | RTLD_LOCAL)) - { - fp_vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>( - dlsym(vulkanLib, "vkGetInstanceProcAddr")); - } -#endif - return VKB_CHECK( - vkb::SystemInfo::get_system_info(fp_vkGetInstanceProcAddr)); -} - -#ifdef DEBUG -static std::array<const char*, 3> s_ignoredValidationMsgList = { - // Swiftshader generates this error during - // vkEnumeratePhysicalDevices. It seems fine to ignore. - "Copying old device 0 into new device 0", - // Cirrus Ubuntu runner w/ Nvidia gpu reports this but it seems harmless. - "terminator_CreateInstance: Received return code -3 from call to " - "vkCreateInstance in ICD /usr/lib/x86_64-linux-gnu/libvulkan_virtio.so. 
" - "Skipping this driver.", - "Override layer has override paths set to D:\\VulkanSDK\\1.3.296.0\\Bin", -}; - -VKAPI_ATTR VkBool32 VKAPI_CALL default_debug_callback( - VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, - VkDebugUtilsMessageTypeFlagsEXT messageType, - const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, - void* pUserData) -{ - bool shouldAbort = true; - - switch (messageType) - { - case VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT: - for (const char* msg : s_ignoredValidationMsgList) - { - if (strcmp(pCallbackData->pMessage, msg) == 0) - { - shouldAbort = false; - break; - } - } - - fprintf(stderr, "Rive Vulkan error"); - if (!shouldAbort) - { - fprintf(stderr, " (error ignored)"); - } - fprintf(stderr, - ": %i: %s: %s\n", - pCallbackData->messageIdNumber, - pCallbackData->pMessageIdName, - pCallbackData->pMessage); - - if (shouldAbort) - { - abort(); - } - break; - - case VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT: - fprintf(stderr, - "Rive Vulkan Validation error: %i: %s: %s\n", - pCallbackData->messageIdNumber, - pCallbackData->pMessageIdName, - pCallbackData->pMessage); - abort(); - case VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT: - fprintf(stderr, - "Rive Vulkan Performance warning: %i: %s: %s\n", - pCallbackData->messageIdNumber, - pCallbackData->pMessageIdName, - pCallbackData->pMessage); - break; - } - return VK_TRUE; -} -#endif - -static const char* physical_device_type_name(VkPhysicalDeviceType type) -{ - switch (type) - { - case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: - return "Integrated"; - case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: - return "Discrete"; - case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: - return "Virtual"; - case VK_PHYSICAL_DEVICE_TYPE_CPU: - return "CPU"; - default: - return "Other"; - } -} - -// Select a GPU name if it contains the substring 'filter' or '$RIVE_GPU'. -// Return false if 'filter' and '$RIVE_GPU' are both null. -// Abort if the filter matches more than one name. 
-std::tuple<vkb::Device, rive::gpu::VulkanFeatures> select_device( - vkb::PhysicalDeviceSelector& selector, - FeatureSet featureSet, - const char* gpuNameFilter) -{ - if (const char* rive_gpu = getenv("RIVE_GPU")) - { - gpuNameFilter = rive_gpu; - } - if (gpuNameFilter == nullptr || gpuNameFilter[0] == '\0') - { - // No active filter. Go with a discrete GPU. - selector.allow_any_gpu_device_type(false).prefer_gpu_device_type( - vkb::PreferredDeviceType::discrete); - } - else - { - std::vector<std::string> names = - VKB_CHECK(selector.select_device_names()); - std::vector<std::string> matches; - for (const std::string& name : names) - { - if (strstr(name.c_str(), gpuNameFilter) != nullptr) - { - matches.push_back(name); - } - } - if (matches.size() != 1) - { - const char* filterName = - gpuNameFilter != nullptr ? gpuNameFilter : "<discrete_gpu>"; - const std::vector<std::string>* devicePrintList; - if (matches.size() > 1) - { - fprintf(stderr, - "Cannot select GPU\nToo many matches for filter " - "'%s'.\nMatches:\n", - filterName); - devicePrintList = &matches; - } - else - { - fprintf(stderr, - "Cannot select GPU.\nNo matches for filter " - "'%s'.\nAvailable GPUs:\n", - filterName); - devicePrintList = &names; - } - for (const std::string& name : *devicePrintList) - { - fprintf(stderr, " %s\n", name.c_str()); - } - fprintf(stderr, - "\nPlease update the $RIVE_GPU environment variable\n"); - abort(); - } - selector.set_name(matches[0]); - } - auto selectResult = - selector.set_minimum_version(1, 0) - .allow_any_gpu_device_type(false) - .select(vkb::DeviceSelectionMode::only_fully_suitable); - if (!selectResult) - { - selectResult = selector.allow_any_gpu_device_type(true).select( - vkb::DeviceSelectionMode::partially_and_fully_suitable); - } - auto physicalDevice = VKB_CHECK(selectResult); - - physicalDevice.enable_features_if_present({ - .independentBlend = featureSet != FeatureSet::coreOnly, - .fillModeNonSolid = VK_TRUE, // Wireframe is a debug feature, so leave - 
// it on even for "core features" mode. - .fragmentStoresAndAtomics = VK_TRUE, - .shaderClipDistance = featureSet != FeatureSet::coreOnly, - }); - - rive::gpu::VulkanFeatures riveVulkanFeatures = { - .independentBlend = - static_cast<bool>(physicalDevice.features.independentBlend), - .fillModeNonSolid = - static_cast<bool>(physicalDevice.features.fillModeNonSolid), - .fragmentStoresAndAtomics = - static_cast<bool>(physicalDevice.features.fragmentStoresAndAtomics), - .shaderClipDistance = - static_cast<bool>(physicalDevice.features.shaderClipDistance), - }; - - if (featureSet != FeatureSet::coreOnly && - (physicalDevice.enable_extension_if_present( - VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME) || - physicalDevice.enable_extension_if_present( - "VK_AMD_rasterization_order_attachment_access"))) - { - constexpr static VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT - rasterOrderFeatures = { - .sType = - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT, - .rasterizationOrderColorAttachmentAccess = VK_TRUE, - }; - if (physicalDevice.enable_extension_features_if_present( - rasterOrderFeatures)) - { - riveVulkanFeatures.rasterizationOrderColorAttachmentAccess = true; - } - } - - vkb::Device device = VKB_CHECK(vkb::DeviceBuilder(physicalDevice).build()); - riveVulkanFeatures.apiVersion = device.instance_version, - - printf("==== Vulkan %i.%i.%i GPU (%s): %s [ ", - VK_API_VERSION_MAJOR(riveVulkanFeatures.apiVersion), - VK_API_VERSION_MINOR(riveVulkanFeatures.apiVersion), - VK_API_VERSION_PATCH(riveVulkanFeatures.apiVersion), - physical_device_type_name(physicalDevice.properties.deviceType), - physicalDevice.properties.deviceName); - struct CommaSeparator - { - const char* m_separator = ""; - const char* operator*() { return std::exchange(m_separator, ", "); } - } commaSeparator; - if (riveVulkanFeatures.independentBlend) - printf("%sindependentBlend", *commaSeparator); - if 
(riveVulkanFeatures.fillModeNonSolid) - printf("%sfillModeNonSolid", *commaSeparator); - if (riveVulkanFeatures.fragmentStoresAndAtomics) - printf("%sfragmentStoresAndAtomics", *commaSeparator); - if (riveVulkanFeatures.shaderClipDistance) - printf("%sshaderClipDistance", *commaSeparator); - if (riveVulkanFeatures.rasterizationOrderColorAttachmentAccess) - printf("%srasterizationOrderColorAttachmentAccess", *commaSeparator); -#if 0 - printf("Extensions:\n"); - for (const auto& ext : physicalDevice.get_available_extensions()) - { - printf(" %s\n", ext.c_str()); - } -#endif - printf(" ] ====\n"); - - return {device, riveVulkanFeatures}; -} - -// Vulkan native swapchain. -Swapchain::Swapchain(const vkb::Device& device, - rive::rcp<rive::gpu::VulkanContext> vk, - uint32_t width, - uint32_t height, - vkb::Swapchain&& vkbSwapchain, - uint64_t currentFrameNumber) : - m_dispatchTable(device.make_table()), - m_queue(VKB_CHECK(device.get_queue(vkb::QueueType::graphics))), - m_vk(vk), - m_width(width), - m_height(height), - m_imageFormat(vkbSwapchain.image_format), - m_imageUsageFlags(vkbSwapchain.image_usage_flags), - m_vkbSwapchain(std::move(vkbSwapchain)), - m_currentFrameNumber(currentFrameNumber) -{ - assert(m_vkbSwapchain.swapchain != VK_NULL_HANDLE); - init(device, *m_vkbSwapchain.get_images()); -} - -// Offscreen texture. 
-Swapchain::Swapchain(const vkb::Device& device, - rive::rcp<rive::gpu::VulkanContext> vk, - uint32_t width, - uint32_t height, - VkFormat imageFormat, - VkImageUsageFlags additionalUsageFlags, - uint64_t currentFrameNumber) : - m_dispatchTable(device.make_table()), - m_queue(VKB_CHECK(device.get_queue(vkb::QueueType::graphics))), - m_vk(vk), - m_width(width), - m_height(height), - m_imageFormat(imageFormat), - m_imageUsageFlags(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | - VK_IMAGE_USAGE_TRANSFER_SRC_BIT | additionalUsageFlags), - m_offscreenImage(m_vk->makeImage({ - .imageType = VK_IMAGE_TYPE_2D, - .format = m_imageFormat, - .extent = {m_width, m_height, 1}, - .usage = m_imageUsageFlags, - })), - m_currentFrameNumber(currentFrameNumber) -{ - init(device, {*m_offscreenImage}); - - // Signal the frame completion semaphore so we can blindly wait for it on - // the first frame, just like all the other frames. - VkSubmitInfo submitInfo = { - .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, - .signalSemaphoreCount = 1, - .pSignalSemaphores = &m_swapchainImages[0].frameCompleteSemaphore, - }; - VK_CHECK( - m_dispatchTable.queueSubmit(m_queue, 1, &submitInfo, VK_NULL_HANDLE)); -} - -void Swapchain::init(const vkb::Device& device, - const std::vector<VkImage>& images) -{ -#ifndef NDEBUG - // In order to implement blend modes, the target texture needs to either - // support input attachment usage (ideal), or else transfers. 
- constexpr static VkImageUsageFlags TRANSFER_SRC_AND_DST = - VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; - assert((m_imageUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) || - (m_imageUsageFlags & TRANSFER_SRC_AND_DST) == TRANSFER_SRC_AND_DST); -#endif - - constexpr static VkSemaphoreCreateInfo semaphoreCreateInfo = { - .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, - }; - - constexpr static VkFenceCreateInfo fenceCreateInfo = { - .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, - .flags = VK_FENCE_CREATE_SIGNALED_BIT, - }; - - VkCommandPoolCreateInfo commandPoolCreateInfo = { - .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, - .flags = VkCommandPoolCreateFlagBits:: - VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, - .queueFamilyIndex = *device.get_queue_index(vkb::QueueType::graphics), - }; - - VK_CHECK(m_dispatchTable.createCommandPool(&commandPoolCreateInfo, - nullptr, - &m_commandPool)); - - VkCommandBufferAllocateInfo commandBufferAllocateInfo = { - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, - .commandPool = m_commandPool, - .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, - .commandBufferCount = 1, - }; - - VK_CHECK(m_dispatchTable.createSemaphore(&semaphoreCreateInfo, - nullptr, - &m_nextAcquireSemaphore)); - - m_swapchainImages.resize(images.size()); - for (uint32_t i = 0; i < images.size(); ++i) - { - SwapchainImage& swapchainImage = m_swapchainImages[i]; - swapchainImage.image = images[i]; - swapchainImage.imageLastAccess = {}; - - VkImageViewCreateInfo imageViewCreateInfo = { - .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, - .image = swapchainImage.image, - .viewType = VK_IMAGE_VIEW_TYPE_2D, - .format = m_imageFormat, - .subresourceRange = - { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .levelCount = 1, - .layerCount = 1, - }, - }; - - VK_CHECK(m_dispatchTable.createImageView(&imageViewCreateInfo, - nullptr, - &swapchainImage.imageView)); - - VK_CHECK(m_dispatchTable.createFence(&fenceCreateInfo, - nullptr, - 
&swapchainImage.fence)); - - VK_CHECK(m_dispatchTable.createSemaphore( - &semaphoreCreateInfo, - nullptr, - &swapchainImage.frameBeginSemaphore)); - - VK_CHECK(m_dispatchTable.createSemaphore( - &semaphoreCreateInfo, - nullptr, - &swapchainImage.frameCompleteSemaphore)); - - VK_CHECK(m_dispatchTable.allocateCommandBuffers( - &commandBufferAllocateInfo, - &swapchainImage.commandBuffer)); - - swapchainImage.currentFrameNumber = swapchainImage.safeFrameNumber = - m_currentFrameNumber; - } -} - -Swapchain::~Swapchain() -{ - m_dispatchTable.queueWaitIdle(m_queue); - for (SwapchainImage& swapchainImage : m_swapchainImages) - { - m_dispatchTable.destroyImageView(swapchainImage.imageView, nullptr); - m_dispatchTable.destroyFence(swapchainImage.fence, nullptr); - m_dispatchTable.destroySemaphore(swapchainImage.frameBeginSemaphore, - nullptr); - m_dispatchTable.destroySemaphore(swapchainImage.frameCompleteSemaphore, - nullptr); - m_dispatchTable.freeCommandBuffers(m_commandPool, - 1, - &swapchainImage.commandBuffer); - } - m_dispatchTable.destroySemaphore(m_nextAcquireSemaphore, nullptr); - m_dispatchTable.destroyCommandPool(m_commandPool, nullptr); - vkb::destroy_swapchain(m_vkbSwapchain); -} - -static void wait_fence(const vkb::DispatchTable& DispatchTable, VkFence fence) -{ - while (DispatchTable.waitForFences(1, &fence, VK_TRUE, 1000) == VK_TIMEOUT) - { - // Keep waiting. 
- } -} - -const SwapchainImage* Swapchain::acquireNextImage() -{ - SwapchainImage* swapchainImage; - if (m_vkbSwapchain.swapchain != VK_NULL_HANDLE) - { - m_dispatchTable.acquireNextImageKHR(m_vkbSwapchain, - UINT64_MAX, - m_nextAcquireSemaphore, - VK_NULL_HANDLE, - &m_currentImageIndex); - swapchainImage = &m_swapchainImages[m_currentImageIndex]; - std::swap(swapchainImage->frameBeginSemaphore, m_nextAcquireSemaphore); - } - else - { - m_currentImageIndex = 0; - swapchainImage = &m_swapchainImages[0]; - std::swap(swapchainImage->frameBeginSemaphore, - swapchainImage->frameCompleteSemaphore); - } - - wait_fence(m_dispatchTable, swapchainImage->fence); - m_dispatchTable.resetFences(1, &swapchainImage->fence); - - m_dispatchTable.resetCommandBuffer(swapchainImage->commandBuffer, {}); - VkCommandBufferBeginInfo commandBufferBeginInfo = { - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, - }; - m_dispatchTable.beginCommandBuffer(swapchainImage->commandBuffer, - &commandBufferBeginInfo); - - // Now that we've waited for the fence, resources from 'currentFrameNumber' - // are safe to be released or recycled. - swapchainImage->safeFrameNumber = swapchainImage->currentFrameNumber; - swapchainImage->currentFrameNumber = ++m_currentFrameNumber; - return swapchainImage; -} - -void Swapchain::submit(rive::gpu::vkutil::ImageAccess lastAccess, - std::vector<uint8_t>* pixelData, - rive::IAABB pixelReadBounds, - rive::gpu::vkutil::Texture2D* pixelReadTexture) -{ - SwapchainImage* swapchainImage = &m_swapchainImages[m_currentImageIndex]; - VkCommandBuffer commandBuffer = swapchainImage->commandBuffer; - - if (pixelData != nullptr) - { - if (pixelReadBounds.empty()) - { - pixelReadBounds = rive::IAABB::MakeWH(m_width, m_height); - } - // Copy the framebuffer out to a buffer. 
- VkDeviceSize requiredBufferSize = - pixelReadBounds.height() * pixelReadBounds.width() * 4; - if (m_pixelReadBuffer == nullptr || - m_pixelReadBuffer->info().size < requiredBufferSize) - { - m_pixelReadBuffer = m_vk->makeBuffer( - { - .size = requiredBufferSize, - .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT, - }, - rive::gpu::vkutil::Mappability::readWrite); - } - - constexpr rive::gpu::vkutil::ImageAccess TRANSFER_SRC_ACCESS = { - .pipelineStages = VK_PIPELINE_STAGE_TRANSFER_BIT, - .accessMask = VK_ACCESS_TRANSFER_READ_BIT, - .layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - }; - if (pixelReadTexture != nullptr) - { - pixelReadTexture->barrier(commandBuffer, TRANSFER_SRC_ACCESS); - } - else - { - lastAccess = m_vk->simpleImageMemoryBarrier(commandBuffer, - lastAccess, - TRANSFER_SRC_ACCESS, - swapchainImage->image); - } - - VkBufferImageCopy imageCopyDesc = { - .imageSubresource = - { - .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .mipLevel = 0, - .baseArrayLayer = 0, - .layerCount = 1, - }, - .imageOffset = {pixelReadBounds.left, pixelReadBounds.top, 0}, - .imageExtent = {static_cast<uint32_t>(pixelReadBounds.width()), - static_cast<uint32_t>(pixelReadBounds.height()), - 1}, - }; - - m_dispatchTable.cmdCopyImageToBuffer( - commandBuffer, - pixelReadTexture != nullptr ? 
pixelReadTexture->vkImage() - : swapchainImage->image, - VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - *m_pixelReadBuffer, - 1, - &imageCopyDesc); - - m_vk->bufferMemoryBarrier( - commandBuffer, - VK_PIPELINE_STAGE_TRANSFER_BIT, - VK_PIPELINE_STAGE_HOST_BIT, - 0, - { - .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, - .dstAccessMask = VK_ACCESS_HOST_READ_BIT, - .buffer = *m_pixelReadBuffer, - }); - } - - if (m_vkbSwapchain.swapchain != VK_NULL_HANDLE) - { - lastAccess = m_vk->simpleImageMemoryBarrier( - commandBuffer, - lastAccess, - { - .pipelineStages = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, - .accessMask = VK_ACCESS_NONE, - .layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, - }, - swapchainImage->image); - } - - VK_CHECK(m_dispatchTable.endCommandBuffer(commandBuffer)); - - VkPipelineStageFlags waitDstStageMask = - VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - - VkSubmitInfo submitInfo = { - .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, - .waitSemaphoreCount = 1, - .pWaitSemaphores = &swapchainImage->frameBeginSemaphore, - .pWaitDstStageMask = &waitDstStageMask, - .commandBufferCount = 1, - .pCommandBuffers = &commandBuffer, - .signalSemaphoreCount = 1, - .pSignalSemaphores = &swapchainImage->frameCompleteSemaphore, - }; - - VK_CHECK(m_dispatchTable.queueSubmit(m_queue, - 1, - &submitInfo, - swapchainImage->fence)); - - if (pixelData != nullptr) - { - // Wait for all rendering to complete before transferring the - // framebuffer data to pixelData. - wait_fence(m_dispatchTable, swapchainImage->fence); - m_pixelReadBuffer->invalidateContents(); - - // Copy the buffer containing the framebuffer contents to pixelData. 
- uint32_t w = pixelReadBounds.width(); - uint32_t h = pixelReadBounds.height(); - pixelData->resize(h * w * 4); - - assert(m_pixelReadBuffer->info().size >= h * w * 4); - for (uint32_t y = 0; y < h; ++y) - { - auto src = - static_cast<const uint8_t*>(m_pixelReadBuffer->contents()) + - w * 4 * y; - uint8_t* dst = pixelData->data() + (h - y - 1) * w * 4; - memcpy(dst, src, w * 4); - if (m_imageFormat == VK_FORMAT_B8G8R8A8_UNORM) - { - // Reverse bgr -> rgb. - for (uint32_t x = 0; x < w * 4; x += 4) - { - std::swap(dst[x], dst[x + 2]); - } - } - } - } - - if (m_vkbSwapchain.swapchain != VK_NULL_HANDLE) - { - VkPresentInfoKHR presentInfo = { - .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, - .waitSemaphoreCount = 1, - .pWaitSemaphores = &swapchainImage->frameCompleteSemaphore, - .swapchainCount = 1, - .pSwapchains = &m_vkbSwapchain.swapchain, - .pImageIndices = &m_currentImageIndex, - }; - - m_dispatchTable.queuePresentKHR(m_queue, &presentInfo); - lastAccess = {}; - } - - swapchainImage->imageLastAccess = lastAccess; - m_currentImageIndex = INVALID_IMAGE_INDEX; -} -} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/src/logging.hpp b/renderer/rive_vk_bootstrap/src/logging.hpp new file mode 100644 index 0000000..f868be2 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/logging.hpp
@@ -0,0 +1,54 @@ +/* + * Copyright 2025 Rive + */ + +#pragma once + +#include <stdio.h> + +#ifdef RIVE_ANDROID +#include <android/log.h> +#endif + +// TODO: These probably want to be made more generally available across the +// whole renderer and updated to log nicely for more platforms (like Mac) +#if defined(__ANDROID__) +#define LOG_INFO_LINE(FORMAT, ...) \ + [](auto&&... args) { \ + printf(FORMAT "\n", std::forward<decltype(args)>(args)...); \ + __android_log_print(ANDROID_LOG_INFO, \ + "rive_android_tests", \ + FORMAT, \ + std::forward<decltype(args)>(args)...); \ + }(__VA_ARGS__) + +// Send errors to stderr and the Android log, just for redundancy in case one or +// the other gets dropped. +#define LOG_ERROR_LINE(FORMAT, ...) \ + [](auto&&... args) { \ + fprintf(stderr, FORMAT "\n", std::forward<decltype(args)>(args)...); \ + __android_log_print(ANDROID_LOG_ERROR, \ + "rive_android_tests", \ + FORMAT, \ + std::forward<decltype(args)>(args)...); \ + }(__VA_ARGS__) +#else +// With C++20 (specifically with __VA_OPT__), these could just be: +// #define LOG_INFO_LINE(FORMAT, ...) \ +// printf(FORMAT "\n" __VA_OPT__(,) __VA_ARGS__) +// #define LOG_ERROR_LINE(FORMAT, ...) \ +// fprintf(stderr, FORMAT "\n" __VA_OPT__(,) __VA_ARGS__) +// +// But without, this still needs to be wrapped up in a lambda so that the +// __VA_ARGS__ can be sent in a way that works fine when it's empty. +#define LOG_INFO_LINE(FORMAT, ...) \ + [](auto&&... args) { \ + printf(FORMAT "\n", std::forward<decltype(args)>(args)...); \ + }(__VA_ARGS__) + +#define LOG_ERROR_LINE(FORMAT, ...) \ + [](auto&&... args) { \ + fprintf(stderr, FORMAT "\n", std::forward<decltype(args)>(args)...); \ + }(__VA_ARGS__) + +#endif
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_debug_callbacks.cpp b/renderer/rive_vk_bootstrap/src/vulkan_debug_callbacks.cpp new file mode 100644 index 0000000..05c3f46 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_debug_callbacks.cpp
@@ -0,0 +1,138 @@ +/* + * Copyright 2024 Rive + */ + +#include <array> +#include <stdlib.h> +#include <string.h> +#include "logging.hpp" +#include "vulkan_debug_callbacks.hpp" + +namespace rive_vkb +{ +static bool shouldErrorMessageAbort(const char* message) +{ + static std::array<const char*, 3> s_ignoredValidationMsgList = { + // Swiftshader generates this error during + // vkEnumeratePhysicalDevices. It seems fine to ignore. + "Copying old device 0 into new device 0", + // Cirrus Ubuntu runner w/ Nvidia gpu reports this but it seems + // harmless. + "terminator_CreateInstance: Received return code -3 from call to " + "vkCreateInstance in ICD " + "/usr/lib/x86_64-linux-gnu/libvulkan_virtio.so. Skipping this driver.", + "Override layer has override paths set to " + "D:\\VulkanSDK\\1.3.296.0\\Bin", + }; + + for (const char* msg : s_ignoredValidationMsgList) + { + if (strcmp(message, msg) == 0) + { + return false; + } + } + + return true; +} + +VKAPI_ATTR VkBool32 VKAPI_CALL defaultDebugUtilCallback( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageType, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData) +{ + bool shouldAbort = true; + + switch (messageType) + { + case VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT: + shouldAbort = shouldErrorMessageAbort(pCallbackData->pMessage); + LOG_ERROR_LINE("Rive Vulkan error%s: %i: %s: %s\n", + (shouldAbort) ? 
"" : " (error ignored)", + pCallbackData->messageIdNumber, + pCallbackData->pMessageIdName, + pCallbackData->pMessage); + + if (shouldAbort) + { + abort(); + } + break; + + case VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT: + LOG_ERROR_LINE("Rive Vulkan Validation error: %i: %s: %s", + pCallbackData->messageIdNumber, + pCallbackData->pMessageIdName, + pCallbackData->pMessage); + abort(); + case VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT: + LOG_ERROR_LINE("Rive Vulkan Performance warning: %i: %s: %s", + pCallbackData->messageIdNumber, + pCallbackData->pMessageIdName, + pCallbackData->pMessage); + break; + } + return VK_TRUE; +} + +VKAPI_ATTR VkBool32 VKAPI_CALL +defaultDebugReportCallback(VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT, // objectType + uint64_t, // object + size_t, // location + int32_t messageCode, + const char* layerPrefix, + const char* message, + void* pUserData) +{ + if ((flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) != 0) + { + bool shouldAbort = shouldErrorMessageAbort(message); + + LOG_ERROR_LINE("Rive Vulkan error%s: %s (%i): %s", + (shouldAbort) ? "" : " (error ignored)", + layerPrefix, + messageCode, + message); + + if (shouldAbort) + { + abort(); + } + } + else if ((flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) != 0) + { + LOG_ERROR_LINE("Rive Vulkan warning: %s (%i): %s", + layerPrefix, + messageCode, + message); + abort(); + } + else if ((flags & VK_DEBUG_REPORT_DEBUG_BIT_EXT) != 0) + { + LOG_ERROR_LINE("Rive Vulkan debug message: %s (%i): %s", + layerPrefix, + messageCode, + message); + abort(); + } + else if ((flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) != 0) + { + LOG_ERROR_LINE("Rive Vulkan performance warning: %s (%i): %s", + layerPrefix, + messageCode, + message); + } + else + { + LOG_INFO_LINE("Rive Vulkan info: %s (%i): %s", + layerPrefix, + messageCode, + message); + } + + return false; +} + +} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_debug_callbacks.hpp b/renderer/rive_vk_bootstrap/src/vulkan_debug_callbacks.hpp new file mode 100644 index 0000000..435ba62 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_debug_callbacks.hpp
@@ -0,0 +1,24 @@ +/* + * Copyright 2024 Rive + */ + +#include <vulkan/vulkan.h> + +namespace rive_vkb +{ +VKAPI_ATTR VkBool32 VKAPI_CALL +defaultDebugReportCallback(VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +VKAPI_ATTR VkBool32 VKAPI_CALL +defaultDebugUtilCallback(VkDebugUtilsMessageSeverityFlagBitsEXT, + VkDebugUtilsMessageTypeFlagsEXT, + const VkDebugUtilsMessengerCallbackDataEXT*, + void* pUserData); +} // namespace rive_vkb
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_device.cpp b/renderer/rive_vk_bootstrap/src/vulkan_device.cpp new file mode 100644 index 0000000..699720b --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_device.cpp
@@ -0,0 +1,482 @@ +/* + * Copyright 2025 Rive + */ + +#include <string> +#include "rive/renderer/vulkan/vkutil.hpp" +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "logging.hpp" +#include "vulkan_library.hpp" + +namespace rive_vkb +{ +class VulkanInstance; + +static const char* physicalDeviceTypeName(VkPhysicalDeviceType type) +{ + switch (type) + { + case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: + return "Integrated"; + case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: + return "Discrete"; + case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: + return "Virtual"; + case VK_PHYSICAL_DEVICE_TYPE_CPU: + return "CPU"; + default: + return "Other"; + } +} + +VulkanDevice::VulkanDevice(VulkanInstance& instance, const Options& opts) +{ + assert(!opts.headless || + opts.presentationSurfaceForDeviceSelection == VK_NULL_HANDLE && + "It doesn't make sense to specify a presentation surface for a " + "headless device"); + const char* nameFilter = opts.gpuNameFilter; + if (const char* gpuFromEnv = getenv("RIVE_GPU"); gpuFromEnv != nullptr) + { + // Override the program's GPU filter with one from the environment if + // it's set. 
+ nameFilter = gpuFromEnv; + } + + auto findResult = findCompatiblePhysicalDevice( + instance, + nameFilter, + opts.presentationSurfaceForDeviceSelection); + m_physicalDevice = findResult.physicalDevice; + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkGetPhysicalDeviceFeatures, instance); + assert(vkGetPhysicalDeviceFeatures != nullptr); + + VkPhysicalDeviceFeatures supportedFeatures; + vkGetPhysicalDeviceFeatures(m_physicalDevice, &supportedFeatures); + + VkPhysicalDeviceFeatures requestedFeatures = { + .independentBlend = + !opts.coreFeaturesOnly && supportedFeatures.independentBlend, + // We use wireframe for debugging, so enable it even in core mode + .fillModeNonSolid = supportedFeatures.fillModeNonSolid, + + // Always enable this + .fragmentStoresAndAtomics = supportedFeatures.fragmentStoresAndAtomics, + + .shaderClipDistance = + !opts.coreFeaturesOnly && supportedFeatures.shaderClipDistance, + }; + + m_riveVulkanFeatures = { + .apiVersion = instance.apiVersion(), + .independentBlend = bool(requestedFeatures.independentBlend), + .fillModeNonSolid = bool(requestedFeatures.fillModeNonSolid), + .fragmentStoresAndAtomics = + bool(requestedFeatures.fragmentStoresAndAtomics), + .shaderClipDistance = bool(requestedFeatures.shaderClipDistance), + }; + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkEnumerateDeviceExtensionProperties, + instance); + assert(vkEnumerateDeviceExtensionProperties != nullptr); + + std::vector<VkExtensionProperties> supportedExtensions; + { + uint32_t count; + vkEnumerateDeviceExtensionProperties(m_physicalDevice, + nullptr, + &count, + nullptr); + supportedExtensions.resize(count); + vkEnumerateDeviceExtensionProperties(m_physicalDevice, + nullptr, + &count, + supportedExtensions.data()); + } + + std::vector<const char*> addedExtensions; + + // This extension *must* be enabled if it's supported (it's usually on a + // device that is not a fully-conforming device) + // TODO: we may want to note that a device had this extension, it might be + // useful information 
for devices that are doing weird things. + addExtensionIfSupported("VK_KHR_portability_subset", + supportedExtensions, + addedExtensions); + + if (!opts.headless) + { + if (!addExtensionIfSupported(VK_KHR_SWAPCHAIN_EXTENSION_NAME, + supportedExtensions, + addedExtensions)) + { + LOG_ERROR_LINE("Cannot create device: %s is not supported.", + VK_KHR_SWAPCHAIN_EXTENSION_NAME); + abort(); + } + } + + // If this has a value we'll use it in the VkDeviceCreateInfo chain + std::optional<VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT> + rasterOrderFeatures; + + if (!opts.coreFeaturesOnly) + { + rasterOrderFeatures = tryEnableRasterOrderFeatures(instance, + supportedExtensions, + addedExtensions); + } + + // Get our list of queue family properties. + { + DEFINE_AND_LOAD_INSTANCE_FUNC(vkGetPhysicalDeviceQueueFamilyProperties, + instance); + uint32_t count; + vkGetPhysicalDeviceQueueFamilyProperties(m_physicalDevice, + &count, + nullptr); + m_queueFamilyProperties.resize(count); + vkGetPhysicalDeviceQueueFamilyProperties( + m_physicalDevice, + &count, + m_queueFamilyProperties.data()); + } + + // Now find the graphics queue in the list. + m_graphicsQueueFamilyIndex = std::numeric_limits<uint32_t>::max(); + for (uint32_t i = 0; i < m_queueFamilyProperties.size(); i++) + { + if ((m_queueFamilyProperties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != + 0) + { + m_graphicsQueueFamilyIndex = i; + break; + } + } + + assert(m_graphicsQueueFamilyIndex != std::numeric_limits<uint32_t>::max() && + "Could not find graphics queue index"); + + // We're going to create a single queue (the graphics queue) with a priority + // of 1. + float queuePriority = 1.0f; + VkDeviceQueueCreateInfo queueCreateInfo = { + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueFamilyIndex = m_graphicsQueueFamilyIndex, + .queueCount = 1, + .pQueuePriorities = &queuePriority, + }; + + // Finally create the actual device. 
+ VkDeviceCreateInfo deviceCreateInfo = { + .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + .pNext = (rasterOrderFeatures.has_value()) + ? &rasterOrderFeatures.value() + : nullptr, + .queueCreateInfoCount = 1, + .pQueueCreateInfos = &queueCreateInfo, + .enabledExtensionCount = uint32_t(addedExtensions.size()), + .ppEnabledExtensionNames = addedExtensions.data(), + .pEnabledFeatures = &requestedFeatures, + }; + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkCreateDevice, instance); + VK_CHECK(vkCreateDevice(m_physicalDevice, + &deviceCreateInfo, + nullptr, + &m_device)); + + LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(vkGetDeviceProcAddr, instance); + LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(vkDeviceWaitIdle, instance); + + m_vkGetDeviceQueue = reinterpret_cast<PFN_vkGetDeviceQueue>( + m_vkGetDeviceProcAddr(m_device, "vkGetDeviceQueue")); + m_vkDestroyDevice = reinterpret_cast<PFN_vkDestroyDevice>( + m_vkGetDeviceProcAddr(m_device, "vkDestroyDevice")); + + LOAD_MEMBER_INSTANCE_FUNC(vkGetPhysicalDeviceSurfaceFormatsKHR, instance); + LOAD_MEMBER_INSTANCE_FUNC(vkGetPhysicalDeviceSurfacePresentModesKHR, + instance); + LOAD_MEMBER_INSTANCE_FUNC(vkGetPhysicalDeviceSurfaceCapabilitiesKHR, + instance); + + printf("==== Vulkan %i.%i.%i GPU (%s): %s [ ", + VK_API_VERSION_MAJOR(m_riveVulkanFeatures.apiVersion), + VK_API_VERSION_MINOR(m_riveVulkanFeatures.apiVersion), + VK_API_VERSION_PATCH(m_riveVulkanFeatures.apiVersion), + physicalDeviceTypeName(findResult.deviceType), + findResult.deviceName.c_str()); + struct CommaSeparator + { + const char* m_separator = ""; + const char* operator*() { return std::exchange(m_separator, ", "); } + } commaSeparator; + if (m_riveVulkanFeatures.independentBlend) + printf("%sindependentBlend", *commaSeparator); + if (m_riveVulkanFeatures.fillModeNonSolid) + printf("%sfillModeNonSolid", *commaSeparator); + if (m_riveVulkanFeatures.fragmentStoresAndAtomics) + printf("%sfragmentStoresAndAtomics", *commaSeparator); + if (m_riveVulkanFeatures.shaderClipDistance) + 
printf("%sshaderClipDistance", *commaSeparator); + if (m_riveVulkanFeatures.rasterizationOrderColorAttachmentAccess) + printf("%srasterizationOrderColorAttachmentAccess", *commaSeparator); + printf(" ] ====\n"); +} + +VulkanDevice::~VulkanDevice() { m_vkDestroyDevice(m_device, nullptr); } + +VkSurfaceCapabilitiesKHR VulkanDevice::getSurfaceCapabilities( + VkSurfaceKHR surface) const +{ + VkSurfaceCapabilitiesKHR caps; + VK_CHECK(m_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(m_physicalDevice, + surface, + &caps)); + return caps; +} + +VulkanDevice::FindDeviceResult VulkanDevice::findCompatiblePhysicalDevice( + VulkanInstance& instance, + const char* nameFilter, + VkSurfaceKHR optionalSurfaceForValidation) +{ + if (nameFilter != nullptr && nameFilter[0] == '\0') + { + // Clear name filter if it's empty string. + nameFilter = nullptr; + } + + std::vector<VkPhysicalDevice> physicalDevices; + { + DEFINE_AND_LOAD_INSTANCE_FUNC(vkEnumeratePhysicalDevices, instance); + assert(vkEnumeratePhysicalDevices != nullptr); + + uint32_t count; + VK_CHECK( + vkEnumeratePhysicalDevices(instance.vkInstance(), &count, nullptr)); + physicalDevices.resize(count); + VK_CHECK(vkEnumeratePhysicalDevices(instance.vkInstance(), + &count, + physicalDevices.data())); + } + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, instance); + assert(vkGetPhysicalDeviceProperties != nullptr); + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkGetPhysicalDeviceSurfaceFormatsKHR, + instance); + DEFINE_AND_LOAD_INSTANCE_FUNC(vkGetPhysicalDeviceSurfacePresentModesKHR, + instance); + + auto IsSurfaceSupported = [&](VkPhysicalDevice device) { + uint32_t surfaceFormatCount = 0; + vkGetPhysicalDeviceSurfaceFormatsKHR(device, + optionalSurfaceForValidation, + &surfaceFormatCount, + nullptr); + uint32_t presentModeCount = 0; + vkGetPhysicalDeviceSurfacePresentModesKHR(device, + optionalSurfaceForValidation, + &presentModeCount, + nullptr); + + // We don't care *what* formats/modes are returned, we just need to + // 
know that there would be any that are supported. + return surfaceFormatCount > 0 && presentModeCount > 0; + }; + + if (nameFilter != nullptr) + { + // Find a device containing the given filter + FindDeviceResult matchResult = { + .physicalDevice = VK_NULL_HANDLE, + }; + std::vector<std::string> matchedDeviceNames; + for (const auto& device : physicalDevices) + { + if (optionalSurfaceForValidation != VK_NULL_HANDLE && + !IsSurfaceSupported(device)) + { + // This device does not support the surface we want to + // present with. + continue; + } + + VkPhysicalDeviceProperties props{}; + vkGetPhysicalDeviceProperties(device, &props); + if (strstr(props.deviceName, nameFilter) != nullptr) + { + matchResult = { + .physicalDevice = device, + .deviceName = props.deviceName, + .deviceType = props.deviceType, + }; + matchedDeviceNames.push_back(std::string{props.deviceName}); + } + } + + if (matchedDeviceNames.size() > 1) + { + LOG_ERROR_LINE("Cannot create device: Too many GPU matches for " + "filter '%s':", + nameFilter); + for (auto& matchName : matchedDeviceNames) + { + LOG_ERROR_LINE(" '%s'", matchName.c_str()); + } + LOG_ERROR_LINE("Please update the RIVE_GPU environment variable."); + abort(); + } + + if (matchResult.physicalDevice != VK_NULL_HANDLE) + { + return matchResult; + } + + LOG_ERROR_LINE( + "Cannot create device: No GPU matches for filter '%s'.\nPlease " + "update the RIVE_GPU environment variable.", + nameFilter); + abort(); + } + else + { + // Without a filter we are going to search for any device, but do a + // first pass looking for discrete devices only. + for (auto onlyAcceptDiscrete : {true, false}) + { + for (const auto& device : physicalDevices) + { + VkPhysicalDeviceProperties props{}; + vkGetPhysicalDeviceProperties(device, &props); + + if (optionalSurfaceForValidation != VK_NULL_HANDLE && + !IsSurfaceSupported(device)) + { + // This device does not support the surface we want to + // present with. 
+ continue; + } + + if (!onlyAcceptDiscrete || + props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) + { + return { + .physicalDevice = device, + .deviceName = props.deviceName, + .deviceType = props.deviceType, + }; + } + } + } + + LOG_ERROR_LINE( + "Cannot create device: no supported GPU devices detected."); + abort(); + } +} + +std::optional<VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT> +VulkanDevice::tryEnableRasterOrderFeatures( + VulkanInstance& instance, + const std::vector<VkExtensionProperties>& supportedExtensions, + std::vector<const char*>& extensions) +{ + // Attempt to enable rasterization order attachment access, both by its + // standard name and by the AMD-specific name + for (auto* ext : + {VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME, + "VK_AMD_rasterization_order_attachment_access"}) + { + if (addExtensionIfSupported(ext, supportedExtensions, extensions)) + { + constexpr static VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT + requestedRasterOrderFeatures = { + .sType = + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT, + .rasterizationOrderColorAttachmentAccess = VK_TRUE, + }; + + auto testedRasterOrderFeatures = requestedRasterOrderFeatures; + + // Test to see if this is supported + VkPhysicalDeviceFeatures2 features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + .pNext = &testedRasterOrderFeatures, + }; + + if (instance.tryGetPhysicalDeviceFeatures2(m_physicalDevice, + &features) && + testedRasterOrderFeatures + .rasterizationOrderColorAttachmentAccess) + { + // The query came back with the requested flag set so return the + // feature set we want, it's supported! 
+ m_riveVulkanFeatures.rasterizationOrderColorAttachmentAccess = + true; + return requestedRasterOrderFeatures; + } + break; + } + } + + return std::nullopt; +} + +bool VulkanDevice::addExtensionIfSupported( + const char* name, + const std::vector<VkExtensionProperties>& supportedExtensions, + std::vector<const char*>& extensions) +{ + for (const auto& ext : supportedExtensions) + { + if (strcmp(ext.extensionName, name) == 0) + { + extensions.push_back(name); + return true; + } + } + + return false; +} + +std::vector<VkSurfaceFormatKHR> VulkanDevice::getSurfaceFormats( + VkSurfaceKHR surface) +{ + std::vector<VkSurfaceFormatKHR> formats; + uint32_t count; + m_vkGetPhysicalDeviceSurfaceFormatsKHR(m_physicalDevice, + surface, + &count, + nullptr); + formats.resize(count); + m_vkGetPhysicalDeviceSurfaceFormatsKHR(m_physicalDevice, + surface, + &count, + formats.data()); + return formats; +} + +std::vector<VkPresentModeKHR> VulkanDevice::getSurfacePresentModes( + VkSurfaceKHR surface) +{ + std::vector<VkPresentModeKHR> modes; + uint32_t count; + m_vkGetPhysicalDeviceSurfacePresentModesKHR(m_physicalDevice, + surface, + &count, + nullptr); + modes.resize(count); + m_vkGetPhysicalDeviceSurfacePresentModesKHR(m_physicalDevice, + surface, + &count, + modes.data()); + return modes; +} + +void VulkanDevice::waitUntilIdle() const { m_vkDeviceWaitIdle(m_device); } + +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_frame_synchronizer.cpp b/renderer/rive_vk_bootstrap/src/vulkan_frame_synchronizer.cpp new file mode 100644 index 0000000..bdfc102 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_frame_synchronizer.cpp
@@ -0,0 +1,320 @@ +/* + * Copyright 2025 Rive + */ + +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "rive_vk_bootstrap/vulkan_frame_synchronizer.hpp" +#include "logging.hpp" +#include "vulkan_library.hpp" + +namespace rive_vkb +{ +VulkanFrameSynchronizer::VulkanFrameSynchronizer( + VulkanInstance& instance, + VulkanDevice& device, + rive::rcp<rive::gpu::VulkanContext> vk, + const Options& opts) : + + m_vk(std::move(vk)), + m_device(device.vkDevice()), + m_monotonicFrameNumber(opts.initialFrameNumber) +{ + // Load all of the functions we care about +#define LOAD(name) LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(name, instance); + RIVE_VK_FRAME_SYNC_INSTANCE_COMMANDS(LOAD); +#undef LOAD + + m_vkGetDeviceQueue(m_device, + device.graphicsQueueFamilyIndex(), + 0, + &m_graphicsQueue); + + // Create the command pool + VkCommandPoolCreateInfo commandPoolCreateInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + .queueFamilyIndex = device.graphicsQueueFamilyIndex(), + }; + + VK_CHECK(m_vkCreateCommandPool(m_device, + &commandPoolCreateInfo, + nullptr, + &m_commandPool)); + + // Create the alternating-frame sync objects + assert(opts.inFlightFrameCount > 1); + m_inFlightFrames.resize(opts.inFlightFrameCount); + for (auto& sync : m_inFlightFrames) + { + static constexpr VkFenceCreateInfo fenceCreateInfo = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = VK_FENCE_CREATE_SIGNALED_BIT, + }; + + VK_CHECK( + m_vkCreateFence(m_device, &fenceCreateInfo, nullptr, &sync.fence)); + + VkCommandBufferAllocateInfo cbufferAllocateInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = m_commandPool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, + }; + + VK_CHECK(m_vkAllocateCommandBuffers(m_device, + &cbufferAllocateInfo, + &sync.commandBuffer)); + static constexpr VkSemaphoreCreateInfo semaCreateInfo 
= { + .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + }; + + VK_CHECK(m_vkCreateSemaphore(m_device, + &semaCreateInfo, + nullptr, + &sync.frameEndSemaphore)); + + if (opts.externalGPUSynchronization) + { + // For external synchronization we have a unique semaphore for each + // frame's command buffer submission to wait on. + VK_CHECK(m_vkCreateSemaphore(m_device, + &semaCreateInfo, + nullptr, + &sync.externallySignaledSemaphore)); + } + + sync.safeFrameNumber = m_monotonicFrameNumber; + } + + if (!opts.externalGPUSynchronization) + { + // Without external GPU synchronization we need to explicitly signal + // the very last semaphore in the chain so that the first frame has + // something correct to wait on. + + VkSubmitInfo submitInfo = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .signalSemaphoreCount = 1, + .pSignalSemaphores = &prev().frameEndSemaphore, + }; + + m_vkQueueSubmit(m_graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE); + } +} + +VulkanFrameSynchronizer::~VulkanFrameSynchronizer() +{ + // Note that the derived class will have already waited for the device to be + // idle so we can safely destroy things here. + for (auto& frame : m_inFlightFrames) + { + if (frame.externallySignaledSemaphore != VK_NULL_HANDLE) + { + m_vkDestroySemaphore(m_device, + frame.externallySignaledSemaphore, + nullptr); + } + m_vkDestroySemaphore(m_device, frame.frameEndSemaphore, nullptr); + m_vkFreeCommandBuffers(m_device, + m_commandPool, + 1, + &frame.commandBuffer); + m_vkDestroyFence(m_device, frame.fence, nullptr); + } + + m_vkDestroyCommandPool(m_device, m_commandPool, nullptr); +} + +VkSemaphore VulkanFrameSynchronizer::waitForFenceAndBeginFrame() +{ + // Before we can use the command buffers/semaphores for the current frame, + // we need to wait on its fence to stall the CPU until it's ready. 
+ static constexpr auto NO_TIMEOUT = std::numeric_limits<uint64_t>::max(); + VK_CHECK( + m_vkWaitForFences(m_device, 1, &current().fence, true, NO_TIMEOUT)); + + // Now we need to reset the command buffer + VkCommandBufferBeginInfo beginInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + }; + + VK_CHECK(m_vkResetCommandBuffer(current().commandBuffer, 0)); + VK_CHECK(m_vkBeginCommandBuffer(current().commandBuffer, &beginInfo)); + m_monotonicFrameNumber++; + + // Return the semaphore that external GPU synchronization (i.e. a swapchain) + // will signal. This will just return VK_NULL_HANDLE if there is not + // external GPU synchronization. + return current().externallySignaledSemaphore; +} + +VkSemaphore VulkanFrameSynchronizer::endFrame() +{ + auto& frame = current(); + + // This frame is done - reset the fence so that the submit can signal it. + VK_CHECK(m_vkResetFences(m_device, 1, &frame.fence)); + + // Next, the command buffer needs to be ended (so it can be submitted) + m_vkEndCommandBuffer(frame.commandBuffer); + + VkPipelineStageFlags waitStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; + + // If we have an externally-signaled semaphore, use that. Otherwise, we tell + // the GPU to wait for the previous frame to signal that it was complete + // before continuing. + auto waitSemaphore = (frame.externallySignaledSemaphore != VK_NULL_HANDLE) + ? frame.externallySignaledSemaphore + : prev().frameEndSemaphore; + + VkSubmitInfo submitInfo = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .waitSemaphoreCount = 1, + .pWaitSemaphores = &waitSemaphore, + .pWaitDstStageMask = &waitStageMask, + .commandBufferCount = 1, + .pCommandBuffers = &frame.commandBuffer, + .signalSemaphoreCount = 1, + .pSignalSemaphores = &frame.frameEndSemaphore, + }; + + VK_CHECK(m_vkQueueSubmit(m_graphicsQueue, 1, &submitInfo, frame.fence)); + + // It will be safe to destroy assets in use in the current in-flight frame + // when the current frame number finishes. 
+ frame.safeFrameNumber = m_monotonicFrameNumber; + + // Cycle the render index to the next. + m_renderFrameIndex = (m_renderFrameIndex + 1) % m_inFlightFrames.size(); + + if (m_pixelReadState == PixelReadState::Queued) + { + m_pixelReadState = PixelReadState::Ready; + } + + // External synchronization will need to wait for the frame to end before + // doing its final step (i.e. a swapchain queuing presentation) + return frame.frameEndSemaphore; +} + +void VulkanFrameSynchronizer::queueImageCopy( + VkImage image, + VkFormat format, + rive::gpu::vkutil::ImageAccess* inOutLastAccess, + rive::IAABB pixelReadBounds) +{ + assert(m_pixelReadState == PixelReadState::None && + "Pixel read was while another is active."); + VkDeviceSize requiredBufferSize = + pixelReadBounds.height() * pixelReadBounds.width() * 4; + + // Ensure that we have a read buffer that can hold the amount of data we + // need it to. + if (m_pixelReadBuffer == nullptr || + m_pixelReadBuffer->info().size < requiredBufferSize) + { + m_pixelReadBuffer = m_vk->makeBuffer( + { + .size = requiredBufferSize, + .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT, + }, + rive::gpu::vkutil::Mappability::readWrite); + } + + constexpr rive::gpu::vkutil::ImageAccess TRANSFER_SRC_ACCESS = { + .pipelineStages = VK_PIPELINE_STAGE_TRANSFER_BIT, + .accessMask = VK_ACCESS_TRANSFER_READ_BIT, + .layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + }; + + // We need to move the image into a mode where it can be copied. 
+ auto& frame = current(); + *inOutLastAccess = m_vk->simpleImageMemoryBarrier(frame.commandBuffer, + *inOutLastAccess, + TRANSFER_SRC_ACCESS, + image); + + // Queue the actual copy + VkBufferImageCopy copyDesc = { + .imageSubresource = + { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .imageOffset = {pixelReadBounds.left, pixelReadBounds.top, 0}, + .imageExtent = {uint32_t(pixelReadBounds.width()), + uint32_t(pixelReadBounds.height()), + 1}, + }; + + m_vkCmdCopyImageToBuffer(frame.commandBuffer, + image, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + *m_pixelReadBuffer, + 1, + &copyDesc); + + // Now transition the buffer's state so it will be readable by the CPU once + // the operation is done. + m_vk->bufferMemoryBarrier(frame.commandBuffer, + VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_HOST_BIT, + 0, + { + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_HOST_READ_BIT, + .buffer = *m_pixelReadBuffer, + }); + + m_pixelReadWidth = uint32_t(pixelReadBounds.width()); + m_pixelReadHeight = uint32_t(pixelReadBounds.height()); + m_pixelReadFormat = format; + m_pixelReadState = PixelReadState::Queued; +} + +void VulkanFrameSynchronizer::getPixelsFromLastImageCopy( + std::vector<uint8_t>* outPixels) +{ + assert(m_pixelReadState != PixelReadState::None && + "Pixels from image copy requested without one submitted"); + assert(m_pixelReadState != PixelReadState::Queued && + "Pixels from image copy requested before endFrame was called"); + + // In order to read this back, we need to wait for the previously-finished + // frame to finish. 
+ auto& sync = prev(); + static constexpr auto NO_TIMEOUT = std::numeric_limits<uint64_t>::max(); + VK_CHECK(m_vkWaitForFences(m_device, 1, &sync.fence, true, NO_TIMEOUT)); + + // Make the texture data available to the CPU + m_pixelReadBuffer->invalidateContents(); + + outPixels->resize(m_pixelReadWidth * m_pixelReadHeight * 4); + assert(m_pixelReadBuffer->info().size >= outPixels->size()); + + for (auto y = 0u; y < m_pixelReadHeight; y++) + { + // Copy the given row (the destination is flipped vertically vs the + // source so read the source the other way around) + auto src = static_cast<const uint8_t*>(m_pixelReadBuffer->contents()) + + m_pixelReadWidth * 4 * (m_pixelReadHeight - 1 - y); + uint8_t* dst = &outPixels->at(y * m_pixelReadWidth * 4); + memcpy(dst, src, m_pixelReadWidth * 4); + + if (m_pixelReadFormat == VK_FORMAT_B8G8R8A8_UNORM) + { + // Need to swap BGRA -> RGBA + for (auto x = 0u; x < m_pixelReadWidth * 4; x += 4) + { + std::swap(dst[x], dst[x + 2]); + } + } + } + + m_pixelReadState = PixelReadState::None; +} + +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_headless_frame_synchronizer.cpp b/renderer/rive_vk_bootstrap/src/vulkan_headless_frame_synchronizer.cpp new file mode 100644 index 0000000..fb25e0d --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_headless_frame_synchronizer.cpp
@@ -0,0 +1,94 @@ +/* + * Copyright 2025 Rive + */ + +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "rive_vk_bootstrap/vulkan_headless_frame_synchronizer.hpp" +#include "logging.hpp" +#include "vulkan_library.hpp" + +namespace rive_vkb +{ +VulkanHeadlessFrameSynchronizer::VulkanHeadlessFrameSynchronizer( + VulkanInstance& instance, + VulkanDevice& device, + rive::rcp<rive::gpu::VulkanContext> vk, + const Options& opts) : + Super(instance, + device, + std::move(vk), + { + .initialFrameNumber = opts.initialFrameNumber, + .externalGPUSynchronization = false, + }), + m_imageFormat(opts.imageFormat), + m_imageUsageFlags(opts.imageUsageFlags), + m_width(opts.width), + m_height(opts.height), + m_image(context()->makeImage({ + .imageType = VK_IMAGE_TYPE_2D, + .format = m_imageFormat, + .extent = {m_width, m_height, 1}, + .usage = m_imageUsageFlags, + })), + m_imageView(context()->makeImageView(m_image)) +{ + assert(opts.width > 0 && opts.height > 0 && + "Offscreen frame dimensions must be set"); + + // Load all of the functions we care about +#define LOAD(name) LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(name, instance); + RIVE_VK_OFFSCREEN_FRAME_INSTANCE_COMMANDS(LOAD); +#undef LOAD +} + +VulkanHeadlessFrameSynchronizer::~VulkanHeadlessFrameSynchronizer() +{ + // Don't do anything until everything is flushed through. 
+ m_vkDeviceWaitIdle(vkDevice()); +} + +bool VulkanHeadlessFrameSynchronizer::isFrameStarted() const +{ + return m_isInFrame; +} + +void VulkanHeadlessFrameSynchronizer::beginFrame() +{ + assert(!isFrameStarted()); + + Super::waitForFenceAndBeginFrame(); + + m_isInFrame = true; +} + +void VulkanHeadlessFrameSynchronizer::queueImageCopy( + rive::gpu::vkutil::ImageAccess* inOutLastAccess, + rive::IAABB optPixelReadBounds) +{ + if (optPixelReadBounds.empty()) + { + // Empty bounds means we want to just copy the entire texture + optPixelReadBounds = rive::IAABB::MakeWH(m_width, m_height); + } + + Super::queueImageCopy(*m_image, + m_imageFormat, + inOutLastAccess, + optPixelReadBounds); +} + +void VulkanHeadlessFrameSynchronizer::endFrame( + const rive::gpu::vkutil::ImageAccess& lastAccess) +{ + assert(isFrameStarted()); + + m_lastImageAccess = lastAccess; + + Super::endFrame(); + + m_isInFrame = false; +} + +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_instance.cpp b/renderer/rive_vk_bootstrap/src/vulkan_instance.cpp new file mode 100644 index 0000000..16d8f89 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_instance.cpp
@@ -0,0 +1,339 @@ +/* + * Copyright 2025 Rive + */ + +#include "rive/renderer/vulkan/vkutil.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "logging.hpp" +#include "vulkan_debug_callbacks.hpp" +#include "vulkan_library.hpp" + +namespace rive_vkb +{ +VulkanInstance::VulkanInstance(const Options& opts) +{ + m_library = std::make_unique<VulkanLibrary>(); + + // Figure out which version to use + m_instanceVersion = opts.idealAPIVersion; + m_apiVersion = opts.idealAPIVersion; + if (opts.idealAPIVersion > opts.minimumSupportedInstanceVersion) + { + if (m_library->canEnumerateInstanceVersion()) + { + m_library->enumerateInstanceVersion(&m_instanceVersion); + } + else + { + // If vkEnumerateInstanceVersion doesn't exist then this we need + // to assume this is Vulkan 1.0 (the function was introduced + // in 1.1) + m_instanceVersion = VK_API_VERSION_1_0; + } + } + + if (m_instanceVersion < VK_API_VERSION_1_1) + { + // The API veresion is intended to be the maximum Vulkan version + // supported by the given application. 
However, Vulkan 1.0 + // implementations will fail with VK_ERROR_INCOMPATIBLE_DRIVER for + // api versions > 1.0, so if we detect that it's not a 1.1 or + // greater device, we need to force the api version to 1.0 + m_apiVersion = VK_API_VERSION_1_0; + } + + if (m_instanceVersion < opts.minimumSupportedInstanceVersion) + { + LOG_ERROR_LINE( + "Instance version %d.%d.%d is less than the minimum supported " + "version of %d.%d.%d", + VK_VERSION_MAJOR(m_instanceVersion), + VK_VERSION_MINOR(m_instanceVersion), + VK_VERSION_PATCH(m_instanceVersion), + VK_VERSION_MAJOR(opts.minimumSupportedInstanceVersion), + VK_VERSION_MINOR(opts.minimumSupportedInstanceVersion), + VK_VERSION_PATCH(opts.minimumSupportedInstanceVersion)); + abort(); + } + + VkApplicationInfo appInfo = { + .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, + .pApplicationName = opts.appName, + .pEngineName = opts.engineName, + .apiVersion = m_apiVersion, + }; + + std::vector<VkExtensionProperties> supportedExtensions; + std::vector<VkLayerProperties> supportedLayers; + { + uint32_t count; + m_library->enumerateInstanceExtensionProperties(nullptr, + &count, + nullptr); + + supportedExtensions.resize(count); + m_library->enumerateInstanceExtensionProperties( + nullptr, + &count, + supportedExtensions.data()); + + m_library->enumerateInstanceLayerProperties(&count, nullptr); + supportedLayers.resize(count); + m_library->enumerateInstanceLayerProperties(&count, + supportedLayers.data()); + + if (opts.logExtendedCreationInfo) + { + LOG_INFO_LINE("Reported Vulkan extensions:"); + for (const auto& ext : supportedExtensions) + { + LOG_INFO_LINE(" %s (version %d)", + ext.extensionName, + ext.specVersion); + } + + LOG_INFO_LINE("Reported Vulkan layers:"); + for (const auto& layer : supportedLayers) + { + LOG_INFO_LINE(" %s (spec: %d, impl: %d)%s%s", + layer.layerName, + layer.specVersion, + layer.implementationVersion, + (layer.description[0] != '\0') ? 
"\n " + : "", + layer.description); + } + } + } + + bool enableDebugCallbacks = opts.wantDebugCallbacks; + bool enableValidationLayers = opts.wantValidationLayers; + + std::vector<const char*> extensions; + std::vector<const char*> layers; + + auto add_extension_if_supported = [&](const char* extName) { + for (const auto& ext : supportedExtensions) + { + if (strcmp(ext.extensionName, extName) == 0) + { + extensions.push_back(extName); + return true; + } + } + + return false; + }; + + auto add_layer_if_supported = [&](const char* layerName) { + for (const auto& layer : supportedLayers) + { + if (strcmp(layer.layerName, layerName) == 0) + { + layers.push_back(layerName); + return true; + } + } + + return false; + }; + + auto add_extensions_if_supported = [&](const auto& extNames) { + for (auto* extName : extNames) + { + add_extension_if_supported(extName); + } + }; + + for (auto* extName : opts.requiredExtensions) + { + if (!add_extension_if_supported(extName)) + { + LOG_ERROR_LINE("Required extension '%s' was not supported", + extName); + abort(); + } + } + + add_extensions_if_supported(opts.optionalExtensions); + + VkInstanceCreateFlags instanceCreateFlags = 0; + +#ifdef VK_KHR_portability_enumeration + if (add_extension_if_supported( + VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME)) + { + instanceCreateFlags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR; + } +#endif + + bool useFallbackDebugCallbacks = false; + if (enableDebugCallbacks) + { + if (!add_extension_if_supported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + if (add_extension_if_supported(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) + { + // Some devices only have the older debug report extension, so + // use that where we need to. + useFallbackDebugCallbacks = true; + } + else + { + LOG_ERROR_LINE("WARNING: Debug callbacks are not supported. 
" + "Creating context without debug callbacks.\n"); + enableDebugCallbacks = false; + } + } + } + + if (enableValidationLayers) + { + if (!add_layer_if_supported("VK_LAYER_KHRONOS_validation")) + { + LOG_ERROR_LINE("WARNING: Validation layers are not supported. " + "Creating context without validation layers.\n"); + enableValidationLayers = false; + } + } + + bool enabledKHRDeviceProperties2 = false; + if (m_instanceVersion < VK_API_VERSION_1_1) + { + enabledKHRDeviceProperties2 = add_extension_if_supported( + VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); + } + + VkInstanceCreateInfo createInfo = { + .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + .flags = instanceCreateFlags, + .pApplicationInfo = &appInfo, + .enabledLayerCount = uint32_t(layers.size()), + .ppEnabledLayerNames = layers.data(), + .enabledExtensionCount = uint32_t(extensions.size()), + .ppEnabledExtensionNames = extensions.data(), + }; + + VK_CHECK( + m_library->createInstance(&createInfo, VK_NULL_HANDLE, &m_instance)); + + m_enabledExtensions = std::move(extensions); + m_enabledLayers = std::move(layers); + + LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(vkDestroyInstance, *this); + + if (m_instanceVersion >= VK_API_VERSION_1_1) + { + LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(vkGetPhysicalDeviceFeatures2, *this); + } + else if (enabledKHRDeviceProperties2) + { + LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(vkGetPhysicalDeviceFeatures2KHR, + *this); + } + + if (enableDebugCallbacks) + { + if (useFallbackDebugCallbacks) + { + LOG_INFO_LINE("Note: " VK_EXT_DEBUG_UTILS_EXTENSION_NAME + " was not supported, falling back " + "to " VK_EXT_DEBUG_REPORT_EXTENSION_NAME "."); + DEFINE_AND_LOAD_INSTANCE_FUNC(vkCreateDebugReportCallbackEXT, + *this); + + VkDebugReportCallbackCreateInfoEXT debugCreateInfo = {}; + debugCreateInfo.sType = + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; + debugCreateInfo.flags = + VK_DEBUG_REPORT_WARNING_BIT_EXT | + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | + 
VK_DEBUG_REPORT_ERROR_BIT_EXT; + debugCreateInfo.pfnCallback = &defaultDebugReportCallback; + vkCreateDebugReportCallbackEXT(m_instance, + &debugCreateInfo, + nullptr, + &m_debugReportCallback); + } + else + { + DEFINE_AND_LOAD_INSTANCE_FUNC(vkCreateDebugUtilsMessengerEXT, + *this); + + VkDebugUtilsMessengerCreateInfoEXT messengerCreateInfo = { + .sType = + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + .messageSeverity = + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, + .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, + .pfnUserCallback = &defaultDebugUtilCallback, + }; + + if (auto result = + vkCreateDebugUtilsMessengerEXT(m_instance, + &messengerCreateInfo, + nullptr, + &m_debugUtilsMessenger); + result != VK_SUCCESS) + { + LOG_ERROR_LINE( + "Failed to create debug messenger. Error code: %d", + uint32_t(result)); + } + } + } +}; // namespace rive_vkb + +VulkanInstance::~VulkanInstance() +{ + if (m_debugUtilsMessenger != VK_NULL_HANDLE) + { + DEFINE_AND_LOAD_INSTANCE_FUNC(vkDestroyDebugUtilsMessengerEXT, *this); + vkDestroyDebugUtilsMessengerEXT(m_instance, + m_debugUtilsMessenger, + nullptr); + } + + if (m_debugReportCallback != VK_NULL_HANDLE) + { + DEFINE_AND_LOAD_INSTANCE_FUNC(vkDestroyDebugReportCallbackEXT, *this); + vkDestroyDebugReportCallbackEXT(m_instance, + m_debugReportCallback, + nullptr); + } + + m_vkDestroyInstance(m_instance, nullptr); +} + +PFN_vkVoidFunction VulkanInstance::loadInstanceFunc(const char* name) const +{ + return m_library->getInstanceProcAddr(m_instance, name); +} + +bool VulkanInstance::tryGetPhysicalDeviceFeatures2( + VkPhysicalDevice device, + VkPhysicalDeviceFeatures2* inoutFeatures) +{ + if (m_vkGetPhysicalDeviceFeatures2 != nullptr) + { + m_vkGetPhysicalDeviceFeatures2(device, inoutFeatures); + return true; + } + else if 
(m_vkGetPhysicalDeviceFeatures2KHR != nullptr) + { + m_vkGetPhysicalDeviceFeatures2KHR(device, inoutFeatures); + return true; + } + + return false; +} + +PFN_vkGetInstanceProcAddr VulkanInstance::getVkGetInstanceProcAddrPtr() const +{ + return m_library->getVkGetInstanceProcAddrPtr(); +} + +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_library.cpp b/renderer/rive_vk_bootstrap/src/vulkan_library.cpp new file mode 100644 index 0000000..79ba15c --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_library.cpp
@@ -0,0 +1,117 @@ +/* + * Copyright 2025 Rive + */ + +#include "vulkan_library.hpp" + +#ifndef _WIN32 +#include <dlfcn.h> +#endif + +namespace rive_vkb +{ +VulkanLibrary::VulkanLibrary() +{ +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__linux__) + static_assert(false, "Unsupported platform"); +#endif + + constexpr const char* libFilenameCandidates[] = { +#if defined(_WIN32) + "vulkan-1.dll", +#elif defined(__APPLE__) + "libvulkan.dylib", + "libvulkan.1.dylib", + "libMoltenVK.dylib", + // The Vulkan SDK on Mac gets installed to /usr/local/lib, which is no + // longer on the library search path after Sonoma. + "/usr/local/lib/libvulkan.1.dylib", +#else + "libvulkan.so.1", + "libvulkan.so", +#endif + }; + + for (auto* filenameCandidate : libFilenameCandidates) + { +#ifdef _WIN32 + m_library = LoadLibraryA(filenameCandidate); +#else + m_library = dlopen(filenameCandidate, RTLD_NOW | RTLD_LOCAL); +#endif + + if (m_library != nullptr) + { + break; + } + } + + assert(m_library != nullptr && "Failed to find Vulkan library"); + +#ifdef _WIN32 + m_vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>( + GetProcAddress(m_library, "vkGetInstanceProcAddr")); +#else + m_vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>( + dlsym(m_library, "vkGetInstanceProcAddr")); +#endif + assert(m_vkGetInstanceProcAddr != nullptr); + + LOAD_MEMBER_INSTANCE_FUNC(vkEnumerateInstanceVersion, *this); + LOAD_MEMBER_INSTANCE_FUNC(vkCreateInstance, *this); + assert(m_vkCreateInstance != nullptr); + LOAD_MEMBER_INSTANCE_FUNC(vkEnumerateInstanceLayerProperties, *this); + assert(m_vkEnumerateInstanceLayerProperties != nullptr); + LOAD_MEMBER_INSTANCE_FUNC(vkEnumerateInstanceExtensionProperties, *this); + assert(m_vkEnumerateInstanceExtensionProperties != nullptr); +} + +VulkanLibrary::~VulkanLibrary() +{ +#ifdef _WIN32 + FreeLibrary(m_library); +#else + dlclose(m_library); +#endif +} + +PFN_vkVoidFunction VulkanLibrary::getInstanceProcAddr(VkInstance 
instance, + const char* name) const +{ + return m_vkGetInstanceProcAddr(instance, name); +} + +bool VulkanLibrary::canEnumerateInstanceVersion() const +{ + return m_vkEnumerateInstanceVersion != nullptr; +} + +VkResult VulkanLibrary::enumerateInstanceVersion(uint32_t* outVersion) const +{ + return m_vkEnumerateInstanceVersion(outVersion); +} + +VkResult VulkanLibrary::createInstance(const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance) const +{ + return m_vkCreateInstance(pCreateInfo, pAllocator, pInstance); +} + +VkResult VulkanLibrary::enumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties) const +{ + return m_vkEnumerateInstanceLayerProperties(pPropertyCount, pProperties); +} + +VkResult VulkanLibrary::enumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties) const +{ + return m_vkEnumerateInstanceExtensionProperties(pLayerName, + pPropertyCount, + pProperties); +} +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_library.hpp b/renderer/rive_vk_bootstrap/src/vulkan_library.hpp new file mode 100644 index 0000000..50e6d28 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_library.hpp
@@ -0,0 +1,83 @@ +/* + * Copyright 2025 Rive + */ + +#ifdef _WIN32 // !!! in the VulkanInstance cpp +#include <Windows.h> +#endif + +#include <assert.h> +#include <vulkan/vulkan.h> + +// Helper macro to use a given object with a load_instance_func function to load +// a vulkan function with the given name (to avoid redundancy and reduce the +// chance of typos in the string) +#define LOAD_INSTANCE_FUNC(name, obj) (obj).loadInstanceFunc<PFN_##name>(#name) + +// Helper macro to define and load a given instance func (eliminating the name +// needing to be typed 3 times and reduce the chance of typos in the string) +#define DEFINE_AND_LOAD_INSTANCE_FUNC(name, obj) \ + auto name = LOAD_INSTANCE_FUNC(name, obj) + +// Helper macro to load a given instance func into an existing member +// (eliminating the name needing to be typed 3 times and reduce the chance of +// typos in the string) +#define LOAD_MEMBER_INSTANCE_FUNC(name, obj) \ + m_##name = LOAD_INSTANCE_FUNC(name, obj) + +// Same as LOAD_MEMBER_INSTANCE_FUNC but asserts non-null +#define LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(name, obj) \ + LOAD_MEMBER_INSTANCE_FUNC(name, obj); \ + assert(m_##name != nullptr && "Could not load " #name) +namespace rive_vkb +{ +class VulkanLibrary +{ +public: + VulkanLibrary(); + ~VulkanLibrary(); + + PFN_vkVoidFunction getInstanceProcAddr(VkInstance instance, + const char* name) const; + + // Load an instance func that doesn't require a specified instance + template <typename T> T loadInstanceFunc(const char* name) const + { + return reinterpret_cast<T>(getInstanceProcAddr(VK_NULL_HANDLE, name)); + } + + bool canEnumerateInstanceVersion() const; + + VkResult enumerateInstanceVersion(uint32_t* outVersion) const; + VkResult createInstance(const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance) const; + VkResult enumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties) const; + VkResult 
enumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties) const; + + PFN_vkGetInstanceProcAddr getVkGetInstanceProcAddrPtr() const + { + return m_vkGetInstanceProcAddr; + } + +private: +#ifdef _WIN32 + HMODULE m_library; +#else + void* m_library; +#endif + + PFN_vkGetInstanceProcAddr m_vkGetInstanceProcAddr = nullptr; + PFN_vkEnumerateInstanceVersion m_vkEnumerateInstanceVersion = nullptr; + PFN_vkCreateInstance m_vkCreateInstance = nullptr; + PFN_vkEnumerateInstanceLayerProperties + m_vkEnumerateInstanceLayerProperties = nullptr; + PFN_vkEnumerateInstanceExtensionProperties + m_vkEnumerateInstanceExtensionProperties = nullptr; +}; +} // namespace rive_vkb \ No newline at end of file
diff --git a/renderer/rive_vk_bootstrap/src/vulkan_swapchain.cpp b/renderer/rive_vk_bootstrap/src/vulkan_swapchain.cpp new file mode 100644 index 0000000..ef8d117 --- /dev/null +++ b/renderer/rive_vk_bootstrap/src/vulkan_swapchain.cpp
@@ -0,0 +1,249 @@ +/* + * Copyright 2025 Rive + */ + +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "rive_vk_bootstrap/vulkan_swapchain.hpp" +#include "logging.hpp" +#include "vulkan_library.hpp" + +namespace rive_vkb +{ +VulkanSwapchain::VulkanSwapchain(VulkanInstance& instance, + VulkanDevice& device, + rive::rcp<rive::gpu::VulkanContext> vk, + VkSurfaceKHR surface, + const Options& opts) : + Super(instance, + device, + std::move(vk), + { + .initialFrameNumber = opts.initialFrameNumber, + .externalGPUSynchronization = true, + }) +{ + assert(opts.formatPreferences.size() > 0 && + "Must request at least one surface format"); + assert(opts.presentModePreferences.size() > 0 && + "Must request at least one present mode"); + + // Load all of the functions we care about +#define LOAD(name) LOAD_REQUIRED_MEMBER_INSTANCE_FUNC(name, instance); + RIVE_VK_SWAPCHAIN_INSTANCE_COMMANDS(LOAD); +#undef LOAD + + // Check the device to see what our best-match image format is + auto surfaceFormat = + findBestFormat(device, surface, opts.formatPreferences); + auto presentMode = + findBestPresentMode(device, surface, opts.presentModePreferences); + + VkCompositeAlphaFlagBitsKHR compositeAlphaFlags = +#if defined(__ANDROID__) + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR; +#else + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; +#endif + + auto surfaceCaps = device.getSurfaceCapabilities(surface); + + VkSwapchainCreateInfoKHR swapchainCreateInfo = { + .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + .surface = surface, + .minImageCount = + std::max(opts.preferredImageCount, surfaceCaps.minImageCount), + .imageFormat = surfaceFormat.format, + .imageColorSpace = surfaceFormat.colorSpace, + .imageExtent = surfaceCaps.currentExtent, + .imageArrayLayers = 1, + .imageUsage = opts.imageUsageFlags, + .preTransform = surfaceCaps.currentTransform, + .compositeAlpha = compositeAlphaFlags, + .presentMode = presentMode, + .clipped = true, + }; + + 
m_width = surfaceCaps.currentExtent.width; + m_height = surfaceCaps.currentExtent.height; + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkCreateSwapchainKHR, instance); + VK_CHECK(vkCreateSwapchainKHR(device.vkDevice(), + &swapchainCreateInfo, + nullptr, + &m_swapchain)); + + m_imageFormat = surfaceFormat.format; + m_imageUsageFlags = opts.imageUsageFlags; + + // Get the swapchain images and then build out our internal image data + std::vector<VkImage> vkImages; + { + DEFINE_AND_LOAD_INSTANCE_FUNC(vkGetSwapchainImagesKHR, instance); + + uint32_t count; + vkGetSwapchainImagesKHR(device.vkDevice(), + m_swapchain, + &count, + nullptr); + vkImages.resize(count); + vkGetSwapchainImagesKHR(device.vkDevice(), + m_swapchain, + &count, + vkImages.data()); + } + + m_swapchainImages.resize(vkImages.size()); + + DEFINE_AND_LOAD_INSTANCE_FUNC(vkCreateImageView, instance); + for (uint32_t i = 0; i < vkImages.size(); i++) + { + m_swapchainImages[i].image = vkImages[i]; + + VkImageViewCreateInfo viewCreateInfo = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = vkImages[i], + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = m_imageFormat, + .subresourceRange = + { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .levelCount = 1, + .layerCount = 1, + }, + }; + + vkCreateImageView(device.vkDevice(), + &viewCreateInfo, + nullptr, + &m_swapchainImages[i].view); + } +} + +VulkanSwapchain::~VulkanSwapchain() +{ + // Don't do anything until everything is flushed through. 
+ m_vkDeviceWaitIdle(vkDevice()); + + for (auto& data : m_swapchainImages) + { + m_vkDestroyImageView(vkDevice(), data.view, nullptr); + } + + m_vkDestroySwapchainKHR(vkDevice(), m_swapchain, nullptr); +} + +VkSurfaceFormatKHR VulkanSwapchain::findBestFormat( + VulkanDevice& device, + VkSurfaceKHR surface, + const std::vector<VkSurfaceFormatKHR>& preferences) +{ + auto formats = device.getSurfaceFormats(surface); + for (auto& pref : preferences) + { + for (auto& format : formats) + { + if (format.format == pref.format && + format.colorSpace == pref.colorSpace) + { + return pref; + } + } + } + + LOG_ERROR_LINE("Could not find any preferred surface format"); + abort(); +} + +VkPresentModeKHR VulkanSwapchain::findBestPresentMode( + VulkanDevice& device, + VkSurfaceKHR surface, + const std::vector<VkPresentModeKHR>& presentModePreferences) +{ + auto modes = device.getSurfacePresentModes(surface); + for (auto& pref : presentModePreferences) + { + for (auto& mode : modes) + { + if (mode == pref) + { + return pref; + } + } + } + + LOG_ERROR_LINE("Could not find any preferred present mode"); + abort(); +} + +bool VulkanSwapchain::isFrameStarted() const +{ + return m_currentImageIndex < m_swapchainImages.size(); +} + +void VulkanSwapchain::beginFrame() +{ + assert(!isFrameStarted()); + + // Do the work for the frame synchronization to begin + auto semaphoreToSignal = Super::waitForFenceAndBeginFrame(); + + // Next, acquire the next image from the swap chain, and signal the + static constexpr auto NO_TIMEOUT = std::numeric_limits<uint64_t>::max(); + VK_CHECK(m_vkAcquireNextImageKHR(vkDevice(), + m_swapchain, + NO_TIMEOUT, + semaphoreToSignal, + VK_NULL_HANDLE, + &m_currentImageIndex)); +} + +void VulkanSwapchain::queueImageCopy( + rive::gpu::vkutil::ImageAccess* inOutLastAccess, + rive::IAABB optPixelReadBounds) +{ + queueImageCopy(current().image, + m_imageFormat, + inOutLastAccess, + optPixelReadBounds); +} + +void VulkanSwapchain::endFrame(const 
rive::gpu::vkutil::ImageAccess& lastAccess) +{ + assert(isFrameStarted()); + + auto& swapImage = current(); + + // Whether or not we attempted to copy from the swapchain we need to + // transition it to the present layout. + swapImage.lastAccess = context()->simpleImageMemoryBarrier( + currentCommandBuffer(), + lastAccess, + { + .pipelineStages = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + .accessMask = VK_ACCESS_NONE, + .layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + }, + swapImage.image); + + // Now that the memory barrier is in the command buffer, we can end the + // frame sync frame. + VkSemaphore waitSemaphore = Super::endFrame(); + + // Now queue the actual presentation of the swpchain image + VkPresentInfoKHR presentInfo = { + .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + .waitSemaphoreCount = 1, + .pWaitSemaphores = &waitSemaphore, + .swapchainCount = 1, + .pSwapchains = &m_swapchain, + .pImageIndices = &m_currentImageIndex, + }; + + m_vkQueuePresentKHR(graphicsQueue(), &presentInfo); + + // This puts us in the !IsFrameStarted() state + m_currentImageIndex = std::numeric_limits<uint32_t>::max(); +} + +} // namespace rive_vkb \ No newline at end of file
diff --git a/tests/common/testing_window_android_vulkan.cpp b/tests/common/testing_window_android_vulkan.cpp index 2d89b17..164beed 100644 --- a/tests/common/testing_window_android_vulkan.cpp +++ b/tests/common/testing_window_android_vulkan.cpp
@@ -15,7 +15,9 @@ #else #include "common/offscreen_render_target.hpp" -#include "rive_vk_bootstrap/rive_vk_bootstrap.hpp" +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "rive_vk_bootstrap/vulkan_swapchain.hpp" #include "rive/renderer/rive_renderer.hpp" #include "rive/renderer/vulkan/render_context_vulkan_impl.hpp" #include "rive/renderer/vulkan/render_target_vulkan.hpp" @@ -45,145 +47,106 @@ ANativeWindow* window) : m_backendParams(backendParams) { - m_androidWindowWidth = m_width = ANativeWindow_getWidth(window); - m_androidWindowHeight = m_height = ANativeWindow_getHeight(window); - rive_vkb::load_vulkan(); + using namespace rive_vkb; // Request Vulkan 1.3, except if we're in core mode where we want 1.0. int minorVersionRequested = m_backendParams.core ? 0 : 3; - for (;;) - { - vkb::InstanceBuilder instanceBuilder; - instanceBuilder.set_app_name("path_fiddle") - .set_engine_name("Rive Renderer") - .enable_extension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) - .require_api_version(1, minorVersionRequested, 0) - .set_minimum_instance_version(1, 0, 0); -#ifdef DEBUG - if (!m_backendParams.disableValidationLayers) - { - instanceBuilder.enable_validation_layers(); - } - if (!m_backendParams.disableDebugCallbacks) - { - instanceBuilder.set_debug_callback( - rive_vkb::default_debug_callback); - } -#endif - auto instanceResult = instanceBuilder.build(); - if (!instanceResult) - { - auto error = static_cast<vkb::InstanceError>( - instanceResult.error().value()); - if (error == - vkb::InstanceError::vulkan_version_1_1_unavailable && - minorVersionRequested != 0) - { - // There's a bug in VkBootstrap (due to not properly - // handling Vulkan 1.0 not having the - // vkEnumerateInstanceVersion function) where it can give a - // vulkan_version_1_1_unavailable error even though we've - // specified a minimum of 1.0. If we get that error, - // request 1.0 directly and try again. 
- LOG_ERROR_LINE("Falling back on Vulkan 1.0."); - minorVersionRequested = 0; - continue; - } + std::vector<const char*> extensionNames; + extensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME); + extensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); -#ifdef DEBUG - if (!m_backendParams.disableValidationLayers && - error == vkb::InstanceError::requested_layers_not_present) - { - LOG_ERROR_LINE( - "WARNING: Validation layers not found. Attempting to " - "create a Vulkan context again without validation " - "layers."); - m_backendParams.disableValidationLayers = true; - continue; - } - if (!m_backendParams.disableDebugCallbacks && - error == vkb::InstanceError::failed_create_debug_messenger) - { - LOG_ERROR_LINE( - "WARNING: Debug callbacks not supported. Attempting to " - "create a Vulkan context again without debug " - "callbacks."); - m_backendParams.disableDebugCallbacks = true; - continue; - } + m_instance = std::make_unique<VulkanInstance>(VulkanInstance::Options{ + .appName = "Rive Android Test", + .idealAPIVersion = + m_backendParams.core ? 
VK_API_VERSION_1_0 : VK_API_VERSION_1_3, + .requiredExtensions = + make_span(extensionNames.data(), extensionNames.size()), +#ifndef NDEBUG + .wantValidationLayers = !m_backendParams.disableDebugCallbacks, + .wantDebugCallbacks = !m_backendParams.disableValidationLayers, #endif - LOG_ERROR_LINE("ERROR: %s: Failed to build Vulkan instance.", - instanceResult.error().message().c_str()); - abort(); - } - m_instance = *instanceResult; - break; - } - m_instanceDispatchTable = m_instance.make_table(); + }); + + m_vkDestroySurfaceKHR = + m_instance->loadInstanceFunc<PFN_vkDestroySurfaceKHR>( + "vkDestroySurfaceKHR"); + assert(m_vkDestroySurfaceKHR != nullptr); VkAndroidSurfaceCreateInfoKHR androidSurfaceCreateInfo = { .sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, .window = window, }; auto pfnvkCreateAndroidSurfaceKHR = - reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>( - m_instance.fp_vkGetInstanceProcAddr( - m_instance, - "vkCreateAndroidSurfaceKHR")); - assert(pfnvkCreateAndroidSurfaceKHR); - VK_CHECK(pfnvkCreateAndroidSurfaceKHR(m_instance, + m_instance->loadInstanceFunc<PFN_vkCreateAndroidSurfaceKHR>( + "vkCreateAndroidSurfaceKHR"); + assert(pfnvkCreateAndroidSurfaceKHR != nullptr); + VK_CHECK(pfnvkCreateAndroidSurfaceKHR(m_instance->vkInstance(), &androidSurfaceCreateInfo, nullptr, &m_windowSurface)); - VulkanFeatures vulkanFeatures; - std::tie(m_device, vulkanFeatures) = rive_vkb::select_device( - vkb::PhysicalDeviceSelector(m_instance) - .set_surface(m_windowSurface), - m_backendParams.core ? 
rive_vkb::FeatureSet::coreOnly - : rive_vkb::FeatureSet::allAvailable); + m_device = std::make_unique<VulkanDevice>( + *m_instance, + VulkanDevice::Options{ + .coreFeaturesOnly = m_backendParams.core, + }); + m_renderContext = RenderContextVulkanImpl::MakeContext( - m_instance, - m_device.physical_device, - m_device, - vulkanFeatures, - m_instance.fp_vkGetInstanceProcAddr, + m_instance->vkInstance(), + m_device->vkPhysicalDevice(), + m_device->vkDevice(), + m_device->vulkanFeatures(), + m_instance->getVkGetInstanceProcAddrPtr(), {.forceAtomicMode = backendParams.atomic}); - VkSurfaceCapabilitiesKHR windowCapabilities; - VK_CHECK(m_instanceDispatchTable - .fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR( - m_device.physical_device, - m_windowSurface, - &windowCapabilities)); - auto swapchainBuilder = - vkb::SwapchainBuilder(m_device, m_windowSurface) - .set_desired_format({ - .format = m_backendParams.srgb ? VK_FORMAT_R8G8B8A8_SRGB - : VK_FORMAT_R8G8B8A8_UNORM, - .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, - }) - .add_fallback_format({ - .format = VK_FORMAT_R8G8B8A8_UNORM, - .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, - }) - .set_desired_present_mode(VK_PRESENT_MODE_IMMEDIATE_KHR) - .add_fallback_present_mode(VK_PRESENT_MODE_FIFO_KHR) - .add_image_usage_flags(VK_IMAGE_USAGE_TRANSFER_SRC_BIT) - .add_image_usage_flags(VK_IMAGE_USAGE_TRANSFER_DST_BIT); - if (windowCapabilities.supportedUsageFlags & - VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) + auto windowCapabilities = + m_device->getSurfaceCapabilities(m_windowSurface); + + auto swapOpts = VulkanSwapchain::Options{ + .formatPreferences = + { + { + .format = m_backendParams.srgb + ? 
VK_FORMAT_R8G8B8A8_SRGB + : VK_FORMAT_R8G8B8A8_UNORM, + .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + }, + // Fall back to either ordering of ARGB + { + .format = VK_FORMAT_R8G8B8A8_UNORM, + .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + }, + { + .format = VK_FORMAT_B8G8R8A8_UNORM, + .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + }, + }, + .presentModePreferences = + { + VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_FIFO_KHR, + }, + .imageUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | + VK_IMAGE_USAGE_TRANSFER_SRC_BIT | + VK_IMAGE_USAGE_TRANSFER_DST_BIT, + }; + + if ((windowCapabilities.supportedUsageFlags & + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) != 0) { - swapchainBuilder.add_image_usage_flags( - VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT); + swapOpts.imageUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; } - m_swapchain = std::make_unique<rive_vkb::Swapchain>( - m_device, - ref_rcp(vk()), - m_androidWindowWidth, - m_androidWindowHeight, - VKB_CHECK(swapchainBuilder.build())); + + m_swapchain = + std::make_unique<rive_vkb::VulkanSwapchain>(*m_instance, + *m_device, + ref_rcp(vk()), + m_windowSurface, + swapOpts); + + m_androidWindowWidth = m_swapchain->width(); + m_androidWindowHeight = m_swapchain->height(); m_renderTarget = impl()->makeRenderTarget(m_width, @@ -204,11 +167,10 @@ if (m_windowSurface != VK_NULL_HANDLE) { - m_instanceDispatchTable.destroySurfaceKHR(m_windowSurface, nullptr); + m_vkDestroySurfaceKHR(m_instance->vkInstance(), + m_windowSurface, + nullptr); } - - vkb::destroy_device(m_device); - vkb::destroy_instance(m_instance); } Factory* factory() override { return m_renderContext.get(); } @@ -288,9 +250,10 @@ void flushPLSContext(RenderTarget* offscreenRenderTarget) override { - if (m_swapchainImage == nullptr) + if (!m_swapchain->isFrameStarted()) { - m_swapchainImage = m_swapchain->acquireNextImage(); + m_swapchain->beginFrame(); + if (m_overflowTexture != nullptr) { m_renderTarget->setTargetImageView( @@ -301,31 +264,35 @@ else { 
m_renderTarget->setTargetImageView( - m_swapchainImage->imageView, - m_swapchainImage->image, - m_swapchainImage->imageLastAccess); + m_swapchain->currentVkImageView(), + m_swapchain->currentVkImage(), + m_swapchain->currentLastAccess()); } } m_renderContext->flush({ .renderTarget = offscreenRenderTarget != nullptr ? offscreenRenderTarget : m_renderTarget.get(), - .externalCommandBuffer = m_swapchainImage->commandBuffer, - .currentFrameNumber = m_swapchainImage->currentFrameNumber, - .safeFrameNumber = m_swapchainImage->safeFrameNumber, + .externalCommandBuffer = m_swapchain->currentCommandBuffer(), + .currentFrameNumber = m_swapchain->currentFrameNumber(), + .safeFrameNumber = m_swapchain->safeFrameNumber(), }); } void endFrame(std::vector<uint8_t>* pixelData) override { flushPLSContext(nullptr); + vkutil::ImageAccess swapchainLastAccess; if (m_overflowTexture == nullptr) { // We rendered directly to the window. Submit and read back // normally. - m_swapchain->submit(m_renderTarget->targetLastAccess(), - pixelData, - IAABB::MakeWH(m_width, m_height)); + swapchainLastAccess = m_renderTarget->targetLastAccess(); + if (pixelData != nullptr) + { + m_swapchain->queueImageCopy(&swapchainLastAccess, + IAABB::MakeWH(m_width, m_height)); + } } else { @@ -333,39 +300,48 @@ // visual feedback. 
vkutil::ImageAccess swapchainLastAccess = vk()->simpleImageMemoryBarrier( - m_swapchainImage->commandBuffer, - m_swapchainImage->imageLastAccess, + m_swapchain->currentCommandBuffer(), + m_swapchain->currentLastAccess(), { .pipelineStages = VK_PIPELINE_STAGE_TRANSFER_BIT, .accessMask = VK_ACCESS_TRANSFER_WRITE_BIT, .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, }, - m_swapchainImage->image); + m_swapchain->currentVkImage()); vk()->blitSubRect( - m_swapchainImage->commandBuffer, + m_swapchain->currentCommandBuffer(), m_renderTarget->accessTargetImage( - m_swapchainImage->commandBuffer, + m_swapchain->currentCommandBuffer(), { .pipelineStages = VK_PIPELINE_STAGE_TRANSFER_BIT, .accessMask = VK_ACCESS_TRANSFER_READ_BIT, .layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, }), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - m_swapchainImage->image, + m_swapchain->currentVkImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, IAABB{0, 0, std::min<int>(m_width, m_androidWindowWidth), std::min<int>(m_height, m_androidWindowHeight)}); + m_overflowTexture->lastAccess() = m_renderTarget->targetLastAccess(); - // Readback from the overflow texture when we submit. 
- m_swapchain->submit(swapchainLastAccess, - pixelData, - IAABB::MakeWH(m_width, m_height), - m_overflowTexture.get()); + + if (pixelData != nullptr) + { + m_swapchain->queueImageCopy(m_overflowTexture->vkImage(), + m_swapchain->imageFormat(), + &m_overflowTexture->lastAccess(), + IAABB::MakeWH(m_width, m_height)); + } } - m_swapchainImage = nullptr; + + m_swapchain->endFrame(swapchainLastAccess); + if (pixelData != nullptr) + { + m_swapchain->getPixelsFromLastImageCopy(pixelData); + } } private: @@ -379,16 +355,15 @@ BackendParams m_backendParams; uint32_t m_androidWindowWidth; uint32_t m_androidWindowHeight; - vkb::Instance m_instance; - vkb::InstanceDispatchTable m_instanceDispatchTable; - vkb::Device m_device; + std::unique_ptr<rive_vkb::VulkanInstance> m_instance; + std::unique_ptr<rive_vkb::VulkanDevice> m_device; VkSurfaceKHR m_windowSurface = VK_NULL_HANDLE; - std::unique_ptr<rive_vkb::Swapchain> m_swapchain; + std::unique_ptr<rive_vkb::VulkanSwapchain> m_swapchain; std::unique_ptr<RenderContext> m_renderContext; rcp<RenderTargetVulkanImpl> m_renderTarget; rcp<vkutil::Texture2D> m_overflowTexture; // Used when the desired render // size doesn't fit in the window. - const rive_vkb::SwapchainImage* m_swapchainImage = nullptr; + PFN_vkDestroySurfaceKHR m_vkDestroySurfaceKHR = nullptr; }; TestingWindow* TestingWindow::MakeAndroidVulkan(
diff --git a/tests/common/testing_window_vulkan_texture.cpp b/tests/common/testing_window_vulkan_texture.cpp index a2f8bb9..d3fdd91 100644 --- a/tests/common/testing_window_vulkan_texture.cpp +++ b/tests/common/testing_window_vulkan_texture.cpp
@@ -14,7 +14,9 @@ #else #include "common/offscreen_render_target.hpp" -#include "rive_vk_bootstrap/rive_vk_bootstrap.hpp" +#include "rive_vk_bootstrap/vulkan_device.hpp" +#include "rive_vk_bootstrap/vulkan_instance.hpp" +#include "rive_vk_bootstrap/vulkan_headless_frame_synchronizer.hpp" #include "rive/renderer/rive_renderer.hpp" #include "rive/renderer/vulkan/render_context_vulkan_impl.hpp" #include "rive/renderer/vulkan/render_target_vulkan.hpp" @@ -27,94 +29,32 @@ TestingWindowVulkanTexture(const BackendParams& backendParams) : m_backendParams(backendParams) { - rive_vkb::load_vulkan(); + using namespace rive_vkb; - int minorVersionRequested = m_backendParams.core ? 0 : 3; - bool disableValidationLayers = m_backendParams.disableValidationLayers; - bool disableDebugCallbacks = m_backendParams.disableDebugCallbacks; - - while (true) - { - vkb::InstanceBuilder instanceBuilder; - instanceBuilder.set_app_name("rive_tools") - .set_engine_name("Rive Renderer") - .set_headless(true) - .require_api_version(1, m_backendParams.core ? 0 : 3, 0) - .set_minimum_instance_version(1, 0, 0); -#ifdef DEBUG - instanceBuilder.enable_validation_layers(!disableValidationLayers); - if (!disableDebugCallbacks) - { - instanceBuilder.set_debug_callback( - rive_vkb::default_debug_callback); - } + m_instance = std::make_unique<VulkanInstance>(VulkanInstance::Options{ + .appName = "Rive Unit Tests", + .idealAPIVersion = + m_backendParams.core ? 
VK_API_VERSION_1_0 : VK_API_VERSION_1_3, +#ifndef NDEBUG + .wantValidationLayers = !m_backendParams.disableValidationLayers, + .wantDebugCallbacks = !m_backendParams.disableDebugCallbacks, #endif - auto instanceResult = instanceBuilder.build(); - if (!instanceResult) - { - auto error = static_cast<vkb::InstanceError>( - instanceResult.error().value()); + }); - if (error == - vkb::InstanceError::vulkan_version_1_1_unavailable && - minorVersionRequested != 0) - { - // There's a bug in VkBootstrap (due to not properly - // handling Vulkan 1.0 not having the - // vkEnumerateInstanceVersion function) where it can give a - // vulkan_version_1_1_unavailable error even though we've - // specified a minimum of 1.0. If we get that error, - // request 1.0 directly and try again. - fprintf(stderr, "Falling back on Vulkan 1.0.\n"); - minorVersionRequested = 0; - continue; - } + m_device = std::make_unique<VulkanDevice>( + *m_instance, + VulkanDevice::Options{ + .coreFeaturesOnly = m_backendParams.core, + .gpuNameFilter = m_backendParams.gpuNameFilter.c_str(), + .headless = true, + }); -#ifdef DEBUG - if (!disableValidationLayers && - error == vkb::InstanceError::requested_layers_not_present) - { - fprintf(stderr, - "WARNING: Validation layers not found. Attempting " - "to create a Vulkan context again without " - "validation layers.\n"); - disableValidationLayers = true; - continue; - } - - if (!disableDebugCallbacks && - error == vkb::InstanceError::failed_create_debug_messenger) - { - fprintf(stderr, - "WARNING: Debug callbacks not supported. 
" - "Attempting to create a Vulkan context again " - "without debug callbacks."); - disableDebugCallbacks = true; - continue; - } -#endif - fprintf(stderr, - "ERROR: %s: Failed to build Vulkan instance.", - instanceResult.error().message().c_str()); - abort(); - } - - m_instance = *instanceResult; - break; - } - - VulkanFeatures vulkanFeatures; - std::tie(m_device, vulkanFeatures) = rive_vkb::select_device( - m_instance, - m_backendParams.core ? rive_vkb::FeatureSet::coreOnly - : rive_vkb::FeatureSet::allAvailable, - m_backendParams.gpuNameFilter.c_str()); m_renderContext = RenderContextVulkanImpl::MakeContext( - m_instance, - m_device.physical_device, - m_device, - vulkanFeatures, - m_instance.fp_vkGetInstanceProcAddr, + m_instance->vkInstance(), + m_device->vkPhysicalDevice(), + m_device->vkDevice(), + m_device->vulkanFeatures(), + m_instance->getVkGetInstanceProcAddrPtr(), { .forceAtomicMode = backendParams.atomic, .shaderCompilationMode = @@ -124,15 +64,12 @@ ~TestingWindowVulkanTexture() { - // Destroy the swapchain first because it synchronizes for in-flight - // command buffers. - m_swapchain = nullptr; + // Destroy the offscreen frame syncrhonizer first because it + // synchronizes for in-flight command buffers. + m_frameSynchronizer = nullptr; m_renderContext.reset(); m_renderTarget.reset(); - - vkb::destroy_device(m_device); - vkb::destroy_instance(m_instance); } rive::Factory* factory() override { return m_renderContext.get(); } @@ -156,32 +93,41 @@ std::unique_ptr<rive::Renderer> beginFrame( const FrameOptions& options) override { - if (m_swapchain == nullptr || m_swapchain->width() != m_width || - m_swapchain->height() != m_height) + if (m_frameSynchronizer == nullptr || + m_frameSynchronizer->width() != m_width || + m_frameSynchronizer->height() != m_height) { - VkFormat swapchainFormat = + VkFormat imageFormat = m_backendParams.srgb ? VK_FORMAT_R8G8B8A8_SRGB : m_backendParams.core ? 
VK_FORMAT_R8G8B8A8_UNORM : VK_FORMAT_B8G8R8A8_UNORM; // Don't use VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT so we can test our // codepath that makes us work without it. - VkImageUsageFlags additionalUsageFlags = - VK_IMAGE_USAGE_TRANSFER_DST_BIT; + VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | + VK_IMAGE_USAGE_TRANSFER_SRC_BIT | + VK_IMAGE_USAGE_TRANSFER_DST_BIT; uint64_t currentFrameNumber = - m_swapchain != nullptr ? m_swapchain->currentFrameNumber() : 0; - m_swapchain = - std::make_unique<rive_vkb::Swapchain>(m_device, - ref_rcp(vk()), - m_width, - m_height, - swapchainFormat, - additionalUsageFlags, - currentFrameNumber); - m_renderTarget = - impl()->makeRenderTarget(m_width, - m_height, - m_swapchain->imageFormat(), - m_swapchain->imageUsageFlags()); + m_frameSynchronizer != nullptr + ? m_frameSynchronizer->currentFrameNumber() + : 0; + + m_frameSynchronizer = + std::make_unique<rive_vkb::VulkanHeadlessFrameSynchronizer>( + *m_instance, + *m_device, + ref_rcp(vk()), + rive_vkb::VulkanHeadlessFrameSynchronizer::Options{ + .width = m_width, + .height = m_height, + .imageFormat = imageFormat, + .imageUsageFlags = usageFlags, + .initialFrameNumber = currentFrameNumber, + }); + m_renderTarget = impl()->makeRenderTarget( + m_width, + m_height, + m_frameSynchronizer->imageFormat(), + m_frameSynchronizer->imageUsageFlags()); } rive::gpu::RenderContext::FrameDescriptor frameDescriptor = { @@ -204,30 +150,42 @@ void flushPLSContext(RenderTarget* offscreenRenderTarget) final { - const rive_vkb::SwapchainImage* swapchainImage = - m_swapchain->currentImage(); - if (swapchainImage == nullptr) + if (!m_frameSynchronizer->isFrameStarted()) { - swapchainImage = m_swapchain->acquireNextImage(); - m_renderTarget->setTargetImageView(swapchainImage->imageView, - swapchainImage->image, - swapchainImage->imageLastAccess); + m_frameSynchronizer->beginFrame(); + + m_renderTarget->setTargetImageView( + m_frameSynchronizer->vkImageView(), + 
m_frameSynchronizer->vkImage(), + m_frameSynchronizer->lastAccess()); } m_renderContext->flush({ .renderTarget = offscreenRenderTarget != nullptr ? offscreenRenderTarget : m_renderTarget.get(), - .externalCommandBuffer = swapchainImage->commandBuffer, - .currentFrameNumber = swapchainImage->currentFrameNumber, - .safeFrameNumber = swapchainImage->safeFrameNumber, + .externalCommandBuffer = + m_frameSynchronizer->currentCommandBuffer(), + .currentFrameNumber = m_frameSynchronizer->currentFrameNumber(), + .safeFrameNumber = m_frameSynchronizer->safeFrameNumber(), }); } void endFrame(std::vector<uint8_t>* pixelData) override { flushPLSContext(nullptr); - m_swapchain->submit(m_renderTarget->targetLastAccess(), pixelData); + auto lastAccess = m_renderTarget->targetLastAccess(); + if (pixelData != nullptr) + { + m_frameSynchronizer->queueImageCopy(&lastAccess); + } + + m_frameSynchronizer->endFrame(lastAccess); + + if (pixelData != nullptr) + { + m_frameSynchronizer->getPixelsFromLastImageCopy(pixelData); + } } private: @@ -239,10 +197,11 @@ VulkanContext* vk() const { return impl()->vulkanContext(); } const BackendParams m_backendParams; - vkb::Instance m_instance; - vkb::Device m_device; + std::unique_ptr<rive_vkb::VulkanInstance> m_instance; + std::unique_ptr<rive_vkb::VulkanDevice> m_device; std::unique_ptr<RenderContext> m_renderContext; - std::unique_ptr<rive_vkb::Swapchain> m_swapchain; + std::unique_ptr<rive_vkb::VulkanHeadlessFrameSynchronizer> + m_frameSynchronizer; rcp<RenderTargetVulkanImpl> m_renderTarget; }; }; // namespace rive::gpu
diff --git a/tests/unit_tests/test.sh b/tests/unit_tests/test.sh index 1edbc36..2ede16e 100755 --- a/tests/unit_tests/test.sh +++ b/tests/unit_tests/test.sh
@@ -77,6 +77,11 @@ TOOLSET_ARG=$1 shift ;; + --with_vulkan) + echo "Vulkan is added" + EXTRA_CONFIG=$EXTRA_CONFIG'--with_vulkan ' + shift + ;; *) # We could pass any unrecognized arguments through instead of just eating them echo "Warning: unrecognized argument '$1'"