Merge pull request #709 from cdavis5e/buffer-no-prefer-dedicated
Don't prefer dedicated allocations for buffer memory.
diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index 7000655..9a5e72f 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -18,6 +18,7 @@
Released TBD
+- Add support for `VkEvent`, using either native `MTLEvent` or emulation when `MTLEvent` is not available.
- Revert to supporting host-coherent memory for linear images on macOS.
- Ensure Vulkan loader magic number is set every time before returning any dispatchable Vulkan handle.
- Fix crash when `VkDeviceCreateInfo` specifies queue families out of numerical order.
diff --git a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
index d67de99..eef5e65 100644
--- a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
+++ b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
@@ -116,7 +116,7 @@
*
* 4. Setting the MVK_ALLOW_METAL_EVENTS runtime environment variable or MoltenVK compile-time build
* setting to 1 will cause MoltenVK to use Metal events, if they are available on the device, for
- * Vulkan sychronization components such as VkSemaphore. This is disabled by default.
+ * VkSemaphore synchronization behaviour. This is disabled by default.
*/
typedef struct {
@@ -167,7 +167,10 @@
* The initial value or this parameter is set by the
* MVK_CONFIG_SYNCHRONOUS_QUEUE_SUBMITS
* runtime environment variable or MoltenVK compile-time build setting.
- * If neither is set, the value of this parameter defaults to true.
+ * If neither is set, the value of this parameter defaults to true for macOS 10.14
+ * and above or iOS 12 and above, and false otherwise. The reason for this distinction
+ * is that this feature should be disabled when emulation is required to support VkEvents
+ * because native support for events (MTLEvent) is not available.
*/
VkBool32 synchronousQueueSubmits;
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h
index 1cfd37e..1c543aa 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h
@@ -190,6 +190,54 @@
#pragma mark -
+#pragma mark MVKCmdSetResetEvent
+
+/** Vulkan command to set or reset an event. */
+class MVKCmdSetResetEvent : public MVKCommand {
+
+public:
+ void setContent(VkEvent event, VkPipelineStageFlags stageMask, bool status);
+
+ void encode(MVKCommandEncoder* cmdEncoder) override;
+
+ MVKCmdSetResetEvent(MVKCommandTypePool<MVKCmdSetResetEvent>* pool);
+
+private:
+ MVKEvent* _mvkEvent;
+ bool _status;
+
+};
+
+
+#pragma mark -
+#pragma mark MVKCmdWaitEvents
+
+/** Vulkan command to wait for an event to be signaled. */
+class MVKCmdWaitEvents : public MVKCommand {
+
+public:
+ void setContent(uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+ void encode(MVKCommandEncoder* cmdEncoder) override;
+
+ MVKCmdWaitEvents(MVKCommandTypePool<MVKCmdWaitEvents>* pool);
+
+private:
+ MVKVectorInline<MVKEvent*, 4> _mvkEvents;
+
+};
+
+
+#pragma mark -
#pragma mark Command creation functions
/** Adds commands to the specified command buffer that insert the specified pipeline barriers. */
@@ -241,3 +289,27 @@
VkPipelineLayout layout,
uint32_t set,
const void* pData);
+
+/** Adds a set event command to the specified command buffer. */
+void mvkCmdSetEvent(MVKCommandBuffer* cmdBuff,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+/** Adds a reset event command to the specified command buffer. */
+void mvkCmdResetEvent(MVKCommandBuffer* cmdBuff,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+
+/** Adds a wait events command to the specified command buffer. */
+void mvkCmdWaitEvents(MVKCommandBuffer* cmdBuff,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
index c65adce..7cd6e09 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
@@ -347,6 +347,54 @@
#pragma mark -
+#pragma mark MVKCmdSetResetEvent
+
+void MVKCmdSetResetEvent::setContent(VkEvent event, VkPipelineStageFlags stageMask, bool status) {
+ _mvkEvent = (MVKEvent*)event;
+ _status = status;
+}
+
+void MVKCmdSetResetEvent::encode(MVKCommandEncoder* cmdEncoder) {
+ cmdEncoder->signalEvent(_mvkEvent, _status);
+}
+
+MVKCmdSetResetEvent::MVKCmdSetResetEvent(MVKCommandTypePool<MVKCmdSetResetEvent>* pool)
+ : MVKCommand::MVKCommand((MVKCommandTypePool<MVKCommand>*)pool) {}
+
+
+#pragma mark -
+#pragma mark MVKCmdWaitEvents
+
+void MVKCmdWaitEvents::setContent(uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers) {
+
+ _mvkEvents.clear(); // Clear for reuse
+ _mvkEvents.reserve(eventCount);
+ for (uint32_t i = 0; i < eventCount; i++) {
+ _mvkEvents.push_back((MVKEvent*)pEvents[i]);
+ }
+
+}
+
+void MVKCmdWaitEvents::encode(MVKCommandEncoder* cmdEncoder) {
+ for (MVKEvent* mvkEvt : _mvkEvents) {
+ mvkEvt->encodeWait(cmdEncoder->_mtlCmdBuffer);
+ }
+}
+
+MVKCmdWaitEvents::MVKCmdWaitEvents(MVKCommandTypePool<MVKCmdWaitEvents>* pool)
+ : MVKCommand::MVKCommand((MVKCommandTypePool<MVKCommand>*)pool) {}
+
+
+#pragma mark -
#pragma mark Command creation functions
void mvkCmdPipelineBarrier(MVKCommandBuffer* cmdBuff,
@@ -420,3 +468,40 @@
cmd->setContent(descUpdateTemplate, layout, set, pData);
cmdBuff->addCommand(cmd);
}
+
+void mvkCmdSetEvent(MVKCommandBuffer* cmdBuff,
+ VkEvent event,
+ VkPipelineStageFlags stageMask) {
+ MVKCmdSetResetEvent* cmd = cmdBuff->_commandPool->_cmdSetResetEventPool.acquireObject();
+ cmd->setContent(event, stageMask, true);
+ cmdBuff->addCommand(cmd);
+}
+
+void mvkCmdResetEvent(MVKCommandBuffer* cmdBuff,
+ VkEvent event,
+ VkPipelineStageFlags stageMask) {
+ MVKCmdSetResetEvent* cmd = cmdBuff->_commandPool->_cmdSetResetEventPool.acquireObject();
+ cmd->setContent(event, stageMask, false);
+ cmdBuff->addCommand(cmd);
+}
+
+void mvkCmdWaitEvents(MVKCommandBuffer* cmdBuff,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers) {
+ MVKCmdWaitEvents* cmd = cmdBuff->_commandPool->_cmdWaitEventsPool.acquireObject();
+ cmd->setContent(eventCount, pEvents,
+ srcStageMask, dstStageMask,
+ memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers);
+ cmdBuff->addCommand(cmd);
+
+}
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
index ddeae21..bbff428 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
@@ -283,6 +283,9 @@
/** Binds a pipeline to a bind point. */
void bindPipeline(VkPipelineBindPoint pipelineBindPoint, MVKPipeline* pipeline);
+ /** Encodes an operation to signal an event to a status. */
+ void signalEvent(MVKEvent* mvkEvent, bool status);
+
/**
* If a pipeline is currently bound, returns whether the current pipeline permits dynamic
* setting of the specified state. If no pipeline is currently bound, returns true.
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
index f43a495..c8fbbdd 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
@@ -359,6 +359,11 @@
}
}
+void MVKCommandEncoder::signalEvent(MVKEvent* mvkEvent, bool status) {
+ endCurrentMetalEncoding();
+ mvkEvent->encodeSignal(_mtlCmdBuffer, status);
+}
+
bool MVKCommandEncoder::supportsDynamicState(VkDynamicState state) {
MVKGraphicsPipeline* gpl = (MVKGraphicsPipeline*)_graphicsPipelineState.getPipeline();
return !gpl || gpl->supportsDynamicState(state);
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandPool.h b/MoltenVK/MoltenVK/Commands/MVKCommandPool.h
index 45cc0a6..e65989e 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandPool.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandPool.h
@@ -149,6 +149,10 @@
MVKCommandTypePool<MVKCmdDebugMarkerInsert> _cmdDebugMarkerInsertPool;
+ MVKCommandTypePool<MVKCmdSetResetEvent> _cmdSetResetEventPool;
+
+ MVKCommandTypePool<MVKCmdWaitEvents> _cmdWaitEventsPool;
+
#pragma mark Command resources
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm b/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
index 668089f..6c95e70 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
@@ -126,6 +126,8 @@
_cmdDebugMarkerBeginPool.clear();
_cmdDebugMarkerEndPool.clear();
_cmdDebugMarkerInsertPool.clear();
+ _cmdSetResetEventPool.clear();
+ _cmdWaitEventsPool.clear();
}
@@ -180,7 +182,9 @@
_cmdPushSetWithTemplatePool(this),
_cmdDebugMarkerBeginPool(this),
_cmdDebugMarkerEndPool(this),
- _cmdDebugMarkerInsertPool(this)
+ _cmdDebugMarkerInsertPool(this),
+ _cmdSetResetEventPool(this),
+ _cmdWaitEventsPool(this)
// when extending be sure to add to trim() as well
{}
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h
index 3ce7590..17bdb02 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h
@@ -47,6 +47,7 @@
class MVKDeviceMemory;
class MVKFence;
class MVKSemaphore;
+class MVKEvent;
class MVKQueryPool;
class MVKShaderModule;
class MVKPipelineCache;
@@ -445,6 +446,11 @@
void destroySemaphore(MVKSemaphore* mvkSem4,
const VkAllocationCallbacks* pAllocator);
+ MVKEvent* createEvent(const VkEventCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator);
+ void destroyEvent(MVKEvent* mvkEvent,
+ const VkAllocationCallbacks* pAllocator);
+
MVKQueryPool* createQueryPool(const VkQueryPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator);
void destroyQueryPool(MVKQueryPool* mvkQP,
@@ -640,6 +646,11 @@
/** Performance statistics. */
MVKPerformanceStatistics _performanceStatistics;
+ // Indicates whether semaphores should use MTLEvents if available.
+ // Set by the MVK_ALLOW_METAL_EVENTS environment variable if MTLEvents are available.
+ // This is a temporary measure until semaphore handling is reworked.
+ bool _useMTLEventsForSemaphores;
+
#pragma mark Construction
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index b1930c2..d3d66a8 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -128,7 +128,7 @@
auto* portabilityFeatures = (VkPhysicalDevicePortabilitySubsetFeaturesEXTX*)next;
portabilityFeatures->triangleFans = false;
portabilityFeatures->separateStencilMaskRef = true;
- portabilityFeatures->events = false;
+ portabilityFeatures->events = _metalFeatures.events;
portabilityFeatures->standardImageViews = _mvkInstance->getMoltenVKConfiguration()->fullImageViewSwizzle;
portabilityFeatures->samplerMipLodBias = false;
break;
@@ -457,8 +457,8 @@
pSurfaceCapabilities->supportedTransforms = (VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR);
pSurfaceCapabilities->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
pSurfaceCapabilities->supportedCompositeAlpha = (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
- VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR |
- VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR);
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR |
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR);
pSurfaceCapabilities->supportedUsageFlags = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_STORAGE_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
@@ -781,7 +781,7 @@
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_iOS_GPUFamily1_v5] ) {
_metalFeatures.mslVersionEnum = MTLLanguageVersion2_1;
- MVK_SET_FROM_ENV_OR_BUILD_BOOL(_metalFeatures.events, MVK_ALLOW_METAL_EVENTS);
+ _metalFeatures.events = true;
_metalFeatures.textureBuffers = true;
}
@@ -844,7 +844,7 @@
if ( [_mtlDevice supportsFeatureSet: MTLFeatureSet_macOS_GPUFamily1_v4] ) {
_metalFeatures.mslVersionEnum = MTLLanguageVersion2_1;
_metalFeatures.multisampleArrayTextures = true;
- MVK_SET_FROM_ENV_OR_BUILD_BOOL(_metalFeatures.events, MVK_ALLOW_METAL_EVENTS);
+ _metalFeatures.events = true;
_metalFeatures.memoryBarriers = true;
_metalFeatures.textureBuffers = true;
}
@@ -1899,6 +1899,19 @@
mvkSem4->destroy();
}
+MVKEvent* MVKDevice::createEvent(const VkEventCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator) {
+ if (_pMetalFeatures->events) {
+ return new MVKEventNative(this, pCreateInfo);
+ } else {
+ return new MVKEventEmulated(this, pCreateInfo);
+ }
+}
+
+void MVKDevice::destroyEvent(MVKEvent* mvkEvent, const VkAllocationCallbacks* pAllocator) {
+ mvkEvent->destroy();
+}
+
MVKQueryPool* MVKDevice::createQueryPool(const VkQueryPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator) {
switch (pCreateInfo->queryType) {
@@ -2301,6 +2314,11 @@
_pProperties = &_physicalDevice->_properties;
_pMemoryProperties = &_physicalDevice->_memoryProperties;
+ _useMTLEventsForSemaphores = MVK_ALLOW_METAL_EVENTS;
+ if (_pMetalFeatures->events) {
+ MVK_SET_FROM_ENV_OR_BUILD_BOOL(_useMTLEventsForSemaphores, MVK_ALLOW_METAL_EVENTS);
+ }
+
#if MVK_MACOS
// If we have selected a high-power GPU and want to force the window system
// to use it, force the window system to use a high-power GPU by calling the
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index 379acf5..9cf0cce 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -1283,7 +1283,7 @@
if (_availability.isAvailable) {
_availability.isAvailable = false;
signal(signaler);
- if (_device->_pMetalFeatures->events) {
+ if (_device->_useMTLEventsForSemaphores) {
// Unfortunately, we can't assume we have an MTLSharedEvent here.
// This means we need to execute a command on the device to signal
// the semaphore. Alternatively, we could always use an MTLSharedEvent,
@@ -1307,7 +1307,7 @@
// Signal either or both of the semaphore and fence in the specified tracker pair.
void MVKSwapchainImage::signal(MVKSwapchainSignaler& signaler) {
- if (signaler.first && !_device->_pMetalFeatures->events) { signaler.first->signal(); }
+ if (signaler.first && !_device->_useMTLEventsForSemaphores) { signaler.first->signal(); }
if (signaler.second) { signaler.second->signal(); }
}
@@ -1364,7 +1364,7 @@
if (scName) { [mtlCmdBuff popDebugGroup]; }
resetMetalSurface();
- if (_device->_pMetalFeatures->events && !_availabilitySignalers.empty()) {
+ if (_device->_useMTLEventsForSemaphores && !_availabilitySignalers.empty()) {
// Signal the semaphore device-side.
_availabilitySignalers.front().first->encodeSignal(mtlCmdBuff);
}
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm
index 0bb9bef..31558bb 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm
@@ -644,6 +644,21 @@
}
void MVKInstance::initConfig() {
+
+// The default value for MVK_CONFIG_SYNCHRONOUS_QUEUE_SUBMITS actually depends on whether
+// MTLEvents are supported, because if MTLEvents are not supported, VkEvents must be emulated,
+// and synchronous queue submits should then be turned off by default, whereas if MTLEvents
+// are supported, we want synchronous queues for better behaviour. The app can of course
+// still override this default by setting the env var, or the config directly.
+#undef MVK_CONFIG_SYNCHRONOUS_QUEUE_SUBMITS
+#define MVK_CONFIG_SYNCHRONOUS_QUEUE_SUBMITS syncQueueSubmits
+#if MVK_MACOS
+ bool syncQueueSubmits = mvkOSVersion() >= 10.14; // Support for MTLEvents
+#endif
+#if MVK_IOS
+ bool syncQueueSubmits = mvkOSVersion() >= 12.0; // Support for MTLEvents
+#endif
+
MVK_SET_FROM_ENV_OR_BUILD_BOOL( _mvkConfig.debugMode, MVK_DEBUG);
MVK_SET_FROM_ENV_OR_BUILD_BOOL( _mvkConfig.shaderConversionFlipVertexY, MVK_CONFIG_SHADER_CONVERSION_FLIP_VERTEX_Y);
MVK_SET_FROM_ENV_OR_BUILD_BOOL( _mvkConfig.synchronousQueueSubmits, MVK_CONFIG_SYNCHRONOUS_QUEUE_SUBMITS);
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
index f0cc7f8..b0b4dc6 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
@@ -224,7 +224,7 @@
MVKDevice* mvkDev = _queue->getDevice();
// If the device supports it, wait for any semaphores on the device.
- if (mvkDev->_pMetalFeatures->events && _isAwaitingSemaphores) {
+ if (mvkDev->_useMTLEventsForSemaphores && _isAwaitingSemaphores) {
_isAwaitingSemaphores = false;
for (auto* ws : _waitSemaphores) {
ws->encodeWait(getActiveMTLCommandBuffer());
@@ -239,7 +239,7 @@
if (_fence || _isSignalingSemaphores) { getActiveMTLCommandBuffer(); }
// If the device supports it, signal all semaphores on the device.
- if (mvkDev->_pMetalFeatures->events && _isSignalingSemaphores) {
+ if (mvkDev->_useMTLEventsForSemaphores && _isSignalingSemaphores) {
_isSignalingSemaphores = false;
for (auto* ss : _signalSemaphores) {
ss->encodeSignal(getActiveMTLCommandBuffer());
@@ -354,7 +354,7 @@
// If there are semaphores and this device supports MTLEvent, we must present
// with a command buffer in order to synchronize with the semaphores.
MVKDevice* mvkDev = _queue->getDevice();
- if (mvkDev->_pMetalFeatures->events && !_waitSemaphores.empty()) {
+ if (mvkDev->_useMTLEventsForSemaphores && !_waitSemaphores.empty()) {
// Create a command buffer, have it wait for the semaphores, then present
// surfaces via the command buffer.
id<MTLCommandBuffer> mtlCmdBuff = getMTLCommandBuffer();
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
index ac3eabc..cae47c8 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
@@ -226,6 +226,9 @@
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_STORAGE_BIT));
+ if (pCreateInfo->compositeAlpha != VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) {
+ _mtlLayer.opaque = pCreateInfo->compositeAlpha == VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ }
#if MVK_MACOS
switch (pCreateInfo->imageColorSpace) {
case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR:
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h
index 24ac84f..9887002 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h
@@ -60,6 +60,9 @@
*/
bool release();
+ /** Returns whether this instance is in a reserved state. */
+ bool isReserved();
+
/**
* Blocks processing on the current thread until any or all (depending on configuration) outstanding
* reservations have been released, or until the specified timeout interval in nanoseconds expires.
@@ -81,7 +84,7 @@
*
* The waitAll parameter indicates whether a call to the release() function is required
* for each call to the reserve() function (waitAll = true), or whether a single call
- * to the release() function will release all outstanding reservations (waitAll = true).
+ * to the release() function will release all outstanding reservations (waitAll = false).
* This value defaults to true, indicating that each call to the reserve() function will
* require a separate call to the release() function to cause the semaphore to stop blocking.
*/
@@ -247,6 +250,84 @@
#pragma mark -
+#pragma mark MVKEvent
+
+/** Abstract class that represents a Vulkan event. */
+class MVKEvent : public MVKVulkanAPIDeviceObject {
+
+public:
+
+ /** Returns the Vulkan type of this object. */
+ VkObjectType getVkObjectType() override { return VK_OBJECT_TYPE_EVENT; }
+
+ /** Returns the debug report object type of this object. */
+ VkDebugReportObjectTypeEXT getVkDebugReportObjectType() override { return VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT; }
+
+ /** Returns whether this event is set. */
+ virtual bool isSet() = 0;
+
+ /** Sets the signal status. */
+ virtual void signal(bool status) = 0;
+
+ /** Encodes an operation to signal the event with a status. */
+ virtual void encodeSignal(id<MTLCommandBuffer> mtlCmdBuff, bool status) = 0;
+
+ /** Encodes an operation to block command buffer operation until this event is signaled. */
+ virtual void encodeWait(id<MTLCommandBuffer> mtlCmdBuff) = 0;
+
+
+#pragma mark Construction
+
+ MVKEvent(MVKDevice* device, const VkEventCreateInfo* pCreateInfo) : MVKVulkanAPIDeviceObject(device) {}
+
+protected:
+ void propogateDebugName() override {}
+
+};
+
+
+#pragma mark -
+#pragma mark MVKEventNative
+
+/** An MVKEvent that uses native MTLSharedEvent to provide VkEvent functionality. */
+class MVKEventNative : public MVKEvent {
+
+public:
+ bool isSet() override;
+ void signal(bool status) override;
+ void encodeSignal(id<MTLCommandBuffer> mtlCmdBuff, bool status) override;
+ void encodeWait(id<MTLCommandBuffer> mtlCmdBuff) override;
+
+ MVKEventNative(MVKDevice* device, const VkEventCreateInfo* pCreateInfo);
+
+ ~MVKEventNative() override;
+
+protected:
+ id<MTLSharedEvent> _mtlEvent;
+};
+
+
+#pragma mark -
+#pragma mark MVKEventEmulated
+
+/** An MVKEvent that uses CPU synchronization to provide VkEvent functionality. */
+class MVKEventEmulated : public MVKEvent {
+
+public:
+ bool isSet() override;
+ void signal(bool status) override;
+ void encodeSignal(id<MTLCommandBuffer> mtlCmdBuff, bool status) override;
+ void encodeWait(id<MTLCommandBuffer> mtlCmdBuff) override;
+
+ MVKEventEmulated(MVKDevice* device, const VkEventCreateInfo* pCreateInfo);
+
+protected:
+ MVKSemaphoreImpl _blocker;
+ bool _inlineSignalStatus;
+};
+
+
+#pragma mark -
#pragma mark Support functions
/** Resets the specified fences. */
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm
index f0a1b1c..9fb29be 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm
@@ -46,6 +46,11 @@
_reservationCount++;
}
+bool MVKSemaphoreImpl::isReserved() {
+ lock_guard<mutex> lock(_lock);
+ return !isClear();
+}
+
bool MVKSemaphoreImpl::wait(uint64_t timeout, bool reserveAgain) {
unique_lock<mutex> lock(_lock);
@@ -97,7 +102,7 @@
MVKSemaphore::MVKSemaphore(MVKDevice* device, const VkSemaphoreCreateInfo* pCreateInfo)
: MVKVulkanAPIDeviceObject(device), _blocker(false, 1), _mtlEvent(nil), _mtlEventValue(1) {
- if (device->_pMetalFeatures->events) {
+ if (device->_useMTLEventsForSemaphores) {
_mtlEvent = [device->getMTLDevice() newEvent];
}
}
@@ -156,6 +161,76 @@
#pragma mark -
+#pragma mark MVKEventNative
+
+// Odd == set / Even == reset.
+bool MVKEventNative::isSet() { return _mtlEvent.signaledValue & 1; }
+
+void MVKEventNative::signal(bool status) {
+ if (isSet() != status) {
+ _mtlEvent.signaledValue += 1;
+ }
+}
+
+void MVKEventNative::encodeSignal(id<MTLCommandBuffer> mtlCmdBuff, bool status) {
+ if (isSet() != status) {
+ [mtlCmdBuff encodeSignalEvent: _mtlEvent value: _mtlEvent.signaledValue + 1];
+ }
+}
+
+void MVKEventNative::encodeWait(id<MTLCommandBuffer> mtlCmdBuff) {
+ if ( !isSet() ) {
+ [mtlCmdBuff encodeWaitForEvent: _mtlEvent value: _mtlEvent.signaledValue + 1];
+ }
+}
+
+MVKEventNative::MVKEventNative(MVKDevice* device, const VkEventCreateInfo* pCreateInfo) : MVKEvent(device, pCreateInfo) {
+ _mtlEvent = [_device->getMTLDevice() newSharedEvent]; // retained
+}
+
+MVKEventNative::~MVKEventNative() {
+ [_mtlEvent release];
+}
+
+
+#pragma mark -
+#pragma mark MVKEventEmulated
+
+bool MVKEventEmulated::isSet() { return !_blocker.isReserved(); }
+
+void MVKEventEmulated::signal(bool status) {
+ if (status) {
+ _blocker.release();
+ } else {
+ _blocker.reserve();
+ }
+}
+
+void MVKEventEmulated::encodeSignal(id<MTLCommandBuffer> mtlCmdBuff, bool status) {
+ if (status) {
+ [mtlCmdBuff addCompletedHandler: ^(id<MTLCommandBuffer> mcb) { _blocker.release(); }];
+ } else {
+ _blocker.reserve();
+ }
+
+ // An encoded signal followed by an encoded wait should cause the wait to be skipped.
+ // However, because encoding a signal will not release the blocker until the command buffer
+ // is finished executing (so the CPU can tell when it really is done) it is possible that
+ // the encoded wait will block when it shouldn't. To avoid that, we keep track of whether
+ // the most recent encoded signal was set or reset, so the next encoded wait knows whether
+ // to really wait or not.
+ _inlineSignalStatus = status;
+}
+
+void MVKEventEmulated::encodeWait(id<MTLCommandBuffer> mtlCmdBuff) {
+ if ( !_inlineSignalStatus ) { _blocker.wait(); }
+}
+
+MVKEventEmulated::MVKEventEmulated(MVKDevice* device, const VkEventCreateInfo* pCreateInfo) :
+ MVKEvent(device, pCreateInfo), _blocker(false, 1), _inlineSignalStatus(false) {}
+
+
+#pragma mark -
#pragma mark Support functions
VkResult mvkResetFences(uint32_t fenceCount, const VkFence* pFences) {
diff --git a/MoltenVK/MoltenVK/Utility/MVKEnvironment.h b/MoltenVK/MoltenVK/Utility/MVKEnvironment.h
index dbbba16..56471ad 100644
--- a/MoltenVK/MoltenVK/Utility/MVKEnvironment.h
+++ b/MoltenVK/MoltenVK/Utility/MVKEnvironment.h
@@ -146,7 +146,7 @@
# define MVK_CONFIG_FORCE_LOW_POWER_GPU 0
#endif
-/** Allow the use of Metal events for Vulkan synchronizations such as VkSemaphores. Disabled by default. */
+/** Allow the use of Metal events for VkSemaphore synchronization behaviour. Disabled by default. */
#ifndef MVK_ALLOW_METAL_EVENTS
# define MVK_ALLOW_METAL_EVENTS 0
#endif
diff --git a/MoltenVK/MoltenVK/Vulkan/vulkan.mm b/MoltenVK/MoltenVK/Vulkan/vulkan.mm
index 3522f87..1604d24 100644
--- a/MoltenVK/MoltenVK/Vulkan/vulkan.mm
+++ b/MoltenVK/MoltenVK/Vulkan/vulkan.mm
@@ -651,9 +651,10 @@
VkEvent* pEvent) {
MVKTraceVulkanCallStart();
- //VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT
MVKDevice* mvkDev = MVKDevice::getMVKDevice(device);
- VkResult rslt = mvkDev->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateEvent(): Vukan events are not supported.");
+ MVKEvent* mvkEvent = mvkDev->createEvent(pCreateInfo, pAllocator);
+ *pEvent = (VkEvent)mvkEvent;
+ VkResult rslt = mvkEvent->getConfigurationResult();
MVKTraceVulkanCallEnd();
return rslt;
}
@@ -666,7 +667,7 @@
MVKTraceVulkanCallStart();
if ( !event ) { return; }
MVKDevice* mvkDev = MVKDevice::getMVKDevice(device);
- mvkDev->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkDestroyEvent(): Vukan events are not supported.");
+ mvkDev->destroyEvent((MVKEvent*)event, pAllocator);
MVKTraceVulkanCallEnd();
}
@@ -675,8 +676,8 @@
VkEvent event) {
MVKTraceVulkanCallStart();
- MVKDevice* mvkDev = MVKDevice::getMVKDevice(device);
- VkResult rslt = mvkDev->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkGetEventStatus(): Vukan events are not supported.");
+ MVKEvent* mvkEvent = (MVKEvent*)event;
+ VkResult rslt = mvkEvent->isSet() ? VK_EVENT_SET : VK_EVENT_RESET;
MVKTraceVulkanCallEnd();
return rslt;
}
@@ -686,10 +687,10 @@
VkEvent event) {
MVKTraceVulkanCallStart();
- MVKDevice* mvkDev = MVKDevice::getMVKDevice(device);
- VkResult rslt = mvkDev->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkSetEvent(): Vukan events are not supported.");
+ MVKEvent* mvkEvent = (MVKEvent*)event;
+ mvkEvent->signal(true);
MVKTraceVulkanCallEnd();
- return rslt;
+ return VK_SUCCESS;
}
MVK_PUBLIC_SYMBOL VkResult vkResetEvent(
@@ -697,10 +698,10 @@
VkEvent event) {
MVKTraceVulkanCallStart();
- MVKDevice* mvkDev = MVKDevice::getMVKDevice(device);
- VkResult rslt = mvkDev->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkResetEvent(): Vukan events are not supported.");
+ MVKEvent* mvkEvent = (MVKEvent*)event;
+ mvkEvent->signal(false);
MVKTraceVulkanCallEnd();
- return rslt;
+ return VK_SUCCESS;
}
MVK_PUBLIC_SYMBOL VkResult vkCreateQueryPool(
@@ -1709,7 +1710,7 @@
MVKTraceVulkanCallStart();
MVKCommandBuffer* cmdBuff = MVKCommandBuffer::getMVKCommandBuffer(commandBuffer);
- cmdBuff->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdSetEvent(): Vukan events are not supported.");
+ mvkCmdSetEvent(cmdBuff, event, stageMask);
MVKTraceVulkanCallEnd();
}
@@ -1720,7 +1721,7 @@
MVKTraceVulkanCallStart();
MVKCommandBuffer* cmdBuff = MVKCommandBuffer::getMVKCommandBuffer(commandBuffer);
- cmdBuff->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdResetEvent(): Vukan events are not supported.");
+ mvkCmdResetEvent(cmdBuff, event, stageMask);
MVKTraceVulkanCallEnd();
}
@@ -1739,7 +1740,11 @@
MVKTraceVulkanCallStart();
MVKCommandBuffer* cmdBuff = MVKCommandBuffer::getMVKCommandBuffer(commandBuffer);
- cmdBuff->reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdWaitEvents(): Vukan events are not supported.");
+ mvkCmdWaitEvents(cmdBuff, eventCount, pEvents,
+ srcStageMask, dstStageMask,
+ memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers);
MVKTraceVulkanCallEnd();
}
@@ -1757,8 +1762,8 @@
MVKTraceVulkanCallStart();
MVKCommandBuffer* cmdBuff = MVKCommandBuffer::getMVKCommandBuffer(commandBuffer);
- mvkCmdPipelineBarrier(cmdBuff, srcStageMask, dstStageMask,
- dependencyFlags, memoryBarrierCount, pMemoryBarriers,
+ mvkCmdPipelineBarrier(cmdBuff, srcStageMask, dstStageMask, dependencyFlags,
+ memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
MVKTraceVulkanCallEnd();