Merge pull request #1368 from ahelpingchip/fix-docs-typos
Fix minor typos in docs
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
index 6bc4f4e..4829b93 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
@@ -512,6 +512,9 @@
#pragma mark -
#pragma mark Support functions
+/** Returns a name, suitable for use as a MTLCommandBuffer label, based on the MVKCommandUse. */
+NSString* mvkMTLCommandBufferLabel(MVKCommandUse cmdUse);
+
/** Returns a name, suitable for use as a MTLRenderCommandEncoder label, based on the MVKCommandUse. */
NSString* mvkMTLRenderCommandEncoderLabel(MVKCommandUse cmdUse);
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
index 2de0f53..352c475 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
@@ -792,48 +792,61 @@
#pragma mark -
#pragma mark Support functions
+NSString* mvkMTLCommandBufferLabel(MVKCommandUse cmdUse) {
+ switch (cmdUse) {
+ case kMVKCommandUseEndCommandBuffer: return @"vkEndCommandBuffer (Prefilled) CommandBuffer";
+ case kMVKCommandUseQueueSubmit: return @"vkQueueSubmit CommandBuffer";
+ case kMVKCommandUseQueuePresent: return @"vkQueuePresentKHR CommandBuffer";
+ case kMVKCommandUseQueueWaitIdle: return @"vkQueueWaitIdle CommandBuffer";
+ case kMVKCommandUseDeviceWaitIdle: return @"vkDeviceWaitIdle CommandBuffer";
+ case kMVKCommandUseAcquireNextImage: return @"vkAcquireNextImageKHR CommandBuffer";
+ case kMVKCommandUseInvalidateMappedMemoryRanges: return @"vkInvalidateMappedMemoryRanges CommandBuffer";
+ default: return @"Unknown Use CommandBuffer";
+ }
+}
+
NSString* mvkMTLRenderCommandEncoderLabel(MVKCommandUse cmdUse) {
switch (cmdUse) {
- case kMVKCommandUseBeginRenderPass: return @"vkCmdBeginRenderPass RenderEncoder";
- case kMVKCommandUseNextSubpass: return @"vkCmdNextSubpass RenderEncoder";
- case kMVKCommandUseBlitImage: return @"vkCmdBlitImage RenderEncoder";
- case kMVKCommandUseResolveImage: return @"vkCmdResolveImage (resolve stage) RenderEncoder";
- case kMVKCommandUseResolveExpandImage: return @"vkCmdResolveImage (expand stage) RenderEncoder";
- case kMVKCommandUseClearColorImage: return @"vkCmdClearColorImage RenderEncoder";
- case kMVKCommandUseClearDepthStencilImage: return @"vkCmdClearDepthStencilImage RenderEncoder";
- default: return @"Unknown Use RenderEncoder";
+ case kMVKCommandUseBeginRenderPass: return @"vkCmdBeginRenderPass RenderEncoder";
+ case kMVKCommandUseNextSubpass: return @"vkCmdNextSubpass RenderEncoder";
+ case kMVKCommandUseBlitImage: return @"vkCmdBlitImage RenderEncoder";
+ case kMVKCommandUseResolveImage: return @"vkCmdResolveImage (resolve stage) RenderEncoder";
+ case kMVKCommandUseResolveExpandImage: return @"vkCmdResolveImage (expand stage) RenderEncoder";
+ case kMVKCommandUseClearColorImage: return @"vkCmdClearColorImage RenderEncoder";
+ case kMVKCommandUseClearDepthStencilImage: return @"vkCmdClearDepthStencilImage RenderEncoder";
+ default: return @"Unknown Use RenderEncoder";
}
}
NSString* mvkMTLBlitCommandEncoderLabel(MVKCommandUse cmdUse) {
switch (cmdUse) {
- case kMVKCommandUsePipelineBarrier: return @"vkCmdPipelineBarrier BlitEncoder";
- case kMVKCommandUseCopyImage: return @"vkCmdCopyImage BlitEncoder";
- case kMVKCommandUseResolveCopyImage: return @"vkCmdResolveImage (copy stage) RenderEncoder";
- case kMVKCommandUseCopyBuffer: return @"vkCmdCopyBuffer BlitEncoder";
- case kMVKCommandUseCopyBufferToImage: return @"vkCmdCopyBufferToImage BlitEncoder";
- case kMVKCommandUseCopyImageToBuffer: return @"vkCmdCopyImageToBuffer BlitEncoder";
- case kMVKCommandUseFillBuffer: return @"vkCmdFillBuffer BlitEncoder";
- case kMVKCommandUseUpdateBuffer: return @"vkCmdUpdateBuffer BlitEncoder";
- case kMVKCommandUseResetQueryPool: return @"vkCmdResetQueryPool BlitEncoder";
- case kMVKCommandUseCopyQueryPoolResults:return @"vkCmdCopyQueryPoolResults BlitEncoder";
- default: return @"Unknown Use BlitEncoder";
+ case kMVKCommandUsePipelineBarrier: return @"vkCmdPipelineBarrier BlitEncoder";
+ case kMVKCommandUseCopyImage: return @"vkCmdCopyImage BlitEncoder";
+ case kMVKCommandUseResolveCopyImage: return @"vkCmdResolveImage (copy stage) BlitEncoder";
+ case kMVKCommandUseCopyBuffer: return @"vkCmdCopyBuffer BlitEncoder";
+ case kMVKCommandUseCopyBufferToImage: return @"vkCmdCopyBufferToImage BlitEncoder";
+ case kMVKCommandUseCopyImageToBuffer: return @"vkCmdCopyImageToBuffer BlitEncoder";
+ case kMVKCommandUseFillBuffer: return @"vkCmdFillBuffer BlitEncoder";
+ case kMVKCommandUseUpdateBuffer: return @"vkCmdUpdateBuffer BlitEncoder";
+ case kMVKCommandUseResetQueryPool: return @"vkCmdResetQueryPool BlitEncoder";
+ case kMVKCommandUseCopyQueryPoolResults: return @"vkCmdCopyQueryPoolResults BlitEncoder";
+ default: return @"Unknown Use BlitEncoder";
}
}
NSString* mvkMTLComputeCommandEncoderLabel(MVKCommandUse cmdUse) {
switch (cmdUse) {
- case kMVKCommandUseDispatch: return @"vkCmdDispatch ComputeEncoder";
- case kMVKCommandUseCopyBuffer: return @"vkCmdCopyBuffer ComputeEncoder";
- case kMVKCommandUseCopyBufferToImage: return @"vkCmdCopyBufferToImage ComputeEncoder";
- case kMVKCommandUseCopyImageToBuffer: return @"vkCmdCopyImageToBuffer ComputeEncoder";
- case kMVKCommandUseFillBuffer: return @"vkCmdFillBuffer ComputeEncoder";
- case kMVKCommandUseClearColorImage: return @"vkCmdClearColorImage ComputeEncoder";
- case kMVKCommandUseTessellationVertexTessCtl: return @"vkCmdDraw (vertex and tess control stages) ComputeEncoder";
- case kMVKCommandUseMultiviewInstanceCountAdjust: return @"vkCmdDraw (multiview instance count adjustment) ComputeEncoder";
- case kMVKCommandUseCopyQueryPoolResults:return @"vkCmdCopyQueryPoolResults ComputeEncoder";
- case kMVKCommandUseAccumOcclusionQuery: return @"Post-render-pass occlusion query accumulation ComputeEncoder";
- default: return @"Unknown Use ComputeEncoder";
+ case kMVKCommandUseDispatch: return @"vkCmdDispatch ComputeEncoder";
+ case kMVKCommandUseCopyBuffer: return @"vkCmdCopyBuffer ComputeEncoder";
+ case kMVKCommandUseCopyBufferToImage: return @"vkCmdCopyBufferToImage ComputeEncoder";
+ case kMVKCommandUseCopyImageToBuffer: return @"vkCmdCopyImageToBuffer ComputeEncoder";
+ case kMVKCommandUseFillBuffer: return @"vkCmdFillBuffer ComputeEncoder";
+ case kMVKCommandUseClearColorImage: return @"vkCmdClearColorImage ComputeEncoder";
+ case kMVKCommandUseTessellationVertexTessCtl: return @"vkCmdDraw (vertex and tess control stages) ComputeEncoder";
+ case kMVKCommandUseMultiviewInstanceCountAdjust: return @"vkCmdDraw (multiview instance count adjustment) ComputeEncoder";
+ case kMVKCommandUseCopyQueryPoolResults: return @"vkCmdCopyQueryPoolResults ComputeEncoder";
+ case kMVKCommandUseAccumOcclusionQuery: return @"Post-render-pass occlusion query accumulation ComputeEncoder";
+ default: return @"Unknown Use ComputeEncoder";
}
}
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm b/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
index 366ebdf..c219539 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
@@ -78,7 +78,7 @@
}
id<MTLCommandBuffer> MVKCommandPool::newMTLCommandBuffer(uint32_t queueIndex) {
- return [_device->getQueue(_queueFamilyIndex, queueIndex)->getMTLCommandBuffer(true) retain];
+ return [_device->getQueue(_queueFamilyIndex, queueIndex)->getMTLCommandBuffer(kMVKCommandUseEndCommandBuffer, true) retain];
}
// Clear the command type pool member variables.
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm
index d5c3421..c192d5d 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm
@@ -36,7 +36,7 @@
id<MTLFunction> vtxFunc = newFunctionNamed(isLayeredBlit ? "vtxCmdBlitImageLayered" : "vtxCmdBlitImage"); // temp retain
id<MTLFunction> fragFunc = newBlitFragFunction(blitKey); // temp retain
MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new]; // temp retain
- plDesc.label = @"CmdBlitImage";
+ plDesc.label = @"vkCmdBlitImage";
plDesc.vertexFunction = vtxFunc;
plDesc.fragmentFunction = fragFunc;
@@ -116,7 +116,7 @@
id<MTLFunction> vtxFunc = newClearVertFunction(attKey); // temp retain
id<MTLFunction> fragFunc = newClearFragFunction(attKey); // temp retain
MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new]; // temp retain
- plDesc.label = @"CmdClearAttachments";
+ plDesc.label = @"vkCmdClearAttachments";
plDesc.vertexFunction = vtxFunc;
plDesc.fragmentFunction = fragFunc;
plDesc.sampleCount = attKey.mtlSampleCount;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index e70de7e..40ee051 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -2855,7 +2855,7 @@
VkResult rslt = VK_SUCCESS;
for (auto& queues : _queuesByQueueFamilyIndex) {
for (MVKQueue* q : queues) {
- if ((rslt = q->waitIdle()) != VK_SUCCESS) { return rslt; }
+ if ((rslt = q->waitIdle(kMVKCommandUseDeviceWaitIdle)) != VK_SUCCESS) { return rslt; }
}
}
return VK_SUCCESS;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
index 7f65a32..eac900f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
@@ -109,7 +109,7 @@
#if MVK_MACOS
if (pBlitEnc && _mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
- if ( !pBlitEnc->mtlCmdBuffer) { pBlitEnc->mtlCmdBuffer = _device->getAnyQueue()->getMTLCommandBuffer(); }
+ if ( !pBlitEnc->mtlCmdBuffer) { pBlitEnc->mtlCmdBuffer = _device->getAnyQueue()->getMTLCommandBuffer(kMVKCommandUseInvalidateMappedMemoryRanges); }
if ( !pBlitEnc->mtlBlitEncoder) { pBlitEnc->mtlBlitEncoder = [pBlitEnc->mtlCmdBuffer blitCommandEncoder]; }
[pBlitEnc->mtlBlitEncoder synchronizeResource: _mtlBuffer];
}
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index 094bda9..79e54df 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -1210,7 +1210,7 @@
@autoreleasepool {
MVKSemaphore* mvkSem = signaler.semaphore;
id<MTLCommandBuffer> mtlCmdBuff = (mvkSem && mvkSem->isUsingCommandEncoding()
- ? _device->getAnyQueue()->getMTLCommandBuffer()
+ ? _device->getAnyQueue()->getMTLCommandBuffer(kMVKCommandUseAcquireNextImage)
: nil);
signal(signaler, mtlCmdBuff);
[mtlCmdBuff commit];
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h
index cc4d3c2..e9c92ee 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h
@@ -89,13 +89,13 @@
#pragma mark Queue submissions
/** Submits the specified command buffers to the queue. */
- VkResult submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+ VkResult submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence, MVKCommandUse cmdUse);
/** Submits the specified presentation command to the queue. */
VkResult submit(const VkPresentInfoKHR* pPresentInfo);
/** Block the current thread until this queue is idle. */
- VkResult waitIdle();
+ VkResult waitIdle(MVKCommandUse cmdUse);
/** Return the name of this queue. */
const std::string& getName() { return _name; }
@@ -107,7 +107,7 @@
id<MTLCommandQueue> getMTLCommandQueue() { return _mtlQueue; }
/** Returns a Metal command buffer from the Metal queue. */
- id<MTLCommandBuffer> getMTLCommandBuffer(bool retainRefs = false);
+ id<MTLCommandBuffer> getMTLCommandBuffer(MVKCommandUse cmdUse, bool retainRefs = false);
#pragma mark Construction
@@ -193,7 +193,7 @@
public:
void execute() override;
- MVKQueueCommandBufferSubmission(MVKQueue* queue, const VkSubmitInfo* pSubmit, VkFence fence);
+ MVKQueueCommandBufferSubmission(MVKQueue* queue, const VkSubmitInfo* pSubmit, VkFence fence, MVKCommandUse cmdUse);
~MVKQueueCommandBufferSubmission() override;
@@ -209,6 +209,7 @@
MVKSmallVector<std::pair<MVKSemaphore*, uint64_t>> _signalSemaphores;
MVKFence* _fence;
id<MTLCommandBuffer> _activeMTLCommandBuffer;
+ MVKCommandUse _commandUse;
};
@@ -221,7 +222,7 @@
public:
MVKQueueFullCommandBufferSubmission(MVKQueue* queue, const VkSubmitInfo* pSubmit, VkFence fence) :
- MVKQueueCommandBufferSubmission(queue, pSubmit, fence) {
+ MVKQueueCommandBufferSubmission(queue, pSubmit, fence, kMVKCommandUseQueueSubmit) {
// pSubmit can be null if just tracking the fence alone
if (pSubmit) {
@@ -255,7 +256,6 @@
const VkPresentInfoKHR* pPresentInfo);
protected:
- id<MTLCommandBuffer> getMTLCommandBuffer();
void stopAutoGPUCapture();
MVKSmallVector<MVKPresentTimingInfo, 4> _presentInfo;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
index f9556af..22bd1b2 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
@@ -88,11 +88,11 @@
return rslt;
}
-VkResult MVKQueue::submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) {
+VkResult MVKQueue::submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence, MVKCommandUse cmdUse) {
// Fence-only submission
if (submitCount == 0 && fence) {
- return submit(new MVKQueueCommandBufferSubmission(this, nullptr, fence));
+ return submit(new MVKQueueCommandBufferSubmission(this, nullptr, fence, cmdUse));
}
VkResult rslt = VK_SUCCESS;
@@ -129,7 +129,7 @@
}
// Create an empty submit struct and fence, submit to queue and wait on fence.
-VkResult MVKQueue::waitIdle() {
+VkResult MVKQueue::waitIdle(MVKCommandUse cmdUse) {
if (_device->getConfigurationResult() != VK_SUCCESS) { return _device->getConfigurationResult(); }
@@ -143,13 +143,14 @@
// the command submission finishes, so we can't allocate MVKFence locally on the stack.
MVKFence* mvkFence = new MVKFence(_device, &vkFenceInfo);
VkFence vkFence = (VkFence)mvkFence;
- submit(0, nullptr, vkFence);
+ submit(0, nullptr, vkFence, cmdUse);
VkResult rslt = mvkWaitForFences(_device, 1, &vkFence, false);
mvkFence->destroy();
return rslt;
}
-id<MTLCommandBuffer> MVKQueue::getMTLCommandBuffer(bool retainRefs) {
+id<MTLCommandBuffer> MVKQueue::getMTLCommandBuffer(MVKCommandUse cmdUse, bool retainRefs) {
+ id<MTLCommandBuffer> mtlCmdBuff = nil;
#if MVK_XCODE_12
if ([_mtlQueue respondsToSelector: @selector(commandBufferWithDescriptor:)]) {
MTLCommandBufferDescriptor* mtlCmdBuffDesc = [MTLCommandBufferDescriptor new]; // temp retain
@@ -157,16 +158,17 @@
if (mvkConfig().debugMode) {
mtlCmdBuffDesc.errorOptions |= MTLCommandBufferErrorOptionEncoderExecutionStatus;
}
- id<MTLCommandBuffer> cmdBuff = [_mtlQueue commandBufferWithDescriptor: mtlCmdBuffDesc];
+ mtlCmdBuff = [_mtlQueue commandBufferWithDescriptor: mtlCmdBuffDesc];
[mtlCmdBuffDesc release]; // temp release
- return cmdBuff;
} else
#endif
if (retainRefs) {
- return [_mtlQueue commandBuffer];
+ mtlCmdBuff = [_mtlQueue commandBuffer];
} else {
- return [_mtlQueue commandBufferWithUnretainedReferences];
+ mtlCmdBuff = [_mtlQueue commandBufferWithUnretainedReferences];
}
+ setLabelIfNotNil(mtlCmdBuff, mvkMTLCommandBufferLabel(cmdUse));
+ return mtlCmdBuff;
}
@@ -282,7 +284,7 @@
// Returns the active MTLCommandBuffer, lazily retrieving it from the queue if needed.
id<MTLCommandBuffer> MVKQueueCommandBufferSubmission::getActiveMTLCommandBuffer() {
if ( !_activeMTLCommandBuffer ) {
- setActiveMTLCommandBuffer(_queue->getMTLCommandBuffer());
+ setActiveMTLCommandBuffer(_queue->getMTLCommandBuffer(_commandUse));
}
return _activeMTLCommandBuffer;
}
@@ -410,10 +412,12 @@
// retain() each here to ensure they live long enough for this submission to finish using them.
MVKQueueCommandBufferSubmission::MVKQueueCommandBufferSubmission(MVKQueue* queue,
const VkSubmitInfo* pSubmit,
- VkFence fence)
- : MVKQueueSubmission(queue,
- (pSubmit ? pSubmit->waitSemaphoreCount : 0),
- (pSubmit ? pSubmit->pWaitSemaphores : nullptr)) {
+ VkFence fence,
+ MVKCommandUse cmdUse) :
+ MVKQueueSubmission(queue,
+ (pSubmit ? pSubmit->waitSemaphoreCount : 0),
+ (pSubmit ? pSubmit->pWaitSemaphores : nullptr)),
+ _commandUse(cmdUse) {
// pSubmit can be null if just tracking the fence alone
if (pSubmit) {
@@ -466,7 +470,8 @@
// If the semaphores are encodable, wait on them by encoding them on the MTLCommandBuffer before presenting.
// If the semaphores are not encodable, wait on them inline after presenting.
// The semaphores know what to do.
- id<MTLCommandBuffer> mtlCmdBuff = getMTLCommandBuffer();
+ id<MTLCommandBuffer> mtlCmdBuff = _queue->getMTLCommandBuffer(kMVKCommandUseQueuePresent);
+ [mtlCmdBuff enqueue];
for (auto& ws : _waitSemaphores) { ws.first->encodeWait(mtlCmdBuff, 0); }
for (int i = 0; i < _presentInfo.size(); i++ ) {
MVKPresentableSwapchainImage *img = _presentInfo[i].presentableImage;
@@ -484,13 +489,6 @@
this->destroy();
}
-id<MTLCommandBuffer> MVKQueuePresentSurfaceSubmission::getMTLCommandBuffer() {
- id<MTLCommandBuffer> mtlCmdBuff = _queue->getMTLCommandBuffer();
- setLabelIfNotNil(mtlCmdBuff, @"vkQueuePresentKHR CommandBuffer");
- [mtlCmdBuff enqueue];
- return mtlCmdBuff;
-}
-
void MVKQueuePresentSurfaceSubmission::stopAutoGPUCapture() {
if (_queue->_queueFamily->getIndex() == mvkConfig().defaultGPUCaptureScopeQueueFamilyIndex &&
_queue->_index == mvkConfig().defaultGPUCaptureScopeQueueIndex) {
diff --git a/MoltenVK/MoltenVK/Utility/MVKFoundation.h b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
index afbc1c6..e061142 100644
--- a/MoltenVK/MoltenVK/Utility/MVKFoundation.h
+++ b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
@@ -63,32 +63,35 @@
/** Tracks the Vulkan command currently being used. */
typedef enum : uint8_t {
- kMVKCommandUseNone, /**< No use defined. */
- kMVKCommandUseQueueSubmit, /**< vkQueueSubmit. */
- kMVKCommandUseQueuePresent, /**< vkQueuePresentKHR. */
- kMVKCommandUseQueueWaitIdle, /**< vkQueueWaitIdle. */
- kMVKCommandUseDeviceWaitIdle, /**< vkDeviceWaitIdle. */
- kMVKCommandUseBeginRenderPass, /**< vkCmdBeginRenderPass. */
- kMVKCommandUseNextSubpass, /**< vkCmdNextSubpass. */
- kMVKCommandUsePipelineBarrier, /**< vkCmdPipelineBarrier. */
- kMVKCommandUseBlitImage, /**< vkCmdBlitImage. */
- kMVKCommandUseCopyImage, /**< vkCmdCopyImage. */
- kMVKCommandUseResolveImage, /**< vkCmdResolveImage - resolve stage. */
- kMVKCommandUseResolveExpandImage, /**< vkCmdResolveImage - expand stage. */
- kMVKCommandUseResolveCopyImage, /**< vkCmdResolveImage - copy stage. */
- kMVKCommandUseCopyBuffer, /**< vkCmdCopyBuffer. */
- kMVKCommandUseCopyBufferToImage, /**< vkCmdCopyBufferToImage. */
- kMVKCommandUseCopyImageToBuffer, /**< vkCmdCopyImageToBuffer. */
- kMVKCommandUseFillBuffer, /**< vkCmdFillBuffer. */
- kMVKCommandUseUpdateBuffer, /**< vkCmdUpdateBuffer. */
- kMVKCommandUseClearColorImage, /**< vkCmdClearColorImage. */
- kMVKCommandUseClearDepthStencilImage, /**< vkCmdClearDepthStencilImage. */
- kMVKCommandUseResetQueryPool, /**< vkCmdResetQueryPool. */
- kMVKCommandUseDispatch, /**< vkCmdDispatch. */
- kMVKCommandUseTessellationVertexTessCtl,/**< vkCmdDraw* - vertex and tessellation control stages. */
- kMVKCommandUseMultiviewInstanceCountAdjust,/**< vkCmdDrawIndirect* - adjust instance count for multiview. */
- kMVKCommandUseCopyQueryPoolResults, /**< vkCmdCopyQueryPoolResults. */
- kMVKCommandUseAccumOcclusionQuery /**< Any command terminating a Metal render pass with active visibility buffer. */
+ kMVKCommandUseNone, /**< No use defined. */
+ kMVKCommandUseEndCommandBuffer, /**< vkEndCommandBuffer (prefilled VkCommandBuffer). */
+ kMVKCommandUseQueueSubmit, /**< vkQueueSubmit. */
+ kMVKCommandUseAcquireNextImage, /**< vkAcquireNextImageKHR. */
+ kMVKCommandUseQueuePresent, /**< vkQueuePresentKHR. */
+ kMVKCommandUseQueueWaitIdle, /**< vkQueueWaitIdle. */
+ kMVKCommandUseDeviceWaitIdle, /**< vkDeviceWaitIdle. */
+ kMVKCommandUseInvalidateMappedMemoryRanges, /**< vkInvalidateMappedMemoryRanges. */
+ kMVKCommandUseBeginRenderPass, /**< vkCmdBeginRenderPass. */
+ kMVKCommandUseNextSubpass, /**< vkCmdNextSubpass. */
+ kMVKCommandUsePipelineBarrier, /**< vkCmdPipelineBarrier. */
+ kMVKCommandUseBlitImage, /**< vkCmdBlitImage. */
+ kMVKCommandUseCopyImage, /**< vkCmdCopyImage. */
+ kMVKCommandUseResolveImage, /**< vkCmdResolveImage - resolve stage. */
+ kMVKCommandUseResolveExpandImage, /**< vkCmdResolveImage - expand stage. */
+ kMVKCommandUseResolveCopyImage, /**< vkCmdResolveImage - copy stage. */
+ kMVKCommandUseCopyBuffer, /**< vkCmdCopyBuffer. */
+ kMVKCommandUseCopyBufferToImage, /**< vkCmdCopyBufferToImage. */
+ kMVKCommandUseCopyImageToBuffer, /**< vkCmdCopyImageToBuffer. */
+ kMVKCommandUseFillBuffer, /**< vkCmdFillBuffer. */
+ kMVKCommandUseUpdateBuffer, /**< vkCmdUpdateBuffer. */
+ kMVKCommandUseClearColorImage, /**< vkCmdClearColorImage. */
+ kMVKCommandUseClearDepthStencilImage, /**< vkCmdClearDepthStencilImage. */
+ kMVKCommandUseResetQueryPool, /**< vkCmdResetQueryPool. */
+ kMVKCommandUseDispatch, /**< vkCmdDispatch. */
+ kMVKCommandUseTessellationVertexTessCtl, /**< vkCmdDraw* - vertex and tessellation control stages. */
+ kMVKCommandUseMultiviewInstanceCountAdjust, /**< vkCmdDrawIndirect* - adjust instance count for multiview. */
+ kMVKCommandUseCopyQueryPoolResults, /**< vkCmdCopyQueryPoolResults. */
+ kMVKCommandUseAccumOcclusionQuery /**< Any command terminating a Metal render pass with active visibility buffer. */
} MVKCommandUse;
/** Represents a given stage of a graphics pipeline. */
diff --git a/MoltenVK/MoltenVK/Vulkan/vulkan.mm b/MoltenVK/MoltenVK/Vulkan/vulkan.mm
index 2546e84..25eb9d6 100644
--- a/MoltenVK/MoltenVK/Vulkan/vulkan.mm
+++ b/MoltenVK/MoltenVK/Vulkan/vulkan.mm
@@ -367,7 +367,7 @@
MVKTraceVulkanCallStart();
MVKQueue* mvkQ = MVKQueue::getMVKQueue(queue);
- VkResult rslt = mvkQ->submit(submitCount, pSubmits, fence);
+ VkResult rslt = mvkQ->submit(submitCount, pSubmits, fence, kMVKCommandUseQueueSubmit);
MVKTraceVulkanCallEnd();
return rslt;
}
@@ -377,7 +377,7 @@
MVKTraceVulkanCallStart();
MVKQueue* mvkQ = MVKQueue::getMVKQueue(queue);
- VkResult rslt = mvkQ->waitIdle();
+ VkResult rslt = mvkQ->waitIdle(kMVKCommandUseQueueWaitIdle);
MVKTraceVulkanCallEnd();
return rslt;
}