Merge pull request #656 from billhollings/master
Multiple fixes to image copy, BLIT, and resolve (CTS)
diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index 18dd946..22a63de 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -30,6 +30,10 @@
- Separate `SPIRVToMSLConverterContext` into input config and output results.
- Fix pipeline cache lookups.
- Fix race condition between swapchain image destruction and presentation completion callback.
+- Set Metal texture usage to allow texture copy via view.
+- `vkCmdCopyImage()` supports copying between compressed and uncompressed formats
+  and validates that formats are compatible for copying.
+- `vkCmdBufferImageCopy()` fix crash when setting bytes per image in non-arrayed images.
- Document that the functions in `vk_mvk_moltenvk.h` cannot be used with objects
retrieved through the *Vulkan SDK Loader and Layers* framework.
- Update `VK_MVK_MOLTENVK_SPEC_VERSION` to 21.
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
index 5eff3e4..b8e106b 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
@@ -209,7 +209,7 @@
VK_SHADER_STAGE_COMPUTE_BIT
};
for (auto stage : stages) {
- if (mvkAreFlagsEnabled(_stageFlags, stage)) {
+ if (mvkAreAllFlagsEnabled(_stageFlags, stage)) {
cmdEncoder->getPushConstants(stage)->setPushConstants(_offset, _pushConstants);
}
}
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.h b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.h
index 3e3b007..bd7110a 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.h
@@ -33,17 +33,6 @@
#pragma mark -
#pragma mark MVKCmdCopyImage
-/** Describes the Metal texture copying parameters. */
-typedef struct {
- uint32_t srcLevel;
- uint32_t srcSlice;
- MTLOrigin srcOrigin;
- MTLSize srcSize;
- uint32_t dstLevel;
- uint32_t dstSlice;
- MTLOrigin dstOrigin;
-} MVKMetalCopyTextureRegion;
-
/** Vulkan command to copy image regions. */
class MVKCmdCopyImage : public MVKCommand {
@@ -62,14 +51,29 @@
MVKCommand::MVKCommand((MVKCommandTypePool<MVKCommand>*)pool) {}
protected:
- void addMetalCopyRegions(const VkImageCopy* pRegion);
+ void setContent(VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, MVKCommandUse commandUse);
+ void addImageCopyRegion(const VkImageCopy& region);
+ void addTempBufferImageCopyRegion(const VkImageCopy& region);
MVKImage* _srcImage;
VkImageLayout _srcLayout;
MVKImage* _dstImage;
VkImageLayout _dstLayout;
- std::vector<MVKMetalCopyTextureRegion> _mtlTexCopyRegions;
- MVKCommandUse _commandUse = kMVKCommandUseNone;
+ MTLPixelFormat _srcMTLPixFmt;
+ MTLPixelFormat _dstMTLPixFmt;
+ uint32_t _srcSampleCount;
+ uint32_t _dstSampleCount;
+ bool _isSrcCompressed;
+ bool _isDstCompressed;
+ bool _canCopyFormats;
+ bool _shouldUseTextureView;
+ bool _shouldUseTempBuffer;
+ std::vector<VkImageCopy> _imageCopyRegions;
+ std::vector<VkBufferImageCopy> _srcTmpBuffImgCopies;
+ std::vector<VkBufferImageCopy> _dstTmpBuffImgCopies;
+ size_t _tmpBuffSize;
+ MVKCommandUse _commandUse;
};
@@ -79,14 +83,11 @@
/** Number of vertices in a BLIT rectangle. */
#define kMVKBlitVertexCount 4
-/** Describes Metal texture rendering parameters. */
+/** Combines a VkImageBlit with vertices to render it. */
typedef struct {
- uint32_t srcLevel;
- uint32_t srcSlice;
- uint32_t dstLevel;
- uint32_t dstSlice;
+ VkImageBlit region;
MVKVertexPosTex vertices[kMVKBlitVertexCount];
-} MVKMetalBlitTextureRender;
+} MVKImageBlitRender;
/** Vulkan command to BLIT image regions. */
class MVKCmdBlitImage : public MVKCmdCopyImage {
@@ -108,17 +109,16 @@
~MVKCmdBlitImage() override;
protected:
- bool canCopy(const VkImageBlit* pRegion);
- void addMetalCopyRegions(const VkImageBlit* pRegion);
- void addMetalBlitRenders(const VkImageBlit* pRegion);
- void populateVertices(MVKVertexPosTex* vertices, const VkImageBlit* pRegion);
+ bool canCopy(const VkImageBlit& region);
+ void addImageBlitRegion(const VkImageBlit& region);
+ void addImageCopyRegionFromBlitRegion(const VkImageBlit& region);
+ void populateVertices(MVKVertexPosTex* vertices, const VkImageBlit& region);
void initMTLRenderPassDescriptor();
MTLRenderPassDescriptor* _mtlRenderPassDescriptor;
MTLSamplerMinMagFilter _mtlFilter;
- MTLPixelFormat _mtlPixFmt;
MVKRPSKeyBlitImg _blitKey;
- std::vector<MVKMetalBlitTextureRender> _mtlTexBlitRenders;
+ std::vector<MVKImageBlitRender> _mvkImageBlitRenders;
};
@@ -211,10 +211,12 @@
MVKCommand::MVKCommand((MVKCommandTypePool<MVKCommand>*)pool) {}
protected:
+ bool isArrayTexture();
+
MVKBuffer* _buffer;
MVKImage* _image;
VkImageLayout _imageLayout;
- std::vector<VkBufferImageCopy> _mtlBuffImgCopyRegions;
+ std::vector<VkBufferImageCopy> _bufferImageCopyRegions;
bool _toImage = false;
};
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm
index db6c561..56ef128 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm
@@ -38,81 +38,169 @@
VkImage dstImage,
VkImageLayout dstImageLayout,
uint32_t regionCount,
- const VkImageCopy* pRegions,
- MVKCommandUse commandUse) {
- _srcImage = (MVKImage*)srcImage;
- _srcLayout = srcImageLayout;
- _dstImage = (MVKImage*)dstImage;
- _dstLayout = dstImageLayout;
- _commandUse = commandUse;
+ const VkImageCopy* pRegions,
+ MVKCommandUse commandUse) {
- // Deterine the total number of texture layers being affected
- uint32_t layerCnt = 0;
- for (uint32_t i = 0; i < regionCount; i++) {
- layerCnt += pRegions[i].srcSubresource.layerCount;
- }
+ setContent(srcImage, srcImageLayout, dstImage, dstImageLayout, commandUse);
- // Add image regions
- _mtlTexCopyRegions.clear(); // Clear for reuse
- _mtlTexCopyRegions.reserve(layerCnt);
for (uint32_t i = 0; i < regionCount; i++) {
- addMetalCopyRegions(&pRegions[i]);
+ addImageCopyRegion(pRegions[i]);
}
- // Validate
- if ((_srcImage->getMTLTextureType() == MTLTextureType3D) != (_dstImage->getMTLTextureType() == MTLTextureType3D)) {
- setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdCopyImage(): Metal does not support copying to or from slices of a 3D texture."));
- }
+ // Validate
+ if ( !_canCopyFormats ) {
+ setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdCopyImage(): Cannot copy between incompatible formats, such as formats of different pixel sizes."));
+ }
+ if ((_srcImage->getMTLTextureType() == MTLTextureType3D) != (_dstImage->getMTLTextureType() == MTLTextureType3D)) {
+ setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdCopyImage(): Metal does not support copying to or from slices of a 3D texture."));
+ }
}
-// Adds a Metal copy region structure for each layer in the specified copy region.
-void MVKCmdCopyImage::addMetalCopyRegions(const VkImageCopy* pRegion) {
+// Sets basic content for use by this class and subclasses
+void MVKCmdCopyImage::setContent(VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ MVKCommandUse commandUse) {
+ _srcImage = (MVKImage*)srcImage;
+ _srcLayout = srcImageLayout;
+ _srcMTLPixFmt = _srcImage->getMTLPixelFormat();
+ _srcSampleCount = mvkSampleCountFromVkSampleCountFlagBits(_srcImage->getSampleCount());
+ _isSrcCompressed = _srcImage->getIsCompressed();
+ uint32_t srcBytesPerBlock = mvkMTLPixelFormatBytesPerBlock(_srcMTLPixFmt);
- MVKMetalCopyTextureRegion mtlImgRgn;
- mtlImgRgn.srcOrigin = mvkMTLOriginFromVkOffset3D(pRegion->srcOffset);
- mtlImgRgn.dstOrigin = mvkMTLOriginFromVkOffset3D(pRegion->dstOffset);
- mtlImgRgn.srcSize = mvkMTLSizeFromVkExtent3D(pRegion->extent);
- mtlImgRgn.srcLevel = pRegion->srcSubresource.mipLevel;
- mtlImgRgn.dstLevel = pRegion->dstSubresource.mipLevel;
+ _dstImage = (MVKImage*)dstImage;
+ _dstLayout = dstImageLayout;
+ _dstMTLPixFmt = _dstImage->getMTLPixelFormat();
+ _dstSampleCount = mvkSampleCountFromVkSampleCountFlagBits(_dstImage->getSampleCount());
+ _isDstCompressed = _dstImage->getIsCompressed();
+ uint32_t dstBytesPerBlock = mvkMTLPixelFormatBytesPerBlock(_dstMTLPixFmt);
- uint32_t srcBaseLayer = pRegion->srcSubresource.baseArrayLayer;
- uint32_t dstBaseLayer = pRegion->dstSubresource.baseArrayLayer;
- uint32_t layCnt = pRegion->srcSubresource.layerCount;
+ _canCopyFormats = (srcBytesPerBlock == dstBytesPerBlock) && (_srcSampleCount == _dstSampleCount);
+ _shouldUseTextureView = (_srcMTLPixFmt != _dstMTLPixFmt) && !(_isSrcCompressed || _isDstCompressed); // Different formats and neither is compressed
+ _shouldUseTempBuffer = (_srcMTLPixFmt != _dstMTLPixFmt) && (_isSrcCompressed || _isDstCompressed); // Different formats and at least one is compressed
- for (uint32_t layIdx = 0; layIdx < layCnt; layIdx++) {
- mtlImgRgn.srcSlice = srcBaseLayer + layIdx;
- mtlImgRgn.dstSlice = dstBaseLayer + layIdx;
- _mtlTexCopyRegions.push_back(mtlImgRgn);
+ _commandUse = commandUse;
+ _tmpBuffSize = 0;
+
+ _imageCopyRegions.clear(); // Clear for reuse
+ _srcTmpBuffImgCopies.clear(); // Clear for reuse
+ _dstTmpBuffImgCopies.clear(); // Clear for reuse
+}
+
+void MVKCmdCopyImage::addImageCopyRegion(const VkImageCopy& region) {
+ if (_shouldUseTempBuffer) {
+ addTempBufferImageCopyRegion(region); // Convert to image->buffer->image copies
+ } else {
+ _imageCopyRegions.push_back(region);
}
}
+// Add an image->buffer copy and buffer->image copy to replace the image->image copy
+void MVKCmdCopyImage::addTempBufferImageCopyRegion(const VkImageCopy& region) {
+ VkBufferImageCopy buffImgCpy;
+
+ // Add copy from source image to temp buffer.
+ buffImgCpy.bufferOffset = _tmpBuffSize;
+ buffImgCpy.bufferRowLength = 0;
+ buffImgCpy.bufferImageHeight = 0;
+ buffImgCpy.imageSubresource = region.srcSubresource;
+ buffImgCpy.imageOffset = region.srcOffset;
+ buffImgCpy.imageExtent = region.extent;
+ _srcTmpBuffImgCopies.push_back(buffImgCpy);
+
+ // Add copy from temp buffer to destination image.
+ // Extent is provided in source texels. If the source is compressed but the
+ // destination is not, each destination pixel will consume an entire source block,
+ // so we must downscale the destination extent by the size of the source block.
+ VkExtent3D dstExtent = region.extent;
+ if (_isSrcCompressed && !_isDstCompressed) {
+ VkExtent2D srcBlockExtent = mvkMTLPixelFormatBlockTexelSize(_srcMTLPixFmt);
+ dstExtent.width /= srcBlockExtent.width;
+ dstExtent.height /= srcBlockExtent.height;
+ }
+ buffImgCpy.bufferOffset = _tmpBuffSize;
+ buffImgCpy.bufferRowLength = 0;
+ buffImgCpy.bufferImageHeight = 0;
+ buffImgCpy.imageSubresource = region.dstSubresource;
+ buffImgCpy.imageOffset = region.dstOffset;
+ buffImgCpy.imageExtent = dstExtent;
+ _dstTmpBuffImgCopies.push_back(buffImgCpy);
+
+ NSUInteger bytesPerRow = mvkMTLPixelFormatBytesPerRow(_srcMTLPixFmt, region.extent.width);
+ NSUInteger bytesPerRegion = mvkMTLPixelFormatBytesPerLayer(_srcMTLPixFmt, bytesPerRow, region.extent.height);
+ _tmpBuffSize += bytesPerRegion;
+}
+
void MVKCmdCopyImage::encode(MVKCommandEncoder* cmdEncoder) {
- id<MTLTexture> srcMTLTex = _srcImage->getMTLTexture();
- id<MTLTexture> dstMTLTex = _dstImage->getMTLTexture();
- if ( !srcMTLTex || !dstMTLTex ) { return; }
+ id<MTLTexture> srcMTLTex = _srcImage->getMTLTexture();
+ id<MTLTexture> dstMTLTex = _dstImage->getMTLTexture();
+ if ( !srcMTLTex || !dstMTLTex ) { return; }
- if (srcMTLTex.pixelFormat != dstMTLTex.pixelFormat &&
- mvkFormatTypeFromVkFormat(_dstImage->getVkFormat()) != kMVKFormatCompressed &&
- mvkFormatTypeFromVkFormat(_srcImage->getVkFormat()) != kMVKFormatCompressed) {
- // If the pixel formats don't match, Metal won't abort, but it won't
- // do the copy either. But we can easily work around that... unless the
- // source format is compressed.
- srcMTLTex = [[srcMTLTex newTextureViewWithPixelFormat: dstMTLTex.pixelFormat] autorelease];
- }
+ // If the pixel formats are different but mappable, use a texture view on the source texture
+ if (_shouldUseTextureView) {
+ srcMTLTex = [[srcMTLTex newTextureViewWithPixelFormat: _dstMTLPixFmt] autorelease];
+ }
- id<MTLBlitCommandEncoder> mtlBlitEnc = cmdEncoder->getMTLBlitEncoder(_commandUse);
+ id<MTLBlitCommandEncoder> mtlBlitEnc = cmdEncoder->getMTLBlitEncoder(_commandUse);
- for (auto& cpyRgn : _mtlTexCopyRegions) {
- [mtlBlitEnc copyFromTexture: srcMTLTex
- sourceSlice: cpyRgn.srcSlice
- sourceLevel: cpyRgn.srcLevel
- sourceOrigin: cpyRgn.srcOrigin
- sourceSize: cpyRgn.srcSize
- toTexture: dstMTLTex
- destinationSlice: cpyRgn.dstSlice
- destinationLevel: cpyRgn.dstLevel
- destinationOrigin: cpyRgn.dstOrigin];
- }
+ // If copies can be performed using direct texture-texture copying, do so
+ for (auto& cpyRgn : _imageCopyRegions) {
+ uint32_t srcLevel = cpyRgn.srcSubresource.mipLevel;
+ MTLOrigin srcOrigin = mvkMTLOriginFromVkOffset3D(cpyRgn.srcOffset);
+ MTLSize srcSize = mvkMTLSizeFromVkExtent3D(cpyRgn.extent);
+ uint32_t dstLevel = cpyRgn.dstSubresource.mipLevel;
+ MTLOrigin dstOrigin = mvkMTLOriginFromVkOffset3D(cpyRgn.dstOffset);
+
+ uint32_t srcBaseLayer = cpyRgn.srcSubresource.baseArrayLayer;
+ uint32_t dstBaseLayer = cpyRgn.dstSubresource.baseArrayLayer;
+ uint32_t layCnt = cpyRgn.srcSubresource.layerCount;
+
+ for (uint32_t layIdx = 0; layIdx < layCnt; layIdx++) {
+ [mtlBlitEnc copyFromTexture: srcMTLTex
+ sourceSlice: srcBaseLayer + layIdx
+ sourceLevel: srcLevel
+ sourceOrigin: srcOrigin
+ sourceSize: srcSize
+ toTexture: dstMTLTex
+ destinationSlice: dstBaseLayer + layIdx
+ destinationLevel: dstLevel
+ destinationOrigin: dstOrigin];
+ }
+ }
+
+ // If copies could not be performed directly between images,
+ // use a temporary buffer acting as a waystation between the images.
+ if ( !_srcTmpBuffImgCopies.empty() ) {
+ MVKBufferDescriptorData tempBuffData;
+ tempBuffData.size = _tmpBuffSize;
+ tempBuffData.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ MVKBuffer* tempBuff = getCommandEncodingPool()->getTransferMVKBuffer(tempBuffData);
+
+ MVKCmdBufferImageCopy cpyCmd(&getCommandPool()->_cmdBufferImageCopyPool);
+
+ // Copy from source image to buffer
+ // Create and execute a temporary buffer image command.
+ // To be threadsafe...do NOT acquire and return the command from the pool.
+ cpyCmd.setContent((VkBuffer) tempBuff,
+ (VkImage) _srcImage,
+ _srcLayout,
+ (uint32_t)_srcTmpBuffImgCopies.size(),
+ _srcTmpBuffImgCopies.data(),
+ false);
+ cpyCmd.encode(cmdEncoder);
+
+ // Copy from buffer to destination image
+ // Create and execute a temporary buffer image command.
+ // To be threadsafe...do NOT acquire and return the command from the pool.
+ cpyCmd.setContent((VkBuffer) tempBuff,
+ (VkImage) _dstImage,
+ _dstLayout,
+ (uint32_t)_dstTmpBuffImgCopies.size(),
+ _dstTmpBuffImgCopies.data(),
+ true);
+ cpyCmd.encode(cmdEncoder);
+ }
}
@@ -125,134 +213,87 @@
VkImageLayout dstImageLayout,
uint32_t regionCount,
const VkImageBlit* pRegions,
- VkFilter filter,
- MVKCommandUse commandUse) {
- _srcImage = (MVKImage*)srcImage;
- _srcLayout = srcImageLayout;
- _dstImage = (MVKImage*)dstImage;
- _dstLayout = dstImageLayout;
+ VkFilter filter,
+ MVKCommandUse commandUse) {
- _mtlPixFmt = _dstImage->getMTLPixelFormat();
- _mtlFilter = mvkMTLSamplerMinMagFilterFromVkFilter(filter);
+ MVKCmdCopyImage::setContent(srcImage, srcImageLayout, dstImage, dstImageLayout, commandUse);
- _blitKey.mtlPixFmt = (uint32_t)_mtlPixFmt;
- _blitKey.mtlTexType = (uint32_t)_srcImage->getMTLTextureType();
+ _mtlFilter = mvkMTLSamplerMinMagFilterFromVkFilter(filter);
- _commandUse = commandUse;
-
- // Determine which regions can be copied and which must be rendered to the destination texture
- bool canCopyRegion[regionCount];
- uint32_t copyRegionCount = 0;
- uint32_t renderRegionCount = 0;
- for (uint32_t i = 0; i < regionCount; i++) {
- const VkImageBlit* pRegion = &pRegions[i];
- uint32_t layCnt = pRegion->srcSubresource.layerCount;
- if ( canCopy(pRegion) && (_srcImage->getMTLPixelFormat() == _mtlPixFmt) ) {
- canCopyRegion[i] = true;
- copyRegionCount += layCnt;
- } else {
- canCopyRegion[i] = false;
- renderRegionCount += layCnt;
- }
- }
-
- // Add copy and BLIT regions accordingly
- _mtlTexCopyRegions.clear(); // Clear for reuse
- _mtlTexCopyRegions.reserve(copyRegionCount);
- _mtlTexBlitRenders.clear(); // Clear for reuse
- _mtlTexBlitRenders.reserve(renderRegionCount);
+ _blitKey.srcMTLPixelFormat = (uint32_t)_srcMTLPixFmt;
+ _blitKey.srcMTLTextureType = (uint32_t)_srcImage->getMTLTextureType();
+ _blitKey.dstMTLPixelFormat = (uint32_t)_dstMTLPixFmt;
+ _blitKey.dstSampleCount = _dstSampleCount;
for (uint32_t i = 0; i < regionCount; i++) {
- const VkImageBlit* pRegion = &pRegions[i];
- if (canCopyRegion[i]) {
- addMetalCopyRegions(pRegion);
- } else {
- addMetalBlitRenders(pRegion);
- }
+ addImageBlitRegion(pRegions[i]);
}
- // Validate
- if (_blitKey.isDepthFormat() && renderRegionCount > 0) {
- setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdBlitImage(): Scaling of depth/stencil images is not supported."));
- }
- if ( !_mtlTexBlitRenders.empty() && mvkMTLPixelFormatIsStencilFormat(_mtlPixFmt)) {
- setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdBlitImage(): Stencil image formats cannot be scaled or inverted."));
- }
+ // Validate
+ if ( !_mvkImageBlitRenders.empty() &&
+ (mvkMTLPixelFormatIsDepthFormat(_srcMTLPixFmt) ||
+ mvkMTLPixelFormatIsStencilFormat(_srcMTLPixFmt)) ) {
+
+ setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCmdBlitImage(): Scaling or inverting depth/stencil images is not supported."));
+ _mvkImageBlitRenders.clear();
+ }
}
-bool MVKCmdBlitImage::canCopy(const VkImageBlit* pRegion) {
- VkOffset3D srcSize = mvkVkOffset3DDifference(pRegion->srcOffsets[1], pRegion->srcOffsets[0]);
- VkOffset3D dstSize = mvkVkOffset3DDifference(pRegion->dstOffsets[1], pRegion->dstOffsets[0]);
+void MVKCmdBlitImage::addImageBlitRegion(const VkImageBlit& region) {
+ if (_canCopyFormats && canCopy(region)) {
+ addImageCopyRegionFromBlitRegion(region); // Convert to image copy
+ } else {
+ MVKImageBlitRender blitRender;
+ blitRender.region = region;
+ populateVertices(blitRender.vertices, region);
+ _mvkImageBlitRenders.push_back(blitRender);
+ }
+}
- // The source and destination sizes must be equal and not be negative in any direction
+// The source and destination sizes must be equal and not be negative in any direction
+bool MVKCmdBlitImage::canCopy(const VkImageBlit& region) {
+ VkOffset3D srcSize = mvkVkOffset3DDifference(region.srcOffsets[1], region.srcOffsets[0]);
+ VkOffset3D dstSize = mvkVkOffset3DDifference(region.dstOffsets[1], region.dstOffsets[0]);
return (mvkVkOffset3DsAreEqual(srcSize, dstSize) &&
(srcSize.x >= 0) && (srcSize.y >= 0) && (srcSize.z >= 0));
}
-// Adds a Metal copy region structure for each layer in the specified BLIT region.
-void MVKCmdBlitImage::addMetalCopyRegions(const VkImageBlit* pRegion) {
+void MVKCmdBlitImage::addImageCopyRegionFromBlitRegion(const VkImageBlit& region) {
+ const VkOffset3D& so0 = region.srcOffsets[0];
+ const VkOffset3D& so1 = region.srcOffsets[1];
- const VkOffset3D* pSo0 = &pRegion->srcOffsets[0];
- const VkOffset3D* pSo1 = &pRegion->srcOffsets[1];
+ VkImageCopy cpyRgn;
+ cpyRgn.srcSubresource = region.srcSubresource;
+ cpyRgn.srcOffset = region.srcOffsets[0];
+ cpyRgn.dstSubresource = region.dstSubresource;
+ cpyRgn.dstOffset = region.dstOffsets[0];
+ cpyRgn.extent.width = so1.x - so0.x;
+ cpyRgn.extent.height = so1.y - so0.y;
+ cpyRgn.extent.depth = so1.z - so0.z;
- MVKMetalCopyTextureRegion mtlImgRgn;
- mtlImgRgn.srcOrigin = mvkMTLOriginFromVkOffset3D(*pSo0);
- mtlImgRgn.dstOrigin = mvkMTLOriginFromVkOffset3D(pRegion->dstOffsets[0]);
- mtlImgRgn.srcSize = MTLSizeMake((pSo1->x - pSo0->x), (pSo1->y - pSo0->y), (pSo1->z - pSo0->z));
- mtlImgRgn.srcLevel = pRegion->srcSubresource.mipLevel;
- mtlImgRgn.dstLevel = pRegion->dstSubresource.mipLevel;
-
- uint32_t srcBaseLayer = pRegion->srcSubresource.baseArrayLayer;
- uint32_t dstBaseLayer = pRegion->dstSubresource.baseArrayLayer;
- uint32_t layCnt = pRegion->srcSubresource.layerCount;
-
- for (uint32_t layIdx = 0; layIdx < layCnt; layIdx++) {
- mtlImgRgn.srcSlice = srcBaseLayer + layIdx;
- mtlImgRgn.dstSlice = dstBaseLayer + layIdx;
- _mtlTexCopyRegions.push_back(mtlImgRgn);
- }
+ MVKCmdCopyImage::addImageCopyRegion(cpyRgn);
}
-// Adds a Metal BLIT render region structure for each layer in the specified BLIT region.
-void MVKCmdBlitImage::addMetalBlitRenders(const VkImageBlit* pRegion) {
-
- MVKMetalBlitTextureRender mtlBlitRndr;
- mtlBlitRndr.srcLevel = pRegion->srcSubresource.mipLevel;
- mtlBlitRndr.dstLevel = pRegion->dstSubresource.mipLevel;
- populateVertices(mtlBlitRndr.vertices, pRegion);
-
- uint32_t srcBaseLayer = pRegion->srcSubresource.baseArrayLayer;
- uint32_t dstBaseLayer = pRegion->dstSubresource.baseArrayLayer;
- uint32_t layCnt = pRegion->srcSubresource.layerCount;
-
- for (uint32_t layIdx = 0; layIdx < layCnt; layIdx++) {
- mtlBlitRndr.srcSlice = srcBaseLayer + layIdx;
- mtlBlitRndr.dstSlice = dstBaseLayer + layIdx;
- _mtlTexBlitRenders.push_back(mtlBlitRndr);
- }
-}
-
-// Populates the vertices in the specified array from the specified region.
-void MVKCmdBlitImage::populateVertices(MVKVertexPosTex* vertices, const VkImageBlit* pRegion) {
- const VkOffset3D* pSo0 = &pRegion->srcOffsets[0];
- const VkOffset3D* pSo1 = &pRegion->srcOffsets[1];
- const VkOffset3D* pDo0 = &pRegion->dstOffsets[0];
- const VkOffset3D* pDo1 = &pRegion->dstOffsets[1];
+void MVKCmdBlitImage::populateVertices(MVKVertexPosTex* vertices, const VkImageBlit& region) {
+ const VkOffset3D& so0 = region.srcOffsets[0];
+ const VkOffset3D& so1 = region.srcOffsets[1];
+ const VkOffset3D& do0 = region.dstOffsets[0];
+ const VkOffset3D& do1 = region.dstOffsets[1];
// Get the extents of the source and destination textures.
- VkExtent3D srcExtent = _srcImage->getExtent3D(pRegion->srcSubresource.mipLevel);
- VkExtent3D dstExtent = _dstImage->getExtent3D(pRegion->dstSubresource.mipLevel);
+ VkExtent3D srcExtent = _srcImage->getExtent3D(region.srcSubresource.mipLevel);
+ VkExtent3D dstExtent = _dstImage->getExtent3D(region.dstSubresource.mipLevel);
// Determine the bottom-left and top-right corners of the source and destination
// texture regions, each as a fraction of the corresponding texture size.
- CGPoint srcBL = CGPointMake((CGFloat)(pSo0->x) / (CGFloat)srcExtent.width,
- (CGFloat)(srcExtent.height - pSo1->y) / (CGFloat)srcExtent.height);
- CGPoint srcTR = CGPointMake((CGFloat)(pSo1->x) / (CGFloat)srcExtent.width,
- (CGFloat)(srcExtent.height - pSo0->y) / (CGFloat)srcExtent.height);
- CGPoint dstBL = CGPointMake((CGFloat)(pDo0->x) / (CGFloat)dstExtent.width,
- (CGFloat)(dstExtent.height - pDo1->y) / (CGFloat)dstExtent.height);
- CGPoint dstTR = CGPointMake((CGFloat)(pDo1->x) / (CGFloat)dstExtent.width,
- (CGFloat)(dstExtent.height - pDo0->y) / (CGFloat)dstExtent.height);
+ CGPoint srcBL = CGPointMake((CGFloat)(so0.x) / (CGFloat)srcExtent.width,
+ (CGFloat)(srcExtent.height - so1.y) / (CGFloat)srcExtent.height);
+ CGPoint srcTR = CGPointMake((CGFloat)(so1.x) / (CGFloat)srcExtent.width,
+ (CGFloat)(srcExtent.height - so0.y) / (CGFloat)srcExtent.height);
+ CGPoint dstBL = CGPointMake((CGFloat)(do0.x) / (CGFloat)dstExtent.width,
+ (CGFloat)(dstExtent.height - do1.y) / (CGFloat)dstExtent.height);
+ CGPoint dstTR = CGPointMake((CGFloat)(do1.x) / (CGFloat)dstExtent.width,
+ (CGFloat)(dstExtent.height - do0.y) / (CGFloat)dstExtent.height);
// The destination region is used for vertex positions,
// which are bounded by (-1.0 < p < 1.0) in clip-space.
@@ -282,7 +323,7 @@
pVtx->position.y = dstTR.y;
pVtx->texCoord.x = srcBL.x;
pVtx->texCoord.y = (1.0 - srcTR.y);
-
+
// Top right vertex
pVtx = &vertices[3];
pVtx->position.x = dstTR.x;
@@ -293,50 +334,56 @@
void MVKCmdBlitImage::encode(MVKCommandEncoder* cmdEncoder) {
- // Perform those BLITs that can be covered by simple texture copying.
- if ( !_mtlTexCopyRegions.empty() ) {
- MVKCmdCopyImage::encode(cmdEncoder);
- }
+ // Perform those BLITs that can be covered by simple texture copying.
+ if ( !_imageCopyRegions.empty() ) {
+ MVKCmdCopyImage::encode(cmdEncoder);
+ }
- // Perform those BLITs that require rendering to destination texture.
- if ( !_mtlTexBlitRenders.empty() && !_blitKey.isDepthFormat() ) {
+ // Perform those BLITs that require rendering to destination texture.
+ if ( !_mvkImageBlitRenders.empty() ) {
- cmdEncoder->endCurrentMetalEncoding();
+ cmdEncoder->endCurrentMetalEncoding();
- id<MTLTexture> srcMTLTex = _srcImage->getMTLTexture();
- id<MTLTexture> dstMTLTex = _dstImage->getMTLTexture();
- if ( !srcMTLTex || !dstMTLTex ) { return; }
+ id<MTLTexture> srcMTLTex = _srcImage->getMTLTexture();
+ id<MTLTexture> dstMTLTex = _dstImage->getMTLTexture();
+ if ( !srcMTLTex || !dstMTLTex ) { return; }
- bool isArrayType = _blitKey.isArrayType();
+ MTLRenderPassColorAttachmentDescriptor* mtlColorAttDesc = _mtlRenderPassDescriptor.colorAttachments[0];
+ mtlColorAttDesc.texture = dstMTLTex;
- MTLRenderPassColorAttachmentDescriptor* mtlColorAttDesc = _mtlRenderPassDescriptor.colorAttachments[0];
- mtlColorAttDesc.texture = dstMTLTex;
+ uint32_t vtxBuffIdx = getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKVertexContentBufferIndex);
- uint32_t vtxBuffIdx = getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKVertexContentBufferIndex);
+ MVKCommandEncodingPool* cmdEncPool = getCommandEncodingPool();
- MVKCommandEncodingPool* cmdEncPool = getCommandEncodingPool();
+ for (auto& bltRend : _mvkImageBlitRenders) {
- for (auto& bltRend : _mtlTexBlitRenders) {
+ mtlColorAttDesc.level = bltRend.region.dstSubresource.mipLevel;
- // Update the render pass descriptor for the texture level and slice, and create a render encoder.
- mtlColorAttDesc.level = bltRend.dstLevel;
- mtlColorAttDesc.slice = bltRend.dstSlice;
- id<MTLRenderCommandEncoder> mtlRendEnc = [cmdEncoder->_mtlCmdBuffer renderCommandEncoderWithDescriptor: _mtlRenderPassDescriptor];
- setLabelIfNotNil(mtlRendEnc, mvkMTLRenderCommandEncoderLabel(_commandUse));
+ uint32_t srcBaseLayer = bltRend.region.srcSubresource.baseArrayLayer;
+ uint32_t dstBaseLayer = bltRend.region.dstSubresource.baseArrayLayer;
- [mtlRendEnc pushDebugGroup: @"vkCmdBlitImage"];
- [mtlRendEnc setRenderPipelineState: cmdEncPool->getCmdBlitImageMTLRenderPipelineState(_blitKey)];
- cmdEncoder->setVertexBytes(mtlRendEnc, bltRend.vertices, sizeof(bltRend.vertices), vtxBuffIdx);
- [mtlRendEnc setFragmentTexture: srcMTLTex atIndex: 0];
- [mtlRendEnc setFragmentSamplerState: cmdEncPool->getCmdBlitImageMTLSamplerState(_mtlFilter) atIndex: 0];
- if (isArrayType) {
- cmdEncoder->setFragmentBytes(mtlRendEnc, &bltRend, sizeof(bltRend), 0);
+ uint32_t layCnt = bltRend.region.srcSubresource.layerCount;
+ for (uint32_t layIdx = 0; layIdx < layCnt; layIdx++) {
+ // Update the render pass descriptor for the texture level and slice, and create a render encoder.
+ mtlColorAttDesc.slice = dstBaseLayer + layIdx;
+ id<MTLRenderCommandEncoder> mtlRendEnc = [cmdEncoder->_mtlCmdBuffer renderCommandEncoderWithDescriptor: _mtlRenderPassDescriptor];
+ setLabelIfNotNil(mtlRendEnc, mvkMTLRenderCommandEncoderLabel(_commandUse));
+
+ [mtlRendEnc pushDebugGroup: @"vkCmdBlitImage"];
+ [mtlRendEnc setRenderPipelineState: cmdEncPool->getCmdBlitImageMTLRenderPipelineState(_blitKey)];
+ cmdEncoder->setVertexBytes(mtlRendEnc, bltRend.vertices, sizeof(bltRend.vertices), vtxBuffIdx);
+
+ [mtlRendEnc setFragmentTexture: srcMTLTex atIndex: 0];
+ [mtlRendEnc setFragmentSamplerState: cmdEncPool->getCmdBlitImageMTLSamplerState(_mtlFilter) atIndex: 0];
+ uint32_t srcSlice = srcBaseLayer + layIdx;
+ cmdEncoder->setFragmentBytes(mtlRendEnc, &srcSlice, sizeof(srcSlice), 0);
+
+ [mtlRendEnc drawPrimitives: MTLPrimitiveTypeTriangleStrip vertexStart: 0 vertexCount: kMVKBlitVertexCount];
+ [mtlRendEnc popDebugGroup];
+ [mtlRendEnc endEncoding];
}
- [mtlRendEnc drawPrimitives: MTLPrimitiveTypeTriangleStrip vertexStart: 0 vertexCount: kMVKBlitVertexCount];
- [mtlRendEnc popDebugGroup];
- [mtlRendEnc endEncoding];
- }
- }
+ }
+ }
}
@@ -397,7 +444,8 @@
addResolveSlices(rslvRgn);
}
- _srcImage->getTransferDescriptorData(_transferImageData);
+ _dstImage->getTransferDescriptorData(_transferImageData);
+ _transferImageData.samples = _srcImage->getSampleCount();
}
/**
@@ -487,7 +535,7 @@
if (expRgnCnt > 0) {
MVKCmdBlitImage expandCmd(&getCommandPool()->_cmdBlitImagePool);
expandCmd.setContent((VkImage)_dstImage, _dstLayout, (VkImage)xfrImage, _dstLayout,
- uint32_t(_expansionRegions.size()), _expansionRegions.data(),
+ expRgnCnt, _expansionRegions.data(),
VK_FILTER_LINEAR, kMVKCommandUseResolveExpandImage);
expandCmd.encode(cmdEncoder);
}
@@ -499,7 +547,7 @@
if (cpyRgnCnt > 0) {
MVKCmdCopyImage copyCmd(&getCommandPool()->_cmdCopyImagePool);
copyCmd.setContent((VkImage)_srcImage, _srcLayout, (VkImage)xfrImage, _dstLayout,
- uint32_t(_copyRegions.size()), _copyRegions.data(), kMVKCommandUseResolveCopyImage);
+ cpyRgnCnt, _copyRegions.data(), kMVKCommandUseResolveCopyImage);
copyCmd.encode(cmdEncoder);
}
@@ -641,10 +689,10 @@
_toImage = toImage;
// Add buffer regions
- _mtlBuffImgCopyRegions.clear(); // Clear for reuse
- _mtlBuffImgCopyRegions.reserve(regionCount);
+ _bufferImageCopyRegions.clear(); // Clear for reuse
+ _bufferImageCopyRegions.reserve(regionCount);
for (uint32_t i = 0; i < regionCount; i++) {
- _mtlBuffImgCopyRegions.push_back(pRegions[i]);
+ _bufferImageCopyRegions.push_back(pRegions[i]);
}
// Validate
@@ -663,7 +711,7 @@
MTLPixelFormat mtlPixFmt = _image->getMTLPixelFormat();
MVKCommandUse cmdUse = _toImage ? kMVKCommandUseCopyBufferToImage : kMVKCommandUseCopyImageToBuffer;
- for (auto& cpyRgn : _mtlBuffImgCopyRegions) {
+ for (auto& cpyRgn : _bufferImageCopyRegions) {
MTLOrigin mtlTxtOrigin = mvkMTLOriginFromVkOffset3D(cpyRgn.imageOffset);
MTLSize mtlTxtSize = mvkMTLSizeFromVkExtent3D(cpyRgn.imageExtent);
@@ -683,8 +731,8 @@
MTLBlitOption blitOptions = MTLBlitOptionNone;
if (mvkMTLPixelFormatIsDepthFormat(mtlPixFmt) && mvkMTLPixelFormatIsStencilFormat(mtlPixFmt)) {
VkImageAspectFlags imgFlags = cpyRgn.imageSubresource.aspectMask;
- bool wantDepth = mvkAreFlagsEnabled(imgFlags, VK_IMAGE_ASPECT_DEPTH_BIT);
- bool wantStencil = mvkAreFlagsEnabled(imgFlags, VK_IMAGE_ASPECT_STENCIL_BIT);
+ bool wantDepth = mvkAreAllFlagsEnabled(imgFlags, VK_IMAGE_ASPECT_DEPTH_BIT);
+ bool wantStencil = mvkAreAllFlagsEnabled(imgFlags, VK_IMAGE_ASPECT_STENCIL_BIT);
// The stencil component is always 1 byte per pixel.
// Don't reduce depths of 32-bit depth/stencil formats.
@@ -708,12 +756,10 @@
#endif
#if MVK_MACOS
- if (_toImage && mvkFormatTypeFromMTLPixelFormat(mtlPixFmt) == kMVKFormatCompressed &&
- mtlTexture.textureType == MTLTextureType3D) {
- // If we're copying to a compressed 3D image, the image data need to be decompressed.
- // If we're copying to mip level 0, we can skip the copy and just decode
- // directly into the image. Otherwise, we need to use an intermediate
- // buffer.
+ // If we're copying to a compressed 3D image, the image data needs to be decompressed.
+ // If we're copying to mip level 0, we can skip the copy and just decode
+ // directly into the image. Otherwise, we need to use an intermediate buffer.
+ if (_toImage && _image->getIsCompressed() && mtlTexture.textureType == MTLTextureType3D) {
MVKCmdCopyBufferToImageInfo info;
info.srcRowStride = bytesPerRow & 0xffffffff;
info.srcRowStrideHigh = bytesPerRow >> 32;
@@ -778,6 +824,10 @@
if (!needsTempBuff) { continue; }
}
#endif
+
+ // Don't supply bytes per image if not an arrayed texture
+ if ( !isArrayTexture() ) { bytesPerImg = 0; }
+
id<MTLBlitCommandEncoder> mtlBlitEnc = cmdEncoder->getMTLBlitEncoder(cmdUse);
for (uint32_t lyrIdx = 0; lyrIdx < cpyRgn.imageSubresource.layerCount; lyrIdx++) {
@@ -808,6 +858,16 @@
}
}
+bool MVKCmdBufferImageCopy::isArrayTexture() {
+ MTLTextureType mtlTexType = _image->getMTLTextureType();
+ return (mtlTexType == MTLTextureType3D ||
+ mtlTexType == MTLTextureType2DArray ||
+#if MVK_MACOS
+ mtlTexType == MTLTextureType2DMultisampleArray ||
+#endif
+ mtlTexType == MTLTextureType1DArray);
+}
+
#pragma mark -
#pragma mark MVKCmdClearAttachments
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
index 48fdab7..40dfbd9 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
@@ -42,14 +42,14 @@
_canAcceptCommands = true;
VkCommandBufferUsageFlags usage = pBeginInfo->flags;
- _isReusable = !mvkAreFlagsEnabled(usage, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
- _supportsConcurrentExecution = mvkAreFlagsEnabled(usage, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
+ _isReusable = !mvkAreAllFlagsEnabled(usage, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
+ _supportsConcurrentExecution = mvkAreAllFlagsEnabled(usage, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
// If this is a secondary command buffer, and contains inheritance info, set the inheritance info and determine
// whether it contains render pass continuation info. Otherwise, clear the inheritance info, and ignore it.
const VkCommandBufferInheritanceInfo* pInheritInfo = (_isSecondary ? pBeginInfo->pInheritanceInfo : NULL);
bool hasInheritInfo = mvkSetOrClear(&_secondaryInheritanceInfo, pInheritInfo);
- _doesContinueRenderPass = mvkAreFlagsEnabled(usage, VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) && hasInheritInfo;
+ _doesContinueRenderPass = mvkAreAllFlagsEnabled(usage, VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) && hasInheritInfo;
return getConfigurationResult();
}
@@ -76,7 +76,7 @@
_initialVisibilityResultMTLBuffer = nil; // not retained
setConfigurationResult(VK_NOT_READY);
- if (mvkAreFlagsEnabled(flags, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT)) {
+ if (mvkAreAllFlagsEnabled(flags, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT)) {
// TODO: what are we releasing or returning here?
}
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
index 328ff5c..033f613 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
@@ -272,10 +272,10 @@
if ( !(_cmdEncoder->supportsDynamicState(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK) &&
mvkIsAnyFlagEnabled(faceMask, VK_STENCIL_FRONT_AND_BACK)) ) { return; }
- if (mvkAreFlagsEnabled(faceMask, VK_STENCIL_FACE_FRONT_BIT)) {
+ if (mvkAreAllFlagsEnabled(faceMask, VK_STENCIL_FACE_FRONT_BIT)) {
_depthStencilData.frontFaceStencilData.readMask = stencilCompareMask;
}
- if (mvkAreFlagsEnabled(faceMask, VK_STENCIL_FACE_BACK_BIT)) {
+ if (mvkAreAllFlagsEnabled(faceMask, VK_STENCIL_FACE_BACK_BIT)) {
_depthStencilData.backFaceStencilData.readMask = stencilCompareMask;
}
@@ -289,10 +289,10 @@
if ( !(_cmdEncoder->supportsDynamicState(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK) &&
mvkIsAnyFlagEnabled(faceMask, VK_STENCIL_FRONT_AND_BACK)) ) { return; }
- if (mvkAreFlagsEnabled(faceMask, VK_STENCIL_FACE_FRONT_BIT)) {
+ if (mvkAreAllFlagsEnabled(faceMask, VK_STENCIL_FACE_FRONT_BIT)) {
_depthStencilData.frontFaceStencilData.writeMask = stencilWriteMask;
}
- if (mvkAreFlagsEnabled(faceMask, VK_STENCIL_FACE_BACK_BIT)) {
+ if (mvkAreAllFlagsEnabled(faceMask, VK_STENCIL_FACE_BACK_BIT)) {
_depthStencilData.backFaceStencilData.writeMask = stencilWriteMask;
}
@@ -342,10 +342,10 @@
if ( !(_cmdEncoder->supportsDynamicState(VK_DYNAMIC_STATE_STENCIL_REFERENCE) &&
mvkIsAnyFlagEnabled(faceMask, VK_STENCIL_FRONT_AND_BACK)) ) { return; }
- if (mvkAreFlagsEnabled(faceMask, VK_STENCIL_FACE_FRONT_BIT)) {
+ if (mvkAreAllFlagsEnabled(faceMask, VK_STENCIL_FACE_FRONT_BIT)) {
_frontFaceValue = stencilReference;
}
- if (mvkAreFlagsEnabled(faceMask, VK_STENCIL_FACE_BACK_BIT)) {
+ if (mvkAreAllFlagsEnabled(faceMask, VK_STENCIL_FACE_BACK_BIT)) {
_backFaceValue = stencilReference;
}
@@ -793,7 +793,7 @@
NSUInteger offset = pQueryPool->getVisibilityResultOffset(query);
NSUInteger maxOffset = _cmdEncoder->_pDeviceMetalFeatures->maxQueryBufferSize - kMVKQuerySlotSizeInBytes;
- bool shouldCount = _cmdEncoder->_pDeviceFeatures->occlusionQueryPrecise && mvkAreFlagsEnabled(flags, VK_QUERY_CONTROL_PRECISE_BIT);
+ bool shouldCount = _cmdEncoder->_pDeviceFeatures->occlusionQueryPrecise && mvkAreAllFlagsEnabled(flags, VK_QUERY_CONTROL_PRECISE_BIT);
_mtlVisibilityResultMode = shouldCount ? MTLVisibilityResultModeCounting : MTLVisibilityResultModeBoolean;
_mtlVisibilityResultOffset = min(offset, maxOffset);
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm b/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
index 3bdb118..1c5d51e 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandPool.mm
@@ -33,7 +33,7 @@
// Reset all of the command buffers
VkResult MVKCommandPool::reset(VkCommandPoolResetFlags flags) {
- bool releaseRez = mvkAreFlagsEnabled(flags, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
+ bool releaseRez = mvkAreAllFlagsEnabled(flags, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
VkCommandBufferResetFlags cmdBuffFlags = releaseRez ? VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT : 0;
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h
index 55bef82..0749be9 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.h
@@ -35,25 +35,38 @@
* This structure can be used as a key in a std::map and std::unordered_map.
*/
typedef struct MVKRPSKeyBlitImg_t {
- uint16_t mtlPixFmt = 0; /**< MTLPixelFormat */
- uint16_t mtlTexType = 0; /**< MTLTextureType */
+ uint16_t srcMTLPixelFormat = 0; /**< as MTLPixelFormat */
+ uint16_t srcMTLTextureType = 0; /**< as MTLTextureType */
+ uint16_t dstMTLPixelFormat = 0; /**< as MTLPixelFormat */
+ uint16_t dstSampleCount = 0;
bool operator==(const MVKRPSKeyBlitImg_t& rhs) const {
- return ((mtlPixFmt == rhs.mtlPixFmt) && (mtlTexType == rhs.mtlTexType));
+ if (srcMTLPixelFormat != rhs.srcMTLPixelFormat) { return false; }
+ if (srcMTLTextureType != rhs.srcMTLTextureType) { return false; }
+ if (dstMTLPixelFormat != rhs.dstMTLPixelFormat) { return false; }
+ if (dstSampleCount != rhs.dstSampleCount) { return false; }
+ return true;
}
- inline MTLPixelFormat getMTLPixelFormat() { return (MTLPixelFormat)mtlPixFmt; }
+ inline MTLPixelFormat getSrcMTLPixelFormat() { return (MTLPixelFormat)srcMTLPixelFormat; }
- inline bool isDepthFormat() { return mvkMTLPixelFormatIsDepthFormat(getMTLPixelFormat()); }
+ inline MTLPixelFormat getDstMTLPixelFormat() { return (MTLPixelFormat)dstMTLPixelFormat; }
- inline MTLTextureType getMTLTextureType() { return (MTLTextureType)mtlTexType; }
-
- inline bool isArrayType() { return (mtlTexType == MTLTextureType2DArray) || (mtlTexType == MTLTextureType1DArray); }
+ inline bool isSrcArrayType() {
+ return (srcMTLTextureType == MTLTextureType2DArray ||
+#if MVK_MACOS
+ srcMTLTextureType == MTLTextureType2DMultisampleArray ||
+#endif
+ srcMTLTextureType == MTLTextureType1DArray); }
std::size_t hash() const {
- std::size_t hash = mtlTexType;
+ std::size_t hash = srcMTLPixelFormat;
hash <<= 16;
- hash |= mtlPixFmt;
+ hash |= srcMTLTextureType;
+ hash <<= 16;
+ hash |= dstMTLPixelFormat;
+ hash <<= 16;
+ hash |= dstSampleCount;
return hash;
}
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm
index 29d56a7..bf86b8f 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandResourceFactory.mm
@@ -38,8 +38,9 @@
plDesc.vertexFunction = getFunctionNamed("vtxCmdBlitImage");
plDesc.fragmentFunction = getBlitFragFunction(blitKey);
+ plDesc.sampleCount = blitKey.dstSampleCount;
- plDesc.colorAttachments[0].pixelFormat = blitKey.getMTLPixelFormat();
+ plDesc.colorAttachments[0].pixelFormat = blitKey.getDstMTLPixelFormat();
MTLVertexDescriptor* vtxDesc = plDesc.vertexDescriptor;
@@ -132,11 +133,11 @@
id<MTLFunction> MVKCommandResourceFactory::getBlitFragFunction(MVKRPSKeyBlitImg& blitKey) {
id<MTLFunction> mtlFunc = nil;
- NSString* typeStr = getMTLFormatTypeString(blitKey.getMTLPixelFormat());
+ NSString* typeStr = getMTLFormatTypeString(blitKey.getSrcMTLPixelFormat());
- bool isArrayType = blitKey.isArrayType();
+ bool isArrayType = blitKey.isSrcArrayType();
NSString* arraySuffix = isArrayType ? @"_array" : @"";
- NSString* sliceArg = isArrayType ? @", blitInfo.srcSlice" : @"";
+ NSString* sliceArg = isArrayType ? @", srcSlice" : @"";
@autoreleasepool {
NSMutableString* msl = [NSMutableString stringWithCapacity: (2 * KIBI) ];
@@ -148,27 +149,14 @@
[msl appendLineMVK: @" float2 v_texCoord;"];
[msl appendLineMVK: @"} VaryingsPosTex;"];
[msl appendLineMVK];
- if (isArrayType) {
- [msl appendLineMVK: @"typedef struct {"];
- [msl appendLineMVK: @" uint srcLevel;"];
- [msl appendLineMVK: @" uint srcSlice;"];
- [msl appendLineMVK: @" uint dstLevel;"];
- [msl appendLineMVK: @" uint dstSlice;"];
- [msl appendLineMVK: @"} BlitInfo;"];
- [msl appendLineMVK];
- }
NSString* funcName = @"fragBlit";
[msl appendFormat: @"fragment %@4 %@(VaryingsPosTex varyings [[stage_in]],", typeStr, funcName];
[msl appendLineMVK];
[msl appendFormat: @" texture2d%@<%@> texture [[texture(0)]],", arraySuffix, typeStr];
[msl appendLineMVK];
- if (isArrayType) {
- [msl appendLineMVK: @" sampler sampler [[sampler(0)]],"];
- [msl appendLineMVK: @" constant BlitInfo& blitInfo [[buffer(0)]]) {"];
- } else {
- [msl appendLineMVK: @" sampler sampler [[sampler(0)]]) {"];
- }
+ [msl appendLineMVK: @" sampler sampler [[sampler(0)]],"];
+ [msl appendLineMVK: @" constant uint& srcSlice [[buffer(0)]]) {"];
[msl appendFormat: @" return texture.sample(sampler, varyings.v_texCoord%@);", sliceArg];
[msl appendLineMVK];
[msl appendLineMVK: @"}"];
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
index 832f460..1396bac 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
@@ -409,7 +409,7 @@
for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
// Determine if this binding is used by this shader stage
- _applyToStage[i] = mvkAreFlagsEnabled(pBinding->stageFlags, mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)));
+ _applyToStage[i] = mvkAreAllFlagsEnabled(pBinding->stageFlags, mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)));
// If this binding is used by the shader, set the Metal resource index
if (_applyToStage[i]) {
initMetalResourceIndexOffsets(&_mtlResourceIndexOffsets.stages[i],
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index 58647c0..e697ab8 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -221,6 +221,7 @@
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
+ MVKFormatType mvkFmt = mvkFormatTypeFromVkFormat(format);
bool hasAttachmentUsage = mvkIsAnyFlagEnabled(usage, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT |
@@ -241,8 +242,7 @@
if (tiling == VK_IMAGE_TILING_LINEAR) { return VK_ERROR_FORMAT_NOT_SUPPORTED; }
// Metal does not allow compressed or depth/stencil formats on 1D textures
- if (mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil ||
- mvkFormatTypeFromVkFormat(format) == kMVKFormatCompressed) {
+ if (mvkFmt == kMVKFormatDepthStencil || mvkFmt == kMVKFormatCompressed) {
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
maxExt.width = pLimits->maxImageDimension1D;
@@ -263,8 +263,7 @@
if (tiling == VK_IMAGE_TILING_LINEAR) {
// Linear textures have additional restrictions under Metal:
// - They may not be depth/stencil or compressed textures.
- if (mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil ||
- mvkFormatTypeFromVkFormat(format) == kMVKFormatCompressed) {
+ if (mvkFmt == kMVKFormatDepthStencil || mvkFmt == kMVKFormatCompressed) {
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
#if MVK_MACOS
@@ -281,7 +280,7 @@
// Compressed multisampled textures aren't supported.
// Multisampled cube textures aren't supported.
// Non-renderable multisampled textures aren't supported.
- if (mvkFormatTypeFromVkFormat(format) == kMVKFormatCompressed ||
+ if (mvkFmt == kMVKFormatCompressed ||
mvkIsAnyFlagEnabled(flags, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) ||
!mvkIsAnyFlagEnabled(fmtProps.optimalTilingFeatures, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT|VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) ) {
sampleCounts = VK_SAMPLE_COUNT_1_BIT;
@@ -295,18 +294,17 @@
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
// Metal does not allow compressed or depth/stencil formats on 3D textures
- if (mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil
+ if (mvkFmt == kMVKFormatDepthStencil
#if MVK_IOS
- || mvkFormatTypeFromVkFormat(format) == kMVKFormatCompressed
+ || mvkFmt == kMVKFormatCompressed
#endif
) {
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
#if MVK_MACOS
- if (mvkFormatTypeFromVkFormat(format) == kMVKFormatCompressed) {
- // If this is a compressed format and there's no codec, it isn't
- // supported.
- if (!mvkCanDecodeFormat(format) ) { return VK_ERROR_FORMAT_NOT_SUPPORTED; }
+ // If this is a compressed format and there's no codec, it isn't supported.
+ if ((mvkFmt == kMVKFormatCompressed) && !mvkCanDecodeFormat(format)) {
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
#endif
maxExt.width = pLimits->maxImageDimension3D;
@@ -321,8 +319,7 @@
if (tiling == VK_IMAGE_TILING_LINEAR) { return VK_ERROR_FORMAT_NOT_SUPPORTED; }
// Metal does not allow compressed or depth/stencil formats on anything but 2D textures
- if (mvkFormatTypeFromVkFormat(format) == kMVKFormatDepthStencil ||
- mvkFormatTypeFromVkFormat(format) == kMVKFormatCompressed) {
+ if (mvkFmt == kMVKFormatDepthStencil || mvkFmt == kMVKFormatCompressed) {
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
maxExt = { 1, 1, 1};
@@ -1814,7 +1811,7 @@
// See if this pipeline has a parent. This can come either directly
// via basePipelineHandle or indirectly via basePipelineIndex.
MVKPipeline* parentPL = VK_NULL_HANDLE;
- if ( mvkAreFlagsEnabled(pCreateInfo->flags, VK_PIPELINE_CREATE_DERIVATIVE_BIT) ) {
+ if ( mvkAreAllFlagsEnabled(pCreateInfo->flags, VK_PIPELINE_CREATE_DERIVATIVE_BIT) ) {
VkPipeline vkParentPL = pCreateInfo->basePipelineHandle;
int32_t parentPLIdx = pCreateInfo->basePipelineIndex;
if ( !vkParentPL && (parentPLIdx >= 0)) { vkParentPL = pPipelines[parentPLIdx]; }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
index db47a1d..7403d03 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
@@ -61,6 +61,9 @@
/** Returns the Vulkan image format of this image. */
VkFormat getVkFormat();
+ /** Returns whether this texture is compressed. */
+ bool getIsCompressed();
+
/**
* Returns the 3D extent of this image at the base mipmap level.
* For 2D or cube images, the Z component will be 1.
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index cc977a5..c08c036 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -41,6 +41,10 @@
VkFormat MVKImage::getVkFormat() { return mvkVkFormatFromMTLPixelFormat(_mtlPixelFormat); }
+bool MVKImage::getIsCompressed() {
+ return mvkFormatTypeFromMTLPixelFormat(_mtlPixelFormat) == kMVKFormatCompressed;
+}
+
VkExtent3D MVKImage::getExtent3D(uint32_t mipLevel) {
return mvkMipmapLevelSizeFromBaseSize3D(_extent, mipLevel);
}
@@ -163,7 +167,7 @@
#endif
#if MVK_IOS
// Only transient attachments may use memoryless storage
- if (!mvkAreFlagsEnabled(_usage, VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) ) {
+ if (!mvkAreAllFlagsEnabled(_usage, VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) ) {
mvkDisableFlag(pMemoryRequirements->memoryTypeBits, _device->getPhysicalDevice()->getLazilyAllocatedMemoryTypes());
}
#endif
@@ -373,16 +377,15 @@
}
MTLTextureUsage MVKImage::getMTLTextureUsage() {
+
MTLTextureUsage usage = mvkMTLTextureUsageFromVkImageUsageFlags(_usage);
- // If this is a depth/stencil texture, and the device supports it, tell
- // Metal we may create texture views of this, too.
- if ((_mtlPixelFormat == MTLPixelFormatDepth32Float_Stencil8
-#if MVK_MACOS
- || _mtlPixelFormat == MTLPixelFormatDepth24Unorm_Stencil8
-#endif
- ) && _device->_pMetalFeatures->stencilViews) {
- mvkEnableFlag(usage, MTLTextureUsagePixelFormatView);
+ // Remove view usage from D/S if Metal doesn't support it
+ if ( !_device->_pMetalFeatures->stencilViews &&
+ mvkMTLPixelFormatIsDepthFormat(_mtlPixelFormat) &&
+ mvkMTLPixelFormatIsStencilFormat(_mtlPixelFormat)) {
+
+ mvkDisableFlag(usage, MTLTextureUsagePixelFormatView);
}
// If this format doesn't support being blitted to, and the usage
@@ -390,7 +393,7 @@
// MTLTextureUsageRenderTarget.
VkFormatProperties props;
_device->getPhysicalDevice()->getFormatProperties(getVkFormat(), &props);
- if (!mvkAreFlagsEnabled(_isLinear ? props.linearTilingFeatures : props.optimalTilingFeatures, VK_FORMAT_FEATURE_BLIT_DST_BIT) &&
+ if (!mvkAreAllFlagsEnabled(_isLinear ? props.linearTilingFeatures : props.optimalTilingFeatures, VK_FORMAT_FEATURE_BLIT_DST_BIT) &&
!mvkIsAnyFlagEnabled(_usage, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
mvkDisableFlag(usage, MTLTextureUsageRenderTarget);
}
@@ -579,8 +582,8 @@
_usage = pCreateInfo->usage;
_is3DCompressed = (pCreateInfo->imageType == VK_IMAGE_TYPE_3D) && (mvkFormatTypeFromVkFormat(pCreateInfo->format) == kMVKFormatCompressed);
- _isDepthStencilAttachment = (mvkAreFlagsEnabled(pCreateInfo->usage, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ||
- mvkAreFlagsEnabled(mvkVkFormatProperties(pCreateInfo->format).optimalTilingFeatures, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT));
+ _isDepthStencilAttachment = (mvkAreAllFlagsEnabled(pCreateInfo->usage, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ||
+ mvkAreAllFlagsEnabled(mvkVkFormatProperties(pCreateInfo->format).optimalTilingFeatures, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT));
_canSupportMTLTextureView = !_isDepthStencilAttachment || _device->_pMetalFeatures->stencilViews;
_hasExpectedTexelSize = (mvkMTLPixelFormatBytesPerBlock(_mtlPixelFormat) == mvkVkFormatBytesPerBlock(pCreateInfo->format));
@@ -890,7 +893,7 @@
if (pCreateInfo->subresourceRange.layerCount != image->_extent.depth) {
reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImageView(): Metal does not fully support views on a subset of a 3D texture.");
}
- if (!mvkAreFlagsEnabled(_usage, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
+ if (!mvkAreAllFlagsEnabled(_usage, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImageView(): 2D views on 3D images are only supported for color attachments."));
} else if (mvkIsAnyFlagEnabled(_usage, ~VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImageView(): 2D views on 3D images are only supported for color attachments.");
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index 237b1b0..d3978df 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -1309,7 +1309,7 @@
MVKMTLFunction MVKComputePipeline::getMTLFunction(const VkComputePipelineCreateInfo* pCreateInfo) {
const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->stage;
- if ( !mvkAreFlagsEnabled(pSS->stage, VK_SHADER_STAGE_COMPUTE_BIT) ) { return MVKMTLFunctionNull; }
+ if ( !mvkAreAllFlagsEnabled(pSS->stage, VK_SHADER_STAGE_COMPUTE_BIT) ) { return MVKMTLFunctionNull; }
SPIRVToMSLConversionConfiguration shaderContext;
shaderContext.options.entryPointName = pCreateInfo->stage.pName;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm b/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
index 1b5e77b..0090244 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
@@ -71,7 +71,7 @@
uint32_t endQuery = firstQuery + queryCount;
- if (mvkAreFlagsEnabled(flags, VK_QUERY_RESULT_WAIT_BIT)) {
+ if (mvkAreAllFlagsEnabled(flags, VK_QUERY_RESULT_WAIT_BIT)) {
_availabilityBlocker.wait(lock, [this, firstQuery, endQuery]{
return areQueriesHostAvailable(firstQuery, endQuery);
});
@@ -104,14 +104,14 @@
VkResult MVKQueryPool::getResult(uint32_t query, void* pQryData, VkQueryResultFlags flags) {
bool isAvailable = _availability[query] == Available;
- bool shouldOutput = (isAvailable || mvkAreFlagsEnabled(flags, VK_QUERY_RESULT_PARTIAL_BIT));
- bool shouldOutput64Bit = mvkAreFlagsEnabled(flags, VK_QUERY_RESULT_64_BIT);
+ bool shouldOutput = (isAvailable || mvkAreAllFlagsEnabled(flags, VK_QUERY_RESULT_PARTIAL_BIT));
+ bool shouldOutput64Bit = mvkAreAllFlagsEnabled(flags, VK_QUERY_RESULT_64_BIT);
// Output the results of this query
if (shouldOutput) { getResult(query, pQryData, shouldOutput64Bit); }
// If requested, output the availability bit
- if (mvkAreFlagsEnabled(flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
+ if (mvkAreAllFlagsEnabled(flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
if (shouldOutput64Bit) {
uintptr_t pAvailability = (uintptr_t)pQryData + (_queryElementCount * sizeof(uint64_t));
*(uint64_t*)pAvailability = isAvailable;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
index 42a3b8b..992f1cb 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
@@ -268,7 +268,7 @@
.flags = 0,
};
- if (mvkAreFlagsEnabled(pCreateInfo->flags, VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR)) {
+ if (mvkAreAllFlagsEnabled(pCreateInfo->flags, VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR)) {
mvkEnableFlag(imgInfo.flags, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT);
}
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h
index 1fa52cf..fa5f647 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSync.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.h
@@ -197,7 +197,7 @@
#pragma mark Construction
MVKFence(MVKDevice* device, const VkFenceCreateInfo* pCreateInfo) :
- MVKVulkanAPIDeviceObject(device), _isSignaled(mvkAreFlagsEnabled(pCreateInfo->flags, VK_FENCE_CREATE_SIGNALED_BIT)) {}
+ MVKVulkanAPIDeviceObject(device), _isSignaled(mvkAreAllFlagsEnabled(pCreateInfo->flags, VK_FENCE_CREATE_SIGNALED_BIT)) {}
protected:
void propogateDebugName() override {}
diff --git a/MoltenVK/MoltenVK/Utility/MVKFoundation.h b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
index f6c123b..0fddedd 100644
--- a/MoltenVK/MoltenVK/Utility/MVKFoundation.h
+++ b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
@@ -472,19 +472,19 @@
template<typename T1, typename T2>
void mvkDisableFlag(T1& value, const T2 bitMask) { value &= ~bitMask; }
-/** Returns whether the specified value has ALL of the flags specified in bitMask enabled (set to 1). */
-template<typename T1, typename T2>
-bool mvkAreFlagsEnabled(T1 value, const T2 bitMask) { return ((value & bitMask) == bitMask); }
-
/** Returns whether the specified value has ANY of the flags specified in bitMask enabled (set to 1). */
template<typename T1, typename T2>
bool mvkIsAnyFlagEnabled(T1 value, const T2 bitMask) { return !!(value & bitMask); }
-/** Returns whether the specified value has ONLY ALL of the flags specified in bitMask enabled (set to 1), and none others. */
+/** Returns whether the specified value has ALL of the flags specified in bitMask enabled (set to 1). */
template<typename T1, typename T2>
-bool mvkAreOnlyAllFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
+bool mvkAreAllFlagsEnabled(T1 value, const T2 bitMask) { return ((value & bitMask) == bitMask); }
/** Returns whether the specified value has ONLY one or more of the flags specified in bitMask enabled (set to 1), and none others. */
template<typename T1, typename T2>
-bool mvkAreOnlyAnyFlagsEnabled(T1 value, const T2 bitMask) { return (mvkIsAnyFlagEnabled(value, bitMask) && ((value | bitMask) == bitMask)); }
+bool mvkIsOnlyAnyFlagEnabled(T1 value, const T2 bitMask) { return (mvkIsAnyFlagEnabled(value, bitMask) && ((value | bitMask) == bitMask)); }
+
+/** Returns whether the specified value has ONLY ALL of the flags specified in bitMask enabled (set to 1), and none others. */
+template<typename T1, typename T2>
+bool mvkAreOnlyAllFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
diff --git a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
index 0cdf559..4e4390e 100644
--- a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
+++ b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
@@ -809,64 +809,67 @@
}
MVK_PUBLIC_SYMBOL VkImageType mvkVkImageTypeFromMTLTextureType(MTLTextureType mtlTextureType) {
- switch (mtlTextureType) {
- case MTLTextureType1D: return VK_IMAGE_TYPE_1D;
- case MTLTextureType1DArray: return VK_IMAGE_TYPE_1D;
- case MTLTextureType3D: return VK_IMAGE_TYPE_3D;
- default: return VK_IMAGE_TYPE_2D;
- }
+ switch (mtlTextureType) {
+ case MTLTextureType1D:
+ case MTLTextureType1DArray:
+ return VK_IMAGE_TYPE_1D;
+ case MTLTextureType3D:
+ return VK_IMAGE_TYPE_3D;
+ default:
+ return VK_IMAGE_TYPE_2D;
+ }
}
MVK_PUBLIC_SYMBOL MTLTextureType mvkMTLTextureTypeFromVkImageViewType(VkImageViewType vkImageViewType,
- bool isMultisample) {
- switch (vkImageViewType) {
- case VK_IMAGE_VIEW_TYPE_1D: return MTLTextureType1D;
- case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return MTLTextureType1DArray;
- case VK_IMAGE_VIEW_TYPE_2D: return (isMultisample ? MTLTextureType2DMultisample : MTLTextureType2D);
- case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+ bool isMultisample) {
+ switch (vkImageViewType) {
+ case VK_IMAGE_VIEW_TYPE_1D: return MTLTextureType1D;
+ case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return MTLTextureType1DArray;
+ case VK_IMAGE_VIEW_TYPE_2D: return (isMultisample ? MTLTextureType2DMultisample : MTLTextureType2D);
+ case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
#if MVK_MACOS
- if (isMultisample) {
- return MTLTextureType2DMultisampleArray;
- }
+ if (isMultisample) { return MTLTextureType2DMultisampleArray; }
#endif
- return MTLTextureType2DArray;
- case VK_IMAGE_VIEW_TYPE_3D: return MTLTextureType3D;
- case VK_IMAGE_VIEW_TYPE_CUBE: return MTLTextureTypeCube;
- case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return MTLTextureTypeCubeArray;
- default: return MTLTextureType2D;
- }
+ return MTLTextureType2DArray;
+ case VK_IMAGE_VIEW_TYPE_3D: return MTLTextureType3D;
+ case VK_IMAGE_VIEW_TYPE_CUBE: return MTLTextureTypeCube;
+ case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return MTLTextureTypeCubeArray;
+ default: return MTLTextureType2D;
+ }
}
MVK_PUBLIC_SYMBOL MTLTextureUsage mvkMTLTextureUsageFromVkImageUsageFlags(VkImageUsageFlags vkImageUsageFlags) {
MTLTextureUsage mtlUsage = MTLTextureUsageUnknown;
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageShaderRead);
- }
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_TRANSFER_DST_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageRenderTarget);
- }
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_SAMPLED_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageShaderRead);
- mvkEnableFlag(mtlUsage, MTLTextureUsagePixelFormatView);
- }
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_STORAGE_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageShaderRead);
- mvkEnableFlag(mtlUsage, MTLTextureUsageShaderWrite);
- mvkEnableFlag(mtlUsage, MTLTextureUsagePixelFormatView);
- }
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageShaderRead);
- mvkEnableFlag(mtlUsage, MTLTextureUsagePixelFormatView);
- }
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageRenderTarget);
- mvkEnableFlag(mtlUsage, MTLTextureUsagePixelFormatView);
- }
- if ( mvkAreFlagsEnabled(vkImageUsageFlags, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ) {
- mvkEnableFlag(mtlUsage, MTLTextureUsageRenderTarget);
- mvkDisableFlag(mtlUsage, MTLTextureUsagePixelFormatView); // Clears bit. Do this last.
- }
+ // Read from...
+ if (mvkIsAnyFlagEnabled(vkImageUsageFlags, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))) {
+ mvkEnableFlag(mtlUsage, MTLTextureUsageShaderRead);
+ }
+
+ // Write to...
+ if (mvkIsAnyFlagEnabled(vkImageUsageFlags, (VK_IMAGE_USAGE_STORAGE_BIT))) {
+ mvkEnableFlag(mtlUsage, MTLTextureUsageShaderWrite);
+ }
+
+ // Render to...
+ if (mvkIsAnyFlagEnabled(vkImageUsageFlags, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT))) { // Scaling a BLIT may use rendering.
+ mvkEnableFlag(mtlUsage, MTLTextureUsageRenderTarget);
+ }
+
+ // Create view on...
+ if (mvkIsAnyFlagEnabled(vkImageUsageFlags, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | // May use temp view if transfer involves format change
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT))) { // D/S may be filtered out after device check
+ mvkEnableFlag(mtlUsage, MTLTextureUsagePixelFormatView);
+ }
return mtlUsage;
}
@@ -874,12 +877,12 @@
MVK_PUBLIC_SYMBOL VkImageUsageFlags mvkVkImageUsageFlagsFromMTLTextureUsage(MTLTextureUsage mtlUsage, MTLPixelFormat mtlFormat) {
VkImageUsageFlags vkImageUsageFlags = 0;
- if ( mvkAreFlagsEnabled(mtlUsage, MTLTextureUsageShaderRead) ) {
+ if ( mvkAreAllFlagsEnabled(mtlUsage, MTLTextureUsageShaderRead) ) {
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_SAMPLED_BIT);
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
}
- if ( mvkAreFlagsEnabled(mtlUsage, MTLTextureUsageRenderTarget) ) {
+ if ( mvkAreAllFlagsEnabled(mtlUsage, MTLTextureUsageRenderTarget) ) {
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_TRANSFER_DST_BIT);
if (mvkMTLPixelFormatIsDepthFormat(mtlFormat) || mvkMTLPixelFormatIsStencilFormat(mtlFormat)) {
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
@@ -887,7 +890,7 @@
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
}
}
- if ( mvkAreFlagsEnabled(mtlUsage, MTLTextureUsageShaderWrite) ) {
+ if ( mvkAreAllFlagsEnabled(mtlUsage, MTLTextureUsageShaderWrite) ) {
mvkEnableFlag(vkImageUsageFlags, VK_IMAGE_USAGE_STORAGE_BIT);
}
@@ -1003,10 +1006,10 @@
MVK_PUBLIC_SYMBOL MTLColorWriteMask mvkMTLColorWriteMaskFromVkChannelFlags(VkColorComponentFlags vkWriteFlags) {
MTLColorWriteMask mtlWriteMask = MTLColorWriteMaskNone;
- if (mvkAreFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_R_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskRed); }
- if (mvkAreFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_G_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskGreen); }
- if (mvkAreFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_B_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskBlue); }
- if (mvkAreFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_A_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskAlpha); }
+ if (mvkAreAllFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_R_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskRed); }
+ if (mvkAreAllFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_G_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskGreen); }
+ if (mvkAreAllFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_B_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskBlue); }
+ if (mvkAreAllFlagsEnabled(vkWriteFlags, VK_COLOR_COMPONENT_A_BIT)) { mvkEnableFlag(mtlWriteMask, MTLColorWriteMaskAlpha); }
return mtlWriteMask;
}
@@ -1325,14 +1328,14 @@
// barrier is to be placed before the render stages, it should come before the vertex stage, otherwise
// if the barrier is to be placed after the render stages, it should come after the fragment stage.
if (placeBarrierBefore) {
- bool placeBeforeFragment = mvkAreOnlyAnyFlagsEnabled(vkStages, (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ bool placeBeforeFragment = mvkIsOnlyAnyFlagEnabled(vkStages, (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT));
return placeBeforeFragment ? MTLRenderStageFragment : MTLRenderStageVertex;
} else {
- bool placeAfterVertex = mvkAreOnlyAnyFlagsEnabled(vkStages, (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
+ bool placeAfterVertex = mvkIsOnlyAnyFlagEnabled(vkStages, (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
@@ -1365,10 +1368,10 @@
MVK_PUBLIC_SYMBOL MTLStorageMode mvkMTLStorageModeFromVkMemoryPropertyFlags(VkMemoryPropertyFlags vkFlags) {
// If not visible to the host: Private
- if ( !mvkAreFlagsEnabled(vkFlags, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) ) {
+ if ( !mvkAreAllFlagsEnabled(vkFlags, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) ) {
#if MVK_IOS
// iOS: If lazily allocated, Memoryless
- if (mvkAreFlagsEnabled(vkFlags, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)) {
+ if (mvkAreAllFlagsEnabled(vkFlags, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)) {
return MTLStorageModeMemoryless;
}
#endif
@@ -1376,7 +1379,7 @@
}
// If visible to the host and coherent: Shared
- if (mvkAreFlagsEnabled(vkFlags, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) {
+ if (mvkAreAllFlagsEnabled(vkFlags, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) {
return MTLStorageModeShared;
}