Merge pull request #1384 from billhollings/build-updates

Various build updates
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm
index 7d1d379..7cf349d 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdTransfer.mm
@@ -88,8 +88,6 @@
     VkBufferImageCopy vkDstCopies[copyCnt];
     size_t tmpBuffSize = 0;
 
-    _srcImage->flushToDevice(0, VK_WHOLE_SIZE);
-
     for (uint32_t copyIdx = 0; copyIdx < copyCnt; copyIdx++) {
         auto& vkIC = _vkImageCopies[copyIdx];
         
@@ -1614,11 +1612,9 @@
     NSUInteger dstMTLBuffOffset = _dstBuffer->getMTLBufferOffset() + _dstOffset;
 
     // Copy data to the source MTLBuffer
-    MVKMTLBufferAllocation* srcMTLBufferAlloc = (MVKMTLBufferAllocation*)cmdEncoder->getCommandEncodingPool()->acquireMTLBufferAllocation(_dataSize);
+    MVKMTLBufferAllocation* srcMTLBufferAlloc = cmdEncoder->getCommandEncodingPool()->acquireMTLBufferAllocation(_dataSize);
     void* pBuffData = srcMTLBufferAlloc->getContents();
-    mlock(pBuffData, _dataSize);
     memcpy(pBuffData, _srcDataCache.data(), _dataSize);
-    munlock(pBuffData, _dataSize);
 
     [mtlBlitEnc copyFromBuffer: srcMTLBufferAlloc->_mtlBuffer
                   sourceOffset: srcMTLBufferAlloc->_offset
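
Both hunks above drop the mlock()/munlock() page-wiring around the memcpy() into pooled
staging buffers; with the purgeable-state handling added to MVKMTLBufferAllocationPool
below, the backing MTLBuffer memory stays resident while any allocation from it is live,
so per-copy page-locking is no longer needed. The first hunk also drops the up-front
flushToDevice() on the source image, presumably unnecessary now that coherent memory is
synchronized at map/unmap time (see the MVKDeviceMemory changes below). A minimal sketch
of the resulting staging pattern, using hypothetical stagingBuffer/dstBuffer names rather
than MoltenVK's own helpers:

    #import <Metal/Metal.h>
    #include <cstring>

    // Copy CPU data into a shared-storage staging MTLBuffer, then blit it into
    // the destination buffer. No mlock()/munlock() is needed: the staging
    // memory stays resident because its backing MTLBuffer is marked
    // non-volatile while any allocation from it is outstanding.
    static void stageAndCopy(id<MTLCommandBuffer> cmdBuff,
                             id<MTLBuffer> stagingBuffer,   // MTLStorageModeShared
                             id<MTLBuffer> dstBuffer,
                             const void* srcData,
                             NSUInteger dataSize) {
        memcpy(stagingBuffer.contents, srcData, dataSize);
        id<MTLBlitCommandEncoder> blitEnc = [cmdBuff blitCommandEncoder];
        [blitEnc copyFromBuffer: stagingBuffer
                   sourceOffset: 0
                       toBuffer: dstBuffer
              destinationOffset: 0
                           size: dataSize];
        [blitEnc endEncoding];
    }
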
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
index 801b3f7..85f312b 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.mm
@@ -695,7 +695,7 @@
 
 // Return the MTLBuffer allocation to the pool once the command buffer is done with it
 const MVKMTLBufferAllocation* MVKCommandEncoder::getTempMTLBuffer(NSUInteger length, bool isPrivate, bool isDedicated) {
-    const MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length, isPrivate, isDedicated);
+    MVKMTLBufferAllocation* mtlBuffAlloc = getCommandEncodingPool()->acquireMTLBufferAllocation(length, isPrivate, isDedicated);
     [_mtlCmdBuffer addCompletedHandler: ^(id<MTLCommandBuffer> mcb) { mtlBuffAlloc->returnToPool(); }];
     return mtlBuffAlloc;
 }
@@ -708,9 +708,7 @@
 const MVKMTLBufferAllocation* MVKCommandEncoder::copyToTempMTLBufferAllocation(const void* bytes, NSUInteger length, bool isDedicated) {
 	const MVKMTLBufferAllocation* mtlBuffAlloc = getTempMTLBuffer(length, false, isDedicated);
     void* pBuffData = mtlBuffAlloc->getContents();
-    mlock(pBuffData, length);
     memcpy(pBuffData, bytes, length);
-    munlock(pBuffData, length);
 
     return mtlBuffAlloc;
 }
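
With returnToPool() no longer const, getTempMTLBuffer() keeps a non-const pointer to
the acquired allocation and schedules its return from the command buffer's completion
handler. A minimal sketch of that handler-driven recycling, with a hypothetical
Pool/Allocation pair standing in for MoltenVK's types:

    #import <Metal/Metal.h>

    struct Allocation { void returnToPool() { /* hand back to owning pool */ } };
    struct Pool { Allocation* acquire() { return new Allocation(); } };

    // Hand the allocation back to its pool only after the GPU has finished
    // executing the command buffer that used it.
    static Allocation* acquireTempAllocation(id<MTLCommandBuffer> mtlCmdBuff, Pool* pool) {
        Allocation* alloc = pool->acquire();
        [mtlCmdBuff addCompletedHandler: ^(id<MTLCommandBuffer> mcb) {
            alloc->returnToPool();   // runs on completion, possibly on another thread
        }];
        return alloc;
    }
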
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.h b/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.h
index 2b3e8ae..6318f64 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.h
@@ -66,7 +66,7 @@
     * To return the allocation back to the pool to be reused,
     * call the returnToPool() function on it.
      */
-    const MVKMTLBufferAllocation* acquireMTLBufferAllocation(NSUInteger length, bool isPrivate = false, bool isDedicated = false);
+    MVKMTLBufferAllocation* acquireMTLBufferAllocation(NSUInteger length, bool isPrivate = false, bool isDedicated = false);
 
 	/**
 	 * Returns a MTLRenderPipelineState dedicated to rendering to several attachments
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm
index 16f702a..78f8bbc 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncodingPool.mm
@@ -77,7 +77,7 @@
 	MVK_ENC_REZ_ACCESS(_cmdClearDefaultDepthStencilState, newMTLDepthStencilState(useDepth, useStencil));
 }
 
-const MVKMTLBufferAllocation* MVKCommandEncodingPool::acquireMTLBufferAllocation(NSUInteger length, bool isPrivate, bool isDedicated) {
+MVKMTLBufferAllocation* MVKCommandEncodingPool::acquireMTLBufferAllocation(NSUInteger length, bool isPrivate, bool isDedicated) {
     MVKAssert(isPrivate || !isDedicated, "Dedicated, host-shared temporary buffers are not supported."); 
     if (isDedicated) {
         return _dedicatedMtlBufferAllocator.acquireMTLBufferRegion(length);
diff --git a/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.h b/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.h
index 7488305..9cc7ccd 100644
--- a/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.h
+++ b/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.h
@@ -51,17 +51,20 @@
     MVKMTLBufferAllocationPool* getPool() const { return _pool; }
 
 	/** Returns this object back to the pool that created it. */
-    void returnToPool() const;
+    void returnToPool();
 
 	/** Constructs this instance with the specified pool as its origin. */
     MVKMTLBufferAllocation(MVKMTLBufferAllocationPool* pool,
                            id<MTLBuffer> mtlBuffer,
                            NSUInteger offset,
-                           NSUInteger length) : _pool(pool), _mtlBuffer(mtlBuffer), _offset(offset), _length(length) {}
+                           NSUInteger length,
+                           uint64_t poolIndex) : _pool(pool), _mtlBuffer(mtlBuffer), _offset(offset), _length(length), _poolIndex(poolIndex) {}
 
 protected:
-	MVKMTLBufferAllocationPool* _pool;
+    friend class MVKMTLBufferAllocationPool;
 
+    MVKMTLBufferAllocationPool* _pool;
+    uint64_t _poolIndex;
 };
 
 
@@ -78,6 +81,11 @@
 class MVKMTLBufferAllocationPool : public MVKObjectPool<MVKMTLBufferAllocation>, public MVKDeviceTrackingMixin {
 
 public:
+    /** Acquires and returns an allocation from this pool. */
+    MVKMTLBufferAllocation* acquireAllocation();
+
+    /** Acquires and returns an allocation without taking the pool lock; the caller is responsible for mutual exclusion. */
+    MVKMTLBufferAllocation* acquireAllocationUnlocked();
 
 	/** Returns the Vulkan API opaque object controlling this object. */
 	MVKVulkanAPIObject* getVulkanAPIObject() override { return _device->getVulkanAPIObject(); };
@@ -93,7 +101,8 @@
 	
 	MVKBaseObject* getBaseObject() override { return this; };
 	MVKMTLBufferAllocation* newObject() override;
-	void returnAllocation(MVKMTLBufferAllocation* ba) { _isThreadSafe ? returnObjectSafely(ba) : returnObject(ba); }
+	void returnAllocationUnlocked(MVKMTLBufferAllocation* ba);
+	void returnAllocation(MVKMTLBufferAllocation* ba);
 	uint32_t calcMTLBufferAllocationCount();
     void addMTLBuffer();
 
@@ -101,8 +110,9 @@
     NSUInteger _allocationLength;
     NSUInteger _mtlBufferLength;
     MTLStorageMode _mtlStorageMode;
-	MVKSmallVector<id<MTLBuffer>, 64> _mtlBuffers;
-	bool _isThreadSafe;
+    struct MTLBufferTracker { id<MTLBuffer> mtlBuffer; uint64_t allocationCount; };
+    MVKSmallVector<MTLBufferTracker, 64> _mtlBuffers;
+    bool _isThreadSafe;
 };
 
 
@@ -132,7 +142,7 @@
      * To return the MVKMTLBufferAllocation back to the pool, call 
      * the returnToPool() function on the returned instance.
      */
-    const MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
+    MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
 
     /**
      * Configures this instance to dispense MVKMTLBufferAllocation up to the specified
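
The header now pairs each backing MTLBuffer with a live-allocation counter
(MTLBufferTracker), and each allocation records which buffer it came from via
_poolIndex. In isolation, the bookkeeping looks roughly like this (illustrative
names, not MoltenVK's own):

    #import <Metal/Metal.h>
    #include <vector>
    #include <cstdint>

    struct BufferTracker { id<MTLBuffer> mtlBuffer; uint64_t allocationCount; };

    struct TrackedPool {                      // hypothetical, mirrors the tracker idea
        std::vector<BufferTracker> buffers;

        // First live allocation on a buffer: make it non-volatile so the OS
        // cannot discard its contents while it is in use.
        void onAcquire(uint64_t poolIndex) {
            if (buffers[poolIndex].allocationCount++ == 0) {
                [buffers[poolIndex].mtlBuffer setPurgeableState: MTLPurgeableStateNonVolatile];
            }
        }

        // Last live allocation returned: make it volatile so the OS may
        // reclaim the memory under pressure.
        void onReturn(uint64_t poolIndex) {
            if (--buffers[poolIndex].allocationCount == 0) {
                [buffers[poolIndex].mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
            }
        }
    };
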
diff --git a/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.mm b/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.mm
index 2aabbf9..ee7b46f 100644
--- a/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKMTLBufferAllocation.mm
@@ -24,7 +24,7 @@
 
 MVKVulkanAPIObject* MVKMTLBufferAllocation::getVulkanAPIObject() { return _pool->getVulkanAPIObject(); };
 
-void MVKMTLBufferAllocation::returnToPool() const { _pool->returnAllocation((MVKMTLBufferAllocation*)this); }
+void MVKMTLBufferAllocation::returnToPool() { _pool->returnAllocation(this); }
 
 
 #pragma mark -
@@ -39,16 +39,49 @@
     // of future allocation to beyond this allocation.
     NSUInteger offset = _nextOffset;
     _nextOffset += _allocationLength;
-    return new MVKMTLBufferAllocation(this, _mtlBuffers.back(), offset, _allocationLength);
+    return new MVKMTLBufferAllocation(this, _mtlBuffers.back().mtlBuffer, offset, _allocationLength, _mtlBuffers.size() - 1);
 }
 
 // Adds a new MTLBuffer to the buffer pool and resets the next offset to the start of it
 void MVKMTLBufferAllocationPool::addMTLBuffer() {
     MTLResourceOptions mbOpts = (_mtlStorageMode << MTLResourceStorageModeShift) | MTLResourceCPUCacheModeDefaultCache;
-    _mtlBuffers.push_back([_device->getMTLDevice() newBufferWithLength: _mtlBufferLength options: mbOpts]);
+    _mtlBuffers.push_back({ [_device->getMTLDevice() newBufferWithLength: _mtlBufferLength options: mbOpts], 0 });
     _nextOffset = 0;
 }
 
+MVKMTLBufferAllocation* MVKMTLBufferAllocationPool::acquireAllocationUnlocked() {
+    MVKMTLBufferAllocation* ba = acquireObject();
+    if (!_mtlBuffers[ba->_poolIndex].allocationCount++) {
+        [ba->_mtlBuffer setPurgeableState: MTLPurgeableStateNonVolatile];
+    }
+    return ba;
+}
+
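+// Thread-safe wrapper: takes the pool lock only if this pool was created thread-safe.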
+MVKMTLBufferAllocation* MVKMTLBufferAllocationPool::acquireAllocation() {
+    if (_isThreadSafe) {
+        std::lock_guard<std::mutex> lock(_lock);
+        return acquireAllocationUnlocked();
+    } else {
+        return acquireAllocationUnlocked();
+    }
+}
+
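+// Return an allocation without taking the pool lock. When this is the last
+// live allocation on its backing MTLBuffer, mark that buffer volatile so the
+// OS may reclaim its memory under pressure.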
+void MVKMTLBufferAllocationPool::returnAllocationUnlocked(MVKMTLBufferAllocation* ba) {
+    if (!--_mtlBuffers[ba->_poolIndex].allocationCount) {
+        [ba->_mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
+    }
+    returnObject(ba);
+}
+
+void MVKMTLBufferAllocationPool::returnAllocation(MVKMTLBufferAllocation* ba) {
+    if (_isThreadSafe) {
+        std::lock_guard<std::mutex> lock(_lock);
+        returnAllocationUnlocked(ba);
+    } else {
+        returnAllocationUnlocked(ba);
+    }
+}
+
 
 MVKMTLBufferAllocationPool::MVKMTLBufferAllocationPool(MVKDevice* device, NSUInteger allocationLength, bool makeThreadSafe,
 													   bool isDedicated, MTLStorageMode mtlStorageMode) :
@@ -73,14 +106,17 @@
 }
 
 MVKMTLBufferAllocationPool::~MVKMTLBufferAllocationPool() {
-    mvkReleaseContainerContents(_mtlBuffers);
+    for (uint32_t bufferIndex = 0; bufferIndex < _mtlBuffers.size(); ++bufferIndex) {
+        [_mtlBuffers[bufferIndex].mtlBuffer release];
+    }
+    _mtlBuffers.clear();
 }
 
 
 #pragma mark -
 #pragma mark MVKMTLBufferAllocator
 
-const MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUInteger length) {
+MVKMTLBufferAllocation* MVKMTLBufferAllocator::acquireMTLBufferRegion(NSUInteger length) {
 	MVKAssert(length <= _maxAllocationLength, "This MVKMTLBufferAllocator has been configured to dispense MVKMTLBufferRegions no larger than %lu bytes.", (unsigned long)_maxAllocationLength);
 
 	// Can't allocate a segment smaller than the minimum MTLBuffer alignment.
@@ -88,12 +124,7 @@
 
     // Convert max length to the next power-of-two exponent to use as a lookup
     NSUInteger p2Exp = mvkPowerOfTwoExponent(length);
-	MVKMTLBufferAllocationPool* pRP = _regionPools[p2Exp];
-	const MVKMTLBufferAllocation* region = _isThreadSafe ? pRP->acquireObjectSafely() : pRP->acquireObject();
-	if (region) {
-		[region->_mtlBuffer setPurgeableState: MTLPurgeableStateVolatile];
-	}
-	return region;
+    return _regionPools[p2Exp]->acquireAllocation();
 }
 
 MVKMTLBufferAllocator::MVKMTLBufferAllocator(MVKDevice* device, NSUInteger maxRegionLength, bool makeThreadSafe, bool isDedicated, MTLStorageMode mtlStorageMode) : MVKBaseDeviceObject(device) {
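
Note the behavioral fix hidden in the last hunk: the old code marked a region's
MTLBuffer volatile on acquire, leaving it purgeable while in use; the pool now keeps
buffers non-volatile while any allocation is live and volatile only when idle.
Because setPurgeableState: returns the previous state, a caller that depends on a
buffer's old contents can also detect a purge (a sketch, not MoltenVK code):

    #import <Metal/Metal.h>

    // Returns true if the buffer's previous contents were discarded while it
    // was volatile, in which case they must be regenerated before use.
    static bool makeResidentAndCheckPurged(id<MTLBuffer> mtlBuffer) {
        MTLPurgeableState prior = [mtlBuffer setPurgeableState: MTLPurgeableStateNonVolatile];
        return prior == MTLPurgeableStateEmpty;
    }
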
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
index 64128ea..c1ca6ba 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
@@ -397,7 +397,7 @@
 protected:
 	inline uint8_t* getData() { return _mvkMTLBufferAllocation ? (uint8_t*)_mvkMTLBufferAllocation->getContents() : nullptr; }
 
-	const MVKMTLBufferAllocation* _mvkMTLBufferAllocation = nullptr;
+	MVKMTLBufferAllocation* _mvkMTLBufferAllocation = nullptr;
 };
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
index 1e9dddc..289bc44 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
@@ -180,7 +180,7 @@
 			  VkWriteDescriptorSetInlineUniformBlockEXT* pInlineUniformBlock);
 
 	/** Returns an MTLBuffer region allocation. */
-	const MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
+	MVKMTLBufferAllocation* acquireMTLBufferRegion(NSUInteger length);
 	/**
 	 * Returns the Metal argument buffer to which resources are written,
 	 * or return nil if Metal argument buffers are not being used.
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
index ff9fcd5..72a95cc 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
@@ -339,7 +339,7 @@
     }
 }
 
-const MVKMTLBufferAllocation* MVKDescriptorSet::acquireMTLBufferRegion(NSUInteger length) {
+MVKMTLBufferAllocation* MVKDescriptorSet::acquireMTLBufferRegion(NSUInteger length) {
 	return _pool->_inlineBlockMTLBufferAllocator.acquireMTLBufferRegion(length);
 }
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index f08893b..ac1dbfd 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -3621,7 +3621,7 @@
 		for (uint32_t i = 0; i < memRangeCount; i++) {
 			const VkMappedMemoryRange* pMem = &pMemRanges[i];
 			MVKDeviceMemory* mvkMem = (MVKDeviceMemory*)pMem->memory;
-			VkResult r = mvkMem->pullFromDevice(pMem->offset, pMem->size, false, &mvkBlitEnc);
+			VkResult r = mvkMem->pullFromDevice(pMem->offset, pMem->size, &mvkBlitEnc);
 			if (rslt == VK_SUCCESS) { rslt = r; }
 		}
 		if (mvkBlitEnc.mtlBlitEncoder) { [mvkBlitEnc.mtlBlitEncoder endEncoding]; }
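
pullFromDevice() loses its evenIfCoherent parameter here; vkInvalidateMappedMemoryRanges()
always pulls, and coherence handling now lives inside MVKDeviceMemory. The loop above
accumulates one shared blit encoder across all ranges; a hedged sketch of how such an
encoder could be finished, assuming (the code outside this hunk is not shown) that the
command buffer is committed and waited on so host reads see the synchronized data:

    #import <Metal/Metal.h>

    struct BlitEncoderHolder {                         // mirrors MVKMTLBlitEncoder, illustrative
        id<MTLCommandBuffer>      mtlCmdBuffer   = nil;
        id<MTLBlitCommandEncoder> mtlBlitEncoder = nil;
    };

    // End encoding once every range has queued its synchronizeResource: call,
    // then submit and wait so the CPU-visible copies are up to date.
    static void finishInvalidate(BlitEncoderHolder& enc) {
        if (enc.mtlBlitEncoder) { [enc.mtlBlitEncoder endEncoding]; }
        if (enc.mtlCmdBuffer) {
            [enc.mtlCmdBuffer commit];
            [enc.mtlCmdBuffer waitUntilCompleted];
        }
    }
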
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
index b08aa46..953e3b8 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
@@ -93,17 +93,11 @@
 	/** Returns whether this device memory is currently mapped to host memory. */
 	bool isMapped() { return _mappedRange.size > 0; }
 
-	/**
-	 * If this memory is host-visible, the specified memory range is flushed to the device.
-	 * Normally, flushing will only occur if the device memory is non-coherent, but flushing
-	 * to coherent memory can be forced by setting evenIfCoherent to true.
-	 */
-	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size, bool evenIfCoherent = false);
+	/** If this memory is host-visible, the specified memory range is flushed to the device. */
+	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size);
 
 	/**
 	 * If this memory is host-visible, pulls the specified memory range from the device.
-	 * Normally, pulling will only occur if the device memory is non-coherent, but pulling
-	 * to coherent memory can be forced by setting evenIfCoherent to true.
 	 *
 	 * If pBlitEnc is not null, it points to a holder for a MTLBlitCommandEncoder and its
 	 * associated MTLCommandBuffer. If this instance has a MTLBuffer using managed memory,
@@ -114,7 +108,6 @@
 	 */
 	VkResult pullFromDevice(VkDeviceSize offset,
 							VkDeviceSize size,
-							bool evenIfCoherent = false,
 							MVKMTLBlitEncoder* pBlitEnc = nullptr);
 
 
@@ -172,8 +165,10 @@
 	id<MTLHeap> _mtlHeap = nil;
 	void* _pMemory = nullptr;
 	void* _pHostMemory = nullptr;
-	bool _isDedicated = false;
+	VkMemoryPropertyFlags _vkMemProps;
 	MTLStorageMode _mtlStorageMode;
 	MTLCPUCacheMode _mtlCPUCacheMode;
+	bool _isDedicated = false;
 };
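
With evenIfCoherent gone, flushToDevice()/pullFromDevice() match the plain Vulkan
contract: apps flush/invalidate non-coherent memory explicitly, and coherent memory is
synchronized internally at map/unmap time instead. From the application side, the
non-coherent write path that MoltenVK ultimately services with flushToDevice() is
standard Vulkan usage:

    #include <vulkan/vulkan.h>
    #include <cstring>

    // Map, write, then flush so the device sees the writes. In real code the
    // flushed offset/size must respect VkPhysicalDeviceLimits::nonCoherentAtomSize.
    void writeNonCoherent(VkDevice device, VkDeviceMemory mem,
                          VkDeviceSize offset, VkDeviceSize size, const void* src) {
        void* pData = nullptr;
        vkMapMemory(device, mem, offset, size, 0, &pData);
        memcpy(pData, src, (size_t)size);

        VkMappedMemoryRange range = {};
        range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        range.memory = mem;
        range.offset = offset;
        range.size   = size;
        vkFlushMappedMemoryRanges(device, 1, &range);

        vkUnmapMemory(device, mem);
    }
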
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
index eac900f..25686df 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
@@ -55,8 +55,11 @@
 
 	*ppData = (void*)((uintptr_t)_pMemory + offset);
 
-	// Coherent memory does not require flushing by app, so we must flush now, to handle any texture updates.
-	pullFromDevice(offset, size, isMemoryHostCoherent());
+	// Coherent memory does not require invalidation by the app, so we must pull now
+	// to support Metal textures that actually reside in non-coherent memory.
+	if (mvkIsAnyFlagEnabled(_vkMemProps, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) {
+		pullFromDevice(offset, size);
+	}
 
 	return VK_SUCCESS;
 }
@@ -68,54 +71,57 @@
 		return;
 	}
 
-	// Coherent memory does not require flushing by app, so we must flush now.
-	flushToDevice(_mappedRange.offset, _mappedRange.size, isMemoryHostCoherent());
+	// Coherent memory does not require flushing by app, so we must flush now
+	// to support Metal textures that actually reside in non-coherent memory.
+	if (mvkIsAnyFlagEnabled(_vkMemProps, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) {
+		flushToDevice(_mappedRange.offset, _mappedRange.size);
+	}
 
 	_mappedRange.offset = 0;
 	_mappedRange.size = 0;
 }
 
-VkResult MVKDeviceMemory::flushToDevice(VkDeviceSize offset, VkDeviceSize size, bool evenIfCoherent) {
-	// Coherent memory is flushed on unmap(), so it is only flushed if forced
+VkResult MVKDeviceMemory::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
 	VkDeviceSize memSize = adjustMemorySize(size, offset);
-	if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
+	if (memSize == 0 || !isMemoryHostAccessible()) { return VK_SUCCESS; }
 
 #if MVK_MACOS
-		if (_mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
-			[_mtlBuffer didModifyRange: NSMakeRange(offset, memSize)];
-		}
+	if (_mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
+		[_mtlBuffer didModifyRange: NSMakeRange(offset, memSize)];
+	}
 #endif
 
-		// If we have an MTLHeap object, there's no need to sync memory manually between images and the buffer.
-		if (!_mtlHeap) {
-			lock_guard<mutex> lock(_rezLock);
-			for (auto& img : _imageMemoryBindings) { img->flushToDevice(offset, memSize); }
-			for (auto& buf : _buffers) { buf->flushToDevice(offset, memSize); }
-		}
+	// If we have an MTLHeap object, there's no need to sync memory manually between resources and the buffer.
+	if ( !_mtlHeap ) {
+		lock_guard<mutex> lock(_rezLock);
+		for (auto& img : _imageMemoryBindings) { img->flushToDevice(offset, memSize); }
+		for (auto& buf : _buffers) { buf->flushToDevice(offset, memSize); }
 	}
+
 	return VK_SUCCESS;
 }
 
 VkResult MVKDeviceMemory::pullFromDevice(VkDeviceSize offset,
 										 VkDeviceSize size,
-										 bool evenIfCoherent,
 										 MVKMTLBlitEncoder* pBlitEnc) {
-	// Coherent memory is flushed on unmap(), so it is only flushed if forced
     VkDeviceSize memSize = adjustMemorySize(size, offset);
-	if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) && !_mtlHeap) {
+	if (memSize == 0 || !isMemoryHostAccessible()) { return VK_SUCCESS; }
+
+#if MVK_MACOS
+	if (pBlitEnc && _mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
+		if ( !pBlitEnc->mtlCmdBuffer) { pBlitEnc->mtlCmdBuffer = _device->getAnyQueue()->getMTLCommandBuffer(kMVKCommandUseInvalidateMappedMemoryRanges); }
+		if ( !pBlitEnc->mtlBlitEncoder) { pBlitEnc->mtlBlitEncoder = [pBlitEnc->mtlCmdBuffer blitCommandEncoder]; }
+		[pBlitEnc->mtlBlitEncoder synchronizeResource: _mtlBuffer];
+	}
+#endif
+
+	// If we have an MTLHeap object, there's no need to sync memory manually between resources and the buffer.
+	if ( !_mtlHeap ) {
 		lock_guard<mutex> lock(_rezLock);
         for (auto& img : _imageMemoryBindings) { img->pullFromDevice(offset, memSize); }
         for (auto& buf : _buffers) { buf->pullFromDevice(offset, memSize); }
-
-#if MVK_MACOS
-		if (pBlitEnc && _mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
-			if ( !pBlitEnc->mtlCmdBuffer) { pBlitEnc->mtlCmdBuffer = _device->getAnyQueue()->getMTLCommandBuffer(kMVKCommandUseInvalidateMappedMemoryRanges); }
-			if ( !pBlitEnc->mtlBlitEncoder) { pBlitEnc->mtlBlitEncoder = [pBlitEnc->mtlCmdBuffer blitCommandEncoder]; }
-			[pBlitEnc->mtlBlitEncoder synchronizeResource: _mtlBuffer];
-		}
-#endif
-
 	}
+
 	return VK_SUCCESS;
 }
 
@@ -271,9 +277,9 @@
 								 const VkMemoryAllocateInfo* pAllocateInfo,
 								 const VkAllocationCallbacks* pAllocator) : MVKVulkanAPIDeviceObject(device) {
 	// Set Metal memory parameters
-	VkMemoryPropertyFlags vkMemProps = _device->_pMemoryProperties->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;
-	_mtlStorageMode = mvkMTLStorageModeFromVkMemoryPropertyFlags(vkMemProps);
-	_mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(vkMemProps);
+	_vkMemProps = _device->_pMemoryProperties->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;
+	_mtlStorageMode = mvkMTLStorageModeFromVkMemoryPropertyFlags(_vkMemProps);
+	_mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(_vkMemProps);
 
 	_allocationSize = pAllocateInfo->allocationSize;
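
The constructor now caches the memory type's property flags in _vkMemProps so that
map()/unmap() can test VK_MEMORY_PROPERTY_HOST_COHERENT_BIT directly. Earlier in this
file, pullFromDevice() also moves the managed-buffer synchronizeResource: call ahead of
the per-resource pulls and outside the !_mtlHeap guard. On macOS that GPU-to-CPU sync
looks like this in isolation (illustrative wrapper, not MoltenVK's):

    #import <Metal/Metal.h>

    // Managed-storage resources keep separate CPU and GPU copies on macOS.
    // synchronizeResource: schedules the GPU->CPU copy; once the command
    // buffer completes, buffer.contents reflects the GPU's writes.
    static void syncManagedToHost(id<MTLCommandBuffer> cmdBuff, id<MTLBuffer> buffer) {
        if (buffer.storageMode != MTLStorageModeManaged) { return; }
        id<MTLBlitCommandEncoder> blitEnc = [cmdBuff blitCommandEncoder];
        [blitEnc synchronizeResource: buffer];
        [blitEnc endEncoding];
    }
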
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm b/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
index f3006f8..cf3853f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
@@ -223,9 +223,7 @@
 	const MVKMTLBufferAllocation* tempBuff = cmdEncoder->getTempMTLBuffer(queryCount * _queryElementCount * sizeof(uint64_t));
 	void* pBuffData = tempBuff->getContents();
 	size_t size = queryCount * _queryElementCount * sizeof(uint64_t);
-	mlock(pBuffData, size);
 	memcpy(pBuffData, &_timestamps[firstQuery], size);
-	munlock(pBuffData, size);
 	offset = tempBuff->_offset;
 	return tempBuff->_mtlBuffer;
 }