Support using Metal texel buffer for linear images to increase host coherency.

MVKDeviceMemory tracks MVKImages and MVKBuffers separately.
Per Vulkan spec, restrict linear images to 2D, non-array, single mipmap.
Use texel buffer if possible for texture on coherent device memory.
Only flush MVKImages (not MVKBuffers) when device memory mapped.
Do not flush texel buffer images.
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h
index 1be5ae2..369ed4a 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h
@@ -36,6 +36,9 @@
 	/** Returns the memory requirements of this resource by populating the specified structure. */
 	VkResult getMemoryRequirements(VkMemoryRequirements* pMemoryRequirements) override;
 
+	/** Binds this resource to the specified offset within the specified memory allocation. */
+	VkResult bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) override;
+
 	/** Applies the specified global memory barrier. */
     void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
                             VkPipelineStageFlags dstStageMask,
@@ -64,6 +67,8 @@
 	
 	MVKBuffer(MVKDevice* device, const VkBufferCreateInfo* pCreateInfo);
 
+	~MVKBuffer() override;
+
 protected:
 	using MVKResource::needsHostReadSync;
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm
index db14ae7..ed71b3f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm
@@ -36,6 +36,14 @@
 	return VK_SUCCESS;
 }
 
+VkResult MVKBuffer::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
+	if (_deviceMemory) { _deviceMemory->removeBuffer(this); }
+
+	MVKResource::bindDeviceMemory(mvkMem, memOffset);
+
+	return _deviceMemory ? _deviceMemory->addBuffer(this) : VK_SUCCESS;
+}
+
 void MVKBuffer::applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
 								   VkPipelineStageFlags dstStageMask,
 								   VkMemoryBarrier* pMemoryBarrier,
@@ -83,7 +91,10 @@
 MVKBuffer::MVKBuffer(MVKDevice* device, const VkBufferCreateInfo* pCreateInfo) : MVKResource(device) {
     _byteAlignment = _device->_pMetalFeatures->mtlBufferAlignment;
     _byteCount = pCreateInfo->size;
-	_isBuffer  = true;
+}
+
+MVKBuffer::~MVKBuffer() {
+	if (_deviceMemory) { _deviceMemory->removeBuffer(this); }
 }
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
index 9ee8ad5..433032b 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
@@ -23,7 +23,8 @@
 
 #import <Metal/Metal.h>
 
-class MVKResource;
+class MVKBuffer;
+class MVKImage;
 
 
 #pragma mark MVKDeviceMemory
@@ -97,25 +98,28 @@
     ~MVKDeviceMemory() override;
 
 protected:
-	friend MVKResource;
+	friend MVKBuffer;
+	friend MVKImage;
 
 	VkDeviceSize adjustMemorySize(VkDeviceSize size, VkDeviceSize offset);
-	VkResult addResource(MVKResource* rez);
-	void removeResource(MVKResource* rez);
+	VkResult addBuffer(MVKBuffer* mvkBuff);
+	void removeBuffer(MVKBuffer* mvkBuff);
+	VkResult addImage(MVKImage* mvkImg);
+	void removeImage(MVKImage* mvkImg);
 	bool ensureMTLBuffer();
 	bool ensureHostMemory();
 	void freeHostMemory();
 
-	std::vector<MVKResource*> _resources;
+	std::vector<MVKBuffer*> _buffers;
+	std::vector<MVKImage*> _images;
 	std::mutex _rezLock;
-    VkDeviceSize _allocationSize;
-	VkDeviceSize _mapOffset;
-	VkDeviceSize _mapSize;
-	id<MTLBuffer> _mtlBuffer;
-	void* _pMemory;
-	void* _pHostMemory;
-	bool _isMapped;
-	std::mutex _lock;
+    VkDeviceSize _allocationSize = 0;
+	VkDeviceSize _mapOffset = 0;
+	VkDeviceSize _mapSize = 0;
+	id<MTLBuffer> _mtlBuffer = nil;
+	void* _pMemory = nullptr;
+	void* _pHostMemory = nullptr;
+	bool _isMapped = false;
 	MTLResourceOptions _mtlResourceOptions;
 	MTLStorageMode _mtlStorageMode;
 	MTLCPUCacheMode _mtlCPUCacheMode;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
index 91f0f46..0e3c194 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
@@ -17,6 +17,7 @@
  */
 
 #include "MVKDeviceMemory.h"
+#include "MVKBuffer.h"
 #include "MVKImage.h"
 #include "mvk_datatypes.h"
 #include "MVKFoundation.h"
@@ -48,7 +49,7 @@
 	*ppData = (void*)((uintptr_t)_pMemory + offset);
 
 	// Coherent memory does not require flushing by app, so we must flush now, to handle any texture updates.
-	if (isMemoryHostCoherent()) { pullFromDevice(offset, size, true); }
+	pullFromDevice(offset, size, isMemoryHostCoherent());
 
 	return VK_SUCCESS;
 }
@@ -61,7 +62,7 @@
 	}
 
 	// Coherent memory does not require flushing by app, so we must flush now.
-	if (isMemoryHostCoherent()) { flushToDevice(_mapOffset, _mapSize, true); }
+	flushToDevice(_mapOffset, _mapSize, isMemoryHostCoherent());
 
 	_mapOffset = 0;
 	_mapSize = 0;
@@ -72,14 +73,15 @@
 	// Coherent memory is flushed on unmap(), so it is only flushed if forced
 	VkDeviceSize memSize = adjustMemorySize(size, offset);
 	if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
-		lock_guard<mutex> lock(_rezLock);
-        for (auto& rez : _resources) { rez->flushToDevice(offset, memSize); }
 
 #if MVK_MACOS
 		if (_mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
 			[_mtlBuffer didModifyRange: NSMakeRange(offset, memSize)];
 		}
 #endif
+
+		lock_guard<mutex> lock(_rezLock);
+        for (auto& img : _images) { img->flushToDevice(offset, memSize); }
 	}
 	return VK_SUCCESS;
 }
@@ -89,7 +91,7 @@
     VkDeviceSize memSize = adjustMemorySize(size, offset);
 	if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
 		lock_guard<mutex> lock(_rezLock);
-        for (auto& rez : _resources) { rez->pullFromDevice(offset, memSize); }
+        for (auto& img : _images) { img->pullFromDevice(offset, memSize); }
 	}
 	return VK_SUCCESS;
 }
@@ -100,21 +102,34 @@
 	return (size == VK_WHOLE_SIZE) ? (_allocationSize - offset) : size;
 }
 
-VkResult MVKDeviceMemory::addResource(MVKResource* rez) {
+VkResult MVKDeviceMemory::addBuffer(MVKBuffer* mvkBuff) {
 	lock_guard<mutex> lock(_rezLock);
 
-	if (rez->_isBuffer && !ensureMTLBuffer() ) {
+	if (!ensureMTLBuffer() ) {
 		return mvkNotifyErrorWithText(VK_ERROR_OUT_OF_DEVICE_MEMORY, "Could not bind a VkBuffer to a VkDeviceMemory of size %llu bytes. The maximum memory-aligned size of a VkDeviceMemory that supports a VkBuffer is %llu bytes.", _allocationSize, _device->_pMetalFeatures->maxMTLBufferSize);
 	}
 
-	_resources.push_back(rez);
+	_buffers.push_back(mvkBuff);
 
 	return VK_SUCCESS;
 }
 
-void MVKDeviceMemory::removeResource(MVKResource* rez) {
+void MVKDeviceMemory::removeBuffer(MVKBuffer* mvkBuff) {
 	lock_guard<mutex> lock(_rezLock);
-	mvkRemoveAllOccurances(_resources, rez);
+	mvkRemoveAllOccurances(_buffers, mvkBuff);
+}
+
+VkResult MVKDeviceMemory::addImage(MVKImage* mvkImg) {
+	lock_guard<mutex> lock(_rezLock);
+
+	_images.push_back(mvkImg);
+
+	return VK_SUCCESS;
+}
+
+void MVKDeviceMemory::removeImage(MVKImage* mvkImg) {
+	lock_guard<mutex> lock(_rezLock);
+	mvkRemoveAllOccurances(_images, mvkImg);
 }
 
 // Ensures that this instance is backed by a MTLBuffer object,
@@ -171,12 +186,6 @@
 	_mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(vkMemProps);
 
 	_allocationSize = pAllocateInfo->allocationSize;
-	_mtlBuffer = nil;
-	_pMemory = nullptr;
-	_pHostMemory = nullptr;
-	_isMapped = false;
-	_mapOffset = 0;
-	_mapSize = 0;
 
 	// If memory needs to be coherent it must reside in an MTLBuffer, since an open-ended map() must work.
 	if (isMemoryHostCoherent() && !ensureMTLBuffer() ) {
@@ -187,8 +196,10 @@
 MVKDeviceMemory::~MVKDeviceMemory() {
     // Unbind any resources that are using me. Iterate a copy of the collection,
     // to allow the resource to callback to remove itself from the collection.
-    auto rezCopies = _resources;
-    for (auto& rez : rezCopies) { rez->bindDeviceMemory(nullptr, 0); }
+    auto buffCopies = _buffers;
+    for (auto& buf : buffCopies) { buf->bindDeviceMemory(nullptr, 0); }
+	auto imgCopies = _images;
+	for (auto& img : imgCopies) { img->bindDeviceMemory(nullptr, 0); }
 
 	[_mtlBuffer release];
 	_mtlBuffer = nil;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
index e9e185a..087b5f2 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
@@ -194,11 +194,13 @@
 	~MVKImage() override;
 
 protected:
+	friend class MVKDeviceMemory;
 	friend class MVKImageView;
 	using MVKResource::needsHostReadSync;
 
 	MVKImageSubresource* getSubresource(uint32_t mipLevel, uint32_t arrayLayer);
-	void initMTLTextureViewSupport();
+	bool validateLinear(const VkImageCreateInfo* pCreateInfo);
+	bool validateUseTexelBuffer();
 	void initSubresources(const VkImageCreateInfo* pCreateInfo);
 	void initSubresourceLayout(MVKImageSubresource& imgSubRez);
 	virtual id<MTLTexture> newMTLTexture();
@@ -207,8 +209,9 @@
 	MTLTextureDescriptor* getMTLTextureDescriptor();
     void updateMTLTextureContent(MVKImageSubresource& subresource, VkDeviceSize offset, VkDeviceSize size);
     void getMTLTextureContent(MVKImageSubresource& subresource, VkDeviceSize offset, VkDeviceSize size);
-	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) override;
-	VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) override;
+	bool shouldFlushHostMemory();
+	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size);
+	VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size);
 	bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
 						   VkPipelineStageFlags dstStageMask,
 						   VkImageMemoryBarrier* pImageMemoryBarrier);
@@ -227,6 +230,8 @@
     bool _isDepthStencilAttachment;
 	bool _canSupportMTLTextureView;
     bool _hasExpectedTexelSize;
+	bool _usesTexelBuffer;
+	bool _isLinear;
 };
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index 59f8260..552eb4e 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -165,24 +165,49 @@
 
 // Memory may have been mapped before image was bound, and needs to be loaded into the MTLTexture.
 VkResult MVKImage::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
-	VkResult rslt = MVKResource::bindDeviceMemory(mvkMem, memOffset);
-	if (isMemoryHostAccessible()) { flushToDevice(getDeviceMemoryOffset(), getByteCount()); }
-	return rslt;
+	if (_deviceMemory) { _deviceMemory->removeImage(this); }
+
+	MVKResource::bindDeviceMemory(mvkMem, memOffset);
+
+	_usesTexelBuffer = validateUseTexelBuffer();
+
+	flushToDevice(getDeviceMemoryOffset(), getByteCount());
+
+	return _deviceMemory ? _deviceMemory->addImage(this) : VK_SUCCESS;
 }
 
+bool MVKImage::validateUseTexelBuffer() {
+	VkExtent2D blockExt = mvkMTLPixelFormatBlockTexelSize(_mtlPixelFormat);
+	bool isUncompressed = blockExt.width == 1 && blockExt.height == 1;
+
+	bool useTexelBuffer = _device->_pMetalFeatures->texelBuffers;								// Texel buffers available
+	useTexelBuffer = useTexelBuffer && isMemoryHostAccessible() && _isLinear && isUncompressed;	// Applicable memory layout
+	useTexelBuffer = useTexelBuffer && _deviceMemory && _deviceMemory->_mtlBuffer;				// Buffer is available to overlay
+
+#if MVK_MACOS
+	useTexelBuffer = useTexelBuffer && !isMemoryHostCoherent();	// macOS cannot use shared memory for texel buffers
+#endif
+
+	return useTexelBuffer;
+}
+
+bool MVKImage::shouldFlushHostMemory() { return isMemoryHostAccessible() && !_usesTexelBuffer; }
+
 // Flushes the device memory at the specified memory range into the MTLTexture. Updates
 // all subresources that overlap the specified range and are in an updatable layout state.
 VkResult MVKImage::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
-	for (auto& subRez : _subresources) {
-		switch (subRez.layoutState) {
-			case VK_IMAGE_LAYOUT_UNDEFINED:			// TODO: VK_IMAGE_LAYOUT_UNDEFINED should be illegal
-			case VK_IMAGE_LAYOUT_PREINITIALIZED:
-			case VK_IMAGE_LAYOUT_GENERAL: {
-				updateMTLTextureContent(subRez, offset, size);
-				break;
+	if (shouldFlushHostMemory()) {
+		for (auto& subRez : _subresources) {
+			switch (subRez.layoutState) {
+				case VK_IMAGE_LAYOUT_UNDEFINED:
+				case VK_IMAGE_LAYOUT_PREINITIALIZED:
+				case VK_IMAGE_LAYOUT_GENERAL: {
+					updateMTLTextureContent(subRez, offset, size);
+					break;
+				}
+				default:
+					break;
 			}
-			default:
-				break;
 		}
 	}
 	return VK_SUCCESS;
@@ -191,14 +216,16 @@
 // Pulls content from the MTLTexture into the device memory at the specified memory range.
 // Pulls from all subresources that overlap the specified range and are in an updatable layout state.
 VkResult MVKImage::pullFromDevice(VkDeviceSize offset, VkDeviceSize size) {
-	for (auto& subRez : _subresources) {
-		switch (subRez.layoutState) {
-			case VK_IMAGE_LAYOUT_GENERAL: {
-                getMTLTextureContent(subRez, offset, size);
-				break;
+	if (shouldFlushHostMemory()) {
+		for (auto& subRez : _subresources) {
+			switch (subRez.layoutState) {
+				case VK_IMAGE_LAYOUT_GENERAL: {
+					getMTLTextureContent(subRez, offset, size);
+					break;
+				}
+				default:
+					break;
 			}
-			default:
-				break;
 		}
 	}
 	return VK_SUCCESS;
@@ -249,6 +276,10 @@
 id<MTLTexture> MVKImage::newMTLTexture() {
     if (_ioSurface) {
         return [getMTLDevice() newTextureWithDescriptor: getMTLTextureDescriptor() iosurface: _ioSurface plane: 0];
+	} else if (_usesTexelBuffer) {
+        return [_deviceMemory->_mtlBuffer newTextureWithDescriptor: getMTLTextureDescriptor()
+															offset: getDeviceMemoryOffset()
+													   bytesPerRow: _subresources[0].layout.rowPitch];
     } else {
         return [getMTLDevice() newTextureWithDescriptor: getMTLTextureDescriptor()];
     }
@@ -337,24 +368,22 @@
 //specified subresource definition, from the underlying memory buffer.
 void MVKImage::updateMTLTextureContent(MVKImageSubresource& subresource,
                                        VkDeviceSize offset, VkDeviceSize size) {
-    // Check if subresource overlaps the memory range.
+
+	VkImageSubresource& imgSubRez = subresource.subresource;
+	VkSubresourceLayout& imgLayout = subresource.layout;
+
+	// Check if subresource overlaps the memory range.
     VkDeviceSize memStart = offset;
     VkDeviceSize memEnd = offset + size;
-    VkDeviceSize imgStart = subresource.layout.offset;
-    VkDeviceSize imgEnd = subresource.layout.offset + subresource.layout.size;
+    VkDeviceSize imgStart = imgLayout.offset;
+    VkDeviceSize imgEnd = imgLayout.offset + imgLayout.size;
     if (imgStart >= memEnd || imgEnd <= memStart) { return; }
 
 	// Don't update if host memory has not been mapped yet.
 	void* pHostMem = getHostMemoryAddress();
 	if ( !pHostMem ) { return; }
 
-    VkImageSubresource& imgSubRez = subresource.subresource;
-    VkSubresourceLayout& imgLayout = subresource.layout;
-
-    uint32_t mipLvl = imgSubRez.mipLevel;
-    uint32_t layer = imgSubRez.arrayLayer;
-
-    VkExtent3D mipExtent = getExtent3D(mipLvl);
+    VkExtent3D mipExtent = getExtent3D(imgSubRez.mipLevel);
     VkImageType imgType = getImageType();
     void* pImgBytes = (void*)((uintptr_t)pHostMem + imgLayout.offset);
 
@@ -363,8 +392,8 @@
     mtlRegion.size = mvkMTLSizeFromVkExtent3D(mipExtent);
 
     [getMTLTexture() replaceRegion: mtlRegion
-                       mipmapLevel: mipLvl
-                             slice: layer
+                       mipmapLevel: imgSubRez.mipLevel
+                             slice: imgSubRez.arrayLayer
                          withBytes: pImgBytes
                        bytesPerRow: (imgType != VK_IMAGE_TYPE_1D ? imgLayout.rowPitch : 0)
                      bytesPerImage: (imgType == VK_IMAGE_TYPE_3D ? imgLayout.depthPitch : 0)];
@@ -374,24 +403,22 @@
 // the underlying MTLTexture, corresponding to the specified subresource definition.
 void MVKImage::getMTLTextureContent(MVKImageSubresource& subresource,
                                     VkDeviceSize offset, VkDeviceSize size) {
-    // Check if subresource overlaps the memory range.
+
+	VkImageSubresource& imgSubRez = subresource.subresource;
+	VkSubresourceLayout& imgLayout = subresource.layout;
+
+	// Check if subresource overlaps the memory range.
     VkDeviceSize memStart = offset;
     VkDeviceSize memEnd = offset + size;
-    VkDeviceSize imgStart = subresource.layout.offset;
-    VkDeviceSize imgEnd = subresource.layout.offset + subresource.layout.size;
+    VkDeviceSize imgStart = imgLayout.offset;
+    VkDeviceSize imgEnd = imgLayout.offset + imgLayout.size;
     if (imgStart >= memEnd || imgEnd <= memStart) { return; }
 
 	// Don't update if host memory has not been mapped yet.
 	void* pHostMem = getHostMemoryAddress();
 	if ( !pHostMem ) { return; }
 
-    VkImageSubresource& imgSubRez = subresource.subresource;
-    VkSubresourceLayout& imgLayout = subresource.layout;
-
-    uint32_t mipLvl = imgSubRez.mipLevel;
-    uint32_t layer = imgSubRez.arrayLayer;
-
-    VkExtent3D mipExtent = getExtent3D(mipLvl);
+    VkExtent3D mipExtent = getExtent3D(imgSubRez.mipLevel);
     VkImageType imgType = getImageType();
     void* pImgBytes = (void*)((uintptr_t)pHostMem + imgLayout.offset);
 
@@ -403,8 +430,8 @@
                   bytesPerRow: (imgType != VK_IMAGE_TYPE_1D ? imgLayout.rowPitch : 0)
                 bytesPerImage: (imgType == VK_IMAGE_TYPE_3D ? imgLayout.depthPitch : 0)
                    fromRegion: mtlRegion
-                  mipmapLevel: mipLvl
-                        slice: layer];
+                  mipmapLevel: imgSubRez.mipLevel
+                        slice: imgSubRez.arrayLayer];
 }
 
 
@@ -444,8 +471,10 @@
 
     _isDepthStencilAttachment = (mvkAreFlagsEnabled(pCreateInfo->usage, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ||
                                  mvkAreFlagsEnabled(mvkVkFormatProperties(pCreateInfo->format).optimalTilingFeatures, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT));
-
+	_canSupportMTLTextureView = !_isDepthStencilAttachment;
     _hasExpectedTexelSize = (mvkMTLPixelFormatBytesPerBlock(_mtlPixelFormat) == mvkVkFormatBytesPerBlock(pCreateInfo->format));
+	_isLinear = validateLinear(pCreateInfo);
+	_usesTexelBuffer = false;
 
    // Calc _byteCount after _mtlTexture & _byteAlignment
     for (uint32_t mipLvl = 0; mipLvl < _mipLevels; mipLvl++) {
@@ -453,9 +482,45 @@
     }
 
     initSubresources(pCreateInfo);
-	initMTLTextureViewSupport();
 }
 
+bool MVKImage::validateLinear(const VkImageCreateInfo* pCreateInfo) {
+	if (pCreateInfo->tiling != VK_IMAGE_TILING_LINEAR) { return false; }
+
+	if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, imageType must be VK_IMAGE_TYPE_2D."));
+		return false;
+	}
+
+	if (_isDepthStencilAttachment) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, format must not be a depth/stencil format."));
+		return false;
+	}
+
+	if (_mipLevels > 1) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, mipLevels must be 1."));
+		return false;
+	}
+
+	if (_arrayLayers > 1) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, arrayLayers must be 1."));
+		return false;
+	}
+
+	if (_samples > 1) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, samples must be VK_SAMPLE_COUNT_1_BIT."));
+		return false;
+	}
+
+	if ( !mvkAreOnlyAnyFlagsEnabled(_usage, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) ) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, usage must only include VK_IMAGE_USAGE_TRANSFER_SRC_BIT and/or VK_IMAGE_USAGE_TRANSFER_DST_BIT."));
+		return false;
+	}
+
+	return true;
+}
+
+
 // Initializes the subresource definitions.
 void MVKImage::initSubresources(const VkImageCreateInfo* pCreateInfo) {
 	_subresources.reserve(_mipLevels * _arrayLayers);
@@ -498,13 +563,8 @@
 	layout.depthPitch = bytesPerLayerCurrLevel;
 }
 
-// Determines whether this image can support Metal texture views,
-// and sets the _canSupportMTLTextureView variable appropriately.
-void MVKImage::initMTLTextureViewSupport() {
-	_canSupportMTLTextureView = !_isDepthStencilAttachment;
-}
-
 MVKImage::~MVKImage() {
+	if (_deviceMemory) { _deviceMemory->removeImage(this); }
 	resetMTLTexture();
     resetIOSurface();
 }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKResource.h b/MoltenVK/MoltenVK/GPUObjects/MVKResource.h
index 3680ee8..900f3a4 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKResource.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKResource.h
@@ -71,17 +71,9 @@
 	
 #pragma mark Construction
 
-	/** Constructs an instance for the specified device. */
     MVKResource(MVKDevice* device) : MVKBaseDeviceObject(device) {}
 
-	/** Destructor. */
-	~MVKResource() override;
-
 protected:
-	friend MVKDeviceMemory;
-	
-	virtual VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) { return VK_SUCCESS; };
-	virtual VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) { return VK_SUCCESS; };
 	virtual bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
 								   VkPipelineStageFlags dstStageMask,
 								   VkMemoryBarrier* pMemoryBarrier);
@@ -90,5 +82,4 @@
 	VkDeviceSize _deviceMemoryOffset = 0;
     VkDeviceSize _byteCount = 0;
     VkDeviceSize _byteAlignment = 0;
-	bool _isBuffer  = false;
 };
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm b/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm
index fc8aaef..1060985 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm
@@ -23,18 +23,13 @@
 #pragma mark MVKResource
 
 VkResult MVKResource::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
-	if (_deviceMemory) { _deviceMemory->removeResource(this); }
-
 	_deviceMemory = mvkMem;
 	_deviceMemoryOffset = memOffset;
-
-	return _deviceMemory ? _deviceMemory->addResource(this) : VK_SUCCESS;
+	return VK_SUCCESS;
 }
 
-/**
- * Returns whether the specified global memory barrier requires a sync between this
- * texture and host memory for the purpose of the host reading texture memory.
- */
+// Returns whether the specified global memory barrier requires a sync between this
+// texture and host memory for the purpose of the host reading texture memory.
 bool MVKResource::needsHostReadSync(VkPipelineStageFlags srcStageMask,
 									VkPipelineStageFlags dstStageMask,
 									VkMemoryBarrier* pMemoryBarrier) {
@@ -48,10 +43,3 @@
 #endif
 }
 
-
-#pragma mark Construction
-
-MVKResource::~MVKResource() {
-    if (_deviceMemory) { _deviceMemory->removeResource(this); }
-};
-
diff --git a/MoltenVK/MoltenVK/Utility/MVKFoundation.h b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
index a08dfe9..32b4043 100644
--- a/MoltenVK/MoltenVK/Utility/MVKFoundation.h
+++ b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
@@ -363,7 +363,11 @@
 template<typename T1, typename T2>
 bool mvkIsAnyFlagEnabled(T1 value, const T2 bitMask) { return !!(value & bitMask); }
 
-/** Returns whether the specified value has ONLY of the flags specified in bitMask enabled (set to 1). */
+/** Returns whether the specified value has ALL of the flags specified in bitMask enabled (set to 1), and no others. */
 template<typename T1, typename T2>
-bool mvkAreOnlyFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
+bool mvkAreOnlyAllFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
+
+/** Returns whether the specified value has ONLY one or more of the flags specified in bitMask enabled (set to 1), and no others. */
+template<typename T1, typename T2>
+bool mvkAreOnlyAnyFlagsEnabled(T1 value, const T2 bitMask) { return (mvkIsAnyFlagEnabled(value, bitMask) && ((value | bitMask) == bitMask)); }