Merge pull request #173 from billhollings/master

Device memory management changes.
diff --git a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
index 44609f9..71d5457 100644
--- a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
+++ b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
@@ -48,13 +48,13 @@
  */
 #define MVK_VERSION_MAJOR   1
 #define MVK_VERSION_MINOR   0
-#define MVK_VERSION_PATCH   9
+#define MVK_VERSION_PATCH   10
 
 #define MVK_MAKE_VERSION(major, minor, patch)    (((major) * 10000) + ((minor) * 100) + (patch))
 #define MVK_VERSION     MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH)
 
 
-#define VK_MVK_MOLTENVK_SPEC_VERSION            4
+#define VK_MVK_MOLTENVK_SPEC_VERSION            5
 #define VK_MVK_MOLTENVK_EXTENSION_NAME			"VK_MVK_moltenvk"
 
 /**
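
A quick sanity check of the version packing above: MVK_MAKE_VERSION folds major/minor/patch into a single integer with two decimal digits per field, so this release encodes as 10010. A minimal, self-contained sketch using the macro exactly as defined above:

    #define MVK_MAKE_VERSION(major, minor, patch)    (((major) * 10000) + ((minor) * 100) + (patch))
    static_assert(MVK_MAKE_VERSION(1, 0, 10) == 10010, "v1.0.10 packs to 10010");
    static_assert(MVK_MAKE_VERSION(1, 0, 10) > MVK_MAKE_VERSION(1, 0, 9), "patch bumps order correctly");
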
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h
index a9d2e56..369ed4a 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.h
@@ -36,6 +36,9 @@
 	/** Returns the memory requirements of this resource by populating the specified structure. */
 	VkResult getMemoryRequirements(VkMemoryRequirements* pMemoryRequirements) override;
 
+	/** Binds this resource to the specified offset within the specified memory allocation. */
+	VkResult bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) override;
+
 	/** Applies the specified global memory barrier. */
     void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
                             VkPipelineStageFlags dstStageMask,
@@ -54,10 +57,10 @@
 #pragma mark Metal
 
 	/** Returns the Metal buffer underlying this memory allocation. */
-	id<MTLBuffer> getMTLBuffer();
+	inline id<MTLBuffer> getMTLBuffer() { return _deviceMemory ? _deviceMemory->getMTLBuffer() : nullptr; }
 
 	/** Returns the offset at which the contents of this instance starts within the underlying Metal buffer. */
-	NSUInteger getMTLBufferOffset();
+	inline NSUInteger getMTLBufferOffset() { return _deviceMemoryOffset; }
 
 
 #pragma mark Construction
@@ -69,17 +72,9 @@
 protected:
 	using MVKResource::needsHostReadSync;
 
-    void* map(VkDeviceSize offset, VkDeviceSize size) override;
-	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) override;
-	VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) override;
-    VkResult copyMTLBufferContent(VkDeviceSize offset, VkDeviceSize size, bool intoMTLBuffer);
-    NSRange mtlBufferRange(VkDeviceSize offset, VkDeviceSize size);
 	bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
 						   VkPipelineStageFlags dstStageMask,
 						   VkBufferMemoryBarrier* pBufferMemoryBarrier);
-
-    id<MTLBuffer> _mtlBuffer;
-    std::mutex _lock;
 };
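
With the accessors above inlined, an MVKBuffer is now a thin window onto its device memory's MTLBuffer. A hedged sketch of how an encoding path might consume them (mvkBuff and mtlRenderEncoder are hypothetical locals, not names from this patch):

    // getMTLBuffer() returns nil until device memory is bound, so guard the encode.
    id<MTLBuffer> mtlBuff = mvkBuff->getMTLBuffer();
    if (mtlBuff) {
        [mtlRenderEncoder setVertexBuffer: mtlBuff
                                   offset: mvkBuff->getMTLBufferOffset()
                                  atIndex: 0];
    }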
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm
index 8357c97..ed71b3f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKBuffer.mm
@@ -31,11 +31,19 @@
 
 VkResult MVKBuffer::getMemoryRequirements(VkMemoryRequirements* pMemoryRequirements) {
 	pMemoryRequirements->size = getByteCount();
-	pMemoryRequirements->alignment = getByteAlignment();
+	pMemoryRequirements->alignment = _byteAlignment;
 	pMemoryRequirements->memoryTypeBits = _device->getPhysicalDevice()->getAllMemoryTypes();
 	return VK_SUCCESS;
 }
 
+VkResult MVKBuffer::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
+	if (_deviceMemory) { _deviceMemory->removeBuffer(this); }
+
+	MVKResource::bindDeviceMemory(mvkMem, memOffset);
+
+	return _deviceMemory ? _deviceMemory->addBuffer(this) : VK_SUCCESS;
+}
+
 void MVKBuffer::applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
 								   VkPipelineStageFlags dstStageMask,
 								   VkMemoryBarrier* pMemoryBarrier,
@@ -73,118 +81,20 @@
 #if MVK_MACOS
 	return (mvkIsAnyFlagEnabled(dstStageMask, (VK_PIPELINE_STAGE_HOST_BIT)) &&
 			mvkIsAnyFlagEnabled(pBufferMemoryBarrier->dstAccessMask, (VK_ACCESS_HOST_READ_BIT)) &&
-			_deviceMemory->isMemoryHostAccessible() && !_deviceMemory->isMemoryHostCoherent());
+			isMemoryHostAccessible() && !isMemoryHostCoherent());
 #endif
 }
 
-/** Called when the bound device memory is updated. Flushes any associated resource memory. */
-VkResult MVKBuffer::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
-    VkResult rslt = copyMTLBufferContent(offset, size, true);
-
-#if MVK_MACOS
-    if (_deviceMemory->getMTLStorageMode() == MTLStorageModeManaged) {
-        [getMTLBuffer() didModifyRange: mtlBufferRange(offset, size)];
-    }
-#endif
-
-    return rslt;
-}
-
-// Called when the bound device memory is invalidated. Pulls any associated resource memory from the device.
-VkResult MVKBuffer::pullFromDevice(VkDeviceSize offset, VkDeviceSize size) {
-    VkResult rslt = copyMTLBufferContent(offset, size, false);
-
-    // If we are pulling to populate a newly created device memory MTLBuffer,
-    // from a previously created local MTLBuffer, remove the local MTLBuffer.
-	// Use autorelease in case the earlier MTLBuffer was encoded.
-    if (_mtlBuffer && _deviceMemory->getMTLBuffer()) {
-        [_mtlBuffer autorelease];
-        _mtlBuffer = nil;
-    }
-
-    return rslt;
-}
-
-void* MVKBuffer::map(VkDeviceSize offset, VkDeviceSize size) {
-    return (void*)((uintptr_t)getMTLBuffer().contents + mtlBufferRange(offset, size).location);
-}
-
-// Copies host content into or out of the MTLBuffer.
-VkResult MVKBuffer::copyMTLBufferContent(VkDeviceSize offset, VkDeviceSize size, bool intoMTLBuffer) {
-
-    // Only copy if there is separate host memory and this buffer overlaps the host memory range
-    void* pMemBase = _deviceMemory->getLogicalMappedMemory();
-    if (pMemBase && doesOverlap(offset, size)) {
-
-        NSRange copyRange = mtlBufferRange(offset, size);
-        VkDeviceSize memOffset = max(offset, _deviceMemoryOffset);
-
-//        MVKLogDebug("Copying contents %s buffer %p at buffer offset %d memory offset %d and length %d.", (intoMTLBuffer ? "to" : "from"), this, copyRange.location, memOffset, copyRange.length);
-
-        void* pMemBytes = (void*)((uintptr_t)pMemBase + memOffset);
-        void* pMTLBuffBytes = (void*)((uintptr_t)getMTLBuffer().contents + copyRange.location);
-
-        // Copy in the direction indicated.
-        // Don't copy if the source and destination are the same address, which will
-        // occur if the underlying MTLBuffer comes from the device memory object.
-        if (pMemBytes != pMTLBuffBytes) {
-//            MVKLogDebug("Copying buffer contents.");
-            if (intoMTLBuffer) {
-                memcpy(pMTLBuffBytes, pMemBytes, copyRange.length);
-            } else {
-                memcpy(pMemBytes, pMTLBuffBytes, copyRange.length);
-            }
-        }
-    }
-
-    return VK_SUCCESS;
-}
-
-
-#pragma mark Metal
-
-// If a local MTLBuffer already exists, use it.
-// If the device memory has a MTLBuffer, use it.
-// Otherwise, create a new MTLBuffer and use it from now on.
-id<MTLBuffer> MVKBuffer::getMTLBuffer() {
-
-    if (_mtlBuffer) { return _mtlBuffer; }
-
-    id<MTLBuffer> devMemMTLBuff = _deviceMemory->getMTLBuffer();
-    if (devMemMTLBuff) { return devMemMTLBuff; }
-
-	// Lock and check again in case another thread has created the buffer.
-    lock_guard<mutex> lock(_lock);
-    if (_mtlBuffer) { return _mtlBuffer; }
-    
-    NSUInteger mtlBuffLen = mvkAlignByteOffset(_byteCount, _byteAlignment);
-    _mtlBuffer = [getMTLDevice() newBufferWithLength: mtlBuffLen
-                                             options: _deviceMemory->getMTLResourceOptions()];     // retained
-//    MVKLogDebug("MVKBuffer %p creating local MTLBuffer of size %d.", this, _mtlBuffer.length);
-    return _mtlBuffer;
-}
-
-NSUInteger MVKBuffer::getMTLBufferOffset() { return _mtlBuffer ? 0 : _deviceMemoryOffset; }
-
-// Returns an NSRange that maps the specified host memory range to the MTLBuffer.
-NSRange MVKBuffer::mtlBufferRange(VkDeviceSize offset, VkDeviceSize size) {
-    NSUInteger localRangeLoc = min((offset > _deviceMemoryOffset) ? (offset - _deviceMemoryOffset) : 0, _byteCount);
-    NSUInteger localRangeLen = min(size, _byteCount - localRangeLoc);
-    return NSMakeRange(getMTLBufferOffset() + localRangeLoc, localRangeLen);
-}
-
 
 #pragma mark Construction
 
 MVKBuffer::MVKBuffer(MVKDevice* device, const VkBufferCreateInfo* pCreateInfo) : MVKResource(device) {
     _byteAlignment = _device->_pMetalFeatures->mtlBufferAlignment;
     _byteCount = pCreateInfo->size;
-    _mtlBuffer = nil;
 }
 
 MVKBuffer::~MVKBuffer() {
-    [_mtlBuffer release];
-    _mtlBuffer = nil;
+	if (_deviceMemory) { _deviceMemory->removeBuffer(this); }
 }
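
bindDeviceMemory() above is the landing point for vkBindBufferMemory(). A minimal sketch of the application-side sequence it serves, using standard Vulkan calls (chooseMemoryTypeIndex() is a hypothetical helper; error handling elided):

    VkMemoryRequirements memReqs;
    vkGetBufferMemoryRequirements(device, buffer, &memReqs);    // MVKBuffer::getMemoryRequirements()

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType           = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize  = memReqs.size;
    allocInfo.memoryTypeIndex = chooseMemoryTypeIndex(memReqs.memoryTypeBits);  // hypothetical

    VkDeviceMemory memory;
    vkAllocateMemory(device, &allocInfo, nullptr, &memory);     // MVKDeviceMemory constructor
    vkBindBufferMemory(device, buffer, memory, 0);              // MVKBuffer::bindDeviceMemory(), whose
                                                                // addBuffer() call runs ensureMTLBuffer()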
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
index c09dacc..433032b 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.h
@@ -23,7 +23,8 @@
 
 #import <Metal/Metal.h>
 
-class MVKResource;
+class MVKBuffer;
+class MVKImage;
 
 
 #pragma mark MVKDeviceMemory
@@ -42,18 +43,11 @@
     /** Returns the memory already committed by this instance. */
     inline VkDeviceSize getDeviceMemoryCommitment() { return _allocationSize; }
 
-    /**
-     * Returns the host memory address that represents what would be the beginning of the 
-     * mapped address space if the entire device memory represented by this object were to
-     * be mapped to host memory.
-     *
-     * This is the address to which the offset value in the vMapMemory() call references.
-     * It only has physical meaning if offset is zero, otherwise it is a logical address
-     * used to calculate resource offsets.
-     *
-     * This function must only be called between vkMapMemory() and vkUnmapMemory() calls.
-     */
-    inline void* getLogicalMappedMemory() { return _pLogicalMappedMemory; }
+	/**
+	 * Returns the host memory address of this memory, or NULL if the memory
+	 * is marked as device-only and cannot be mapped to a host address.
+	 */
+	inline void* getHostMemoryAddress() { return _pMemory; }
 
 	/**
 	 * Maps the memory address at the specified offset from the start of this memory allocation,
@@ -64,9 +58,6 @@
 	/** Unmaps a previously mapped memory range. */
 	void unmap();
 
-    /** Allocates mapped host memory, and returns a pointer to it. */
-    void* allocateMappedMemory(VkDeviceSize offset, VkDeviceSize size);
-
 	/** 
 	 * If this memory is host-visible, the specified memory range is flushed to the device.
 	 * Normally, flushing will only occur if the device memory is non-coherent, but flushing
@@ -107,25 +98,30 @@
     ~MVKDeviceMemory() override;
 
 protected:
-	friend MVKResource;
+	friend MVKBuffer;
+	friend MVKImage;
 
 	VkDeviceSize adjustMemorySize(VkDeviceSize size, VkDeviceSize offset);
-    bool mapToUniqueResource(VkDeviceSize offset, VkDeviceSize size);
-	void addResource(MVKResource* rez);
-	void removeResource(MVKResource* rez);
+	VkResult addBuffer(MVKBuffer* mvkBuff);
+	void removeBuffer(MVKBuffer* mvkBuff);
+	VkResult addImage(MVKImage* mvkImg);
+	void removeImage(MVKImage* mvkImg);
+	bool ensureMTLBuffer();
+	bool ensureHostMemory();
+	void freeHostMemory();
 
-	std::vector<MVKResource*> _resources;
+	std::vector<MVKBuffer*> _buffers;
+	std::vector<MVKImage*> _images;
 	std::mutex _rezLock;
-    VkDeviceSize _allocationSize;
-	VkDeviceSize _mapOffset;
-	VkDeviceSize _mapSize;
-	id<MTLBuffer> _mtlBuffer;
-	std::mutex _lock;
+    VkDeviceSize _allocationSize = 0;
+	VkDeviceSize _mapOffset = 0;
+	VkDeviceSize _mapSize = 0;
+	id<MTLBuffer> _mtlBuffer = nil;
+	void* _pMemory = nullptr;
+	void* _pHostMemory = nullptr;
+	bool _isMapped = false;
 	MTLResourceOptions _mtlResourceOptions;
 	MTLStorageMode _mtlStorageMode;
 	MTLCPUCacheMode _mtlCPUCacheMode;
-    void* _pMappedHostAllocation;
-    void* _pMappedMemory;
-    void* _pLogicalMappedMemory;
 };
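
ensureMTLBuffer(), declared above and defined in the .mm below, migrates any existing host allocation into a new MTLBuffer. The Metal behavior it relies on is that newBufferWithBytes:length:options: copies the source bytes at creation time, after which the malloc'ed block can be freed. That migration in isolation, as a hedged sketch with hypothetical locals:

    // pHost came from posix_memalign(); memLen is the buffer-aligned allocation size.
    id<MTLBuffer> mtlBuff = [mtlDevice newBufferWithBytes: pHost
                                                   length: memLen
                                                  options: MTLResourceStorageModeShared];   // retained; copies pHost
    free(pHost);                            // the host-side copy is no longer needed
    void* pMemory = mtlBuff.contents;       // host-visible backing from here on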
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
index ee90132..f1cbbeb 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDeviceMemory.mm
@@ -17,6 +17,7 @@
  */
 
 #include "MVKDeviceMemory.h"
+#include "MVKBuffer.h"
 #include "MVKImage.h"
 #include "mvk_datatypes.h"
 #include "MVKFoundation.h"
@@ -27,134 +28,60 @@
 
 #pragma mark MVKDeviceMemory
 
-// Metal does not support the concept of mappable device memory separate from individual
-// resources. There are a number of potentially conflicting requirements defined by Vulkan
-// that make it a challenge to map device memory to Metal resources.
-// 1) Memory can be mapped and populated prior to any resources being bound.
-// 2) Coherent memory can be mapped forever and simply overwritten without regard for
-//    requiring host generated update indications.
-// 3) MTLTextures are never natively coherent.
-// 4) MTLBuffers are restricted to smaller sizes (eg. 256MB - 1GB) than MTLTextures.
-//
-// To try to deal with all of this...
-// 1) If the mapped range falls within a single resource, we map it directly. This allows
-//    us to maximize the size of the resources (images and buffers can be larger)...and
-//    coherent buffers can be mapped directly.
-// 2) If we can't map to a single resource, and memory must be coherent, allocate a single
-//    coherent MTLBuffer for the entire memory range. If any attached resources already have
-//    content, the subsequent coherent pullFromDevice() will populate the larger MTLBuffer.
-// 3) If we can't map to a single resource, and memory is not coherent, we can allocate the
-//    host portion as an aligned malloc, and the individual resources will copy to and from it.
-// 4) There is no way around requiring coherent memory that is used for image to be updated
-//    by the host, or at least unmapped, so that we have a signal to update MTLTexture content.
 VkResult MVKDeviceMemory::map(VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData) {
 
-    if ( !isMemoryHostAccessible() ) {
-        return mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Private GPU-only memory cannot be mapped to host memory.");
-    }
+	if ( !isMemoryHostAccessible() ) {
+		return mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Private GPU-only memory cannot be mapped to host memory.");
+	}
 
-    if (_pMappedMemory) {
-        return mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Memory is already mapped. Call vkUnmapMemory() first.");
-    }
+	if (_isMapped) {
+		return mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Memory is already mapped. Call vkUnmapMemory() first.");
+	}
 
-    VkDeviceSize mapSize = adjustMemorySize(size, offset);
-//    MVKLogDebug("Mapping device memory %p with offset %d and size %d.", this, offset, mapSize);
-    if ( !mapToUniqueResource(offset, mapSize) ) {
-        if (isMemoryHostCoherent()) {
-            if ( !_mtlBuffer ) {
+	if ( !ensureHostMemory() ) {
+		return mvkNotifyErrorWithText(VK_ERROR_OUT_OF_HOST_MEMORY, "Could not allocate %llu bytes of host-accessible device memory.", _allocationSize);
+	}
 
-				// Lock and check again in case another thread has created the buffer.
-				lock_guard<mutex> lock(_lock);
-				if ( !_mtlBuffer ) {
-					NSUInteger mtlBuffLen = mvkAlignByteOffset(_allocationSize, _device->_pMetalFeatures->mtlBufferAlignment);
-					_mtlBuffer = [getMTLDevice() newBufferWithLength: mtlBuffLen options: _mtlResourceOptions];     // retained
-//                	MVKLogDebug("Allocating host mapped memory %p with offset %d and size %d via underlying coherent MTLBuffer %p of size %d.", this, offset, mapSize, _mtlBuffer , _mtlBuffer.length);
-				}
-			}
-            _pLogicalMappedMemory = _mtlBuffer.contents;
-            _pMappedMemory = (void*)((uintptr_t)_pLogicalMappedMemory + offset);
-        } else {
-//            MVKLogDebug("Allocating host mapped memory %p with offset %d and size %d via host allocation.", this, offset, mapSize);
-            _pMappedMemory = allocateMappedMemory(offset, mapSize);
-        }
-    }
+	_mapOffset = offset;
+	_mapSize = adjustMemorySize(size, offset);
+	_isMapped = true;
 
-    *ppData = _pMappedMemory;
-    _mapOffset = offset;
-    _mapSize = mapSize;
+	*ppData = (void*)((uintptr_t)_pMemory + offset);
 
 	// Coherent memory does not require flushing by app, so we must flush now, to handle any texture updates.
-	if (isMemoryHostCoherent()) { pullFromDevice(offset, size, true); }
+	pullFromDevice(offset, size, isMemoryHostCoherent());
 
 	return VK_SUCCESS;
 }
 
 void MVKDeviceMemory::unmap() {
-//    MVKLogDebug("Unapping device memory %p.", this);
 
-    if (!_pMappedMemory) {
-        mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Memory is not mapped. Call vkMapMemory() first.");
-        return;
-    }
+	if ( !_isMapped ) {
+		mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Memory is not mapped. Call vkMapMemory() first.");
+		return;
+	}
 
 	// Coherent memory does not require flushing by app, so we must flush now.
-	if (isMemoryHostCoherent()) { flushToDevice(_mapOffset, _mapSize, true); }
-
-    free(_pMappedHostAllocation);
-    _pMappedHostAllocation = VK_NULL_HANDLE;
-    _pMappedMemory = VK_NULL_HANDLE;
-    _pLogicalMappedMemory = VK_NULL_HANDLE;
+	flushToDevice(_mapOffset, _mapSize, isMemoryHostCoherent());
 
 	_mapOffset = 0;
 	_mapSize = 0;
-}
-
-// Attempts to map the memory defined by the offset and size to a unique resource, and returns
-// whether such a mapping was possible. If it was, the mapped region is stored in _pMappedMemory.
-bool MVKDeviceMemory::mapToUniqueResource(VkDeviceSize offset, VkDeviceSize size) {
-	lock_guard<mutex> lock(_rezLock);
-	MVKResource* uniqueRez = nullptr;
-	for (auto& rez : _resources) {
-		if (rez->doesContain(offset, size)) {
-			if (uniqueRez) { return false; }	// More than one resource mapped to the region
-			uniqueRez = rez;
-		}
-    }
-
-	if (uniqueRez) {
-		_pMappedMemory = uniqueRez->map(offset, size);
-		return true;
-	}
-
-	return false;
-}
-
-void* MVKDeviceMemory::allocateMappedMemory(VkDeviceSize offset, VkDeviceSize size) {
-
-    void* pMapAlloc = VK_NULL_HANDLE;
-
-//    MVKLogDebug("Allocating %d bytes of device memory %p.", size, this);
-
-    size_t mmAlign = _device->_pProperties->limits.minMemoryMapAlignment;
-    VkDeviceSize deltaOffset = offset % mmAlign;
-    int err = posix_memalign(&pMapAlloc, mmAlign, mvkAlignByteOffset(size + deltaOffset, mmAlign));
-    if (err) {
-        mvkNotifyErrorWithText(VK_ERROR_MEMORY_MAP_FAILED, "Could not allocate host memory to map to GPU memory.");
-        return nullptr;
-    }
-
-    _pMappedHostAllocation = pMapAlloc;
-    _pLogicalMappedMemory = (void*)((uintptr_t)pMapAlloc - offset);
-
-    return (void*)((uintptr_t)pMapAlloc + deltaOffset);
+	_isMapped = false;
 }
 
 VkResult MVKDeviceMemory::flushToDevice(VkDeviceSize offset, VkDeviceSize size, bool evenIfCoherent) {
 	// Coherent memory is flushed on unmap(), so it is only flushed if forced
-	if (size > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
+	VkDeviceSize memSize = adjustMemorySize(size, offset);
+	if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
+
+#if MVK_MACOS
+		if (_mtlBuffer && _mtlStorageMode == MTLStorageModeManaged) {
+			[_mtlBuffer didModifyRange: NSMakeRange(offset, memSize)];
+		}
+#endif
+
 		lock_guard<mutex> lock(_rezLock);
-		VkDeviceSize memSize = adjustMemorySize(size, offset);
-        for (auto& rez : _resources) { rez->flushToDevice(offset, memSize); }
+        for (auto& img : _images) { img->flushToDevice(offset, memSize); }
 	}
 	return VK_SUCCESS;
 }
@@ -164,54 +91,118 @@
     VkDeviceSize memSize = adjustMemorySize(size, offset);
 	if (memSize > 0 && isMemoryHostAccessible() && (evenIfCoherent || !isMemoryHostCoherent()) ) {
 		lock_guard<mutex> lock(_rezLock);
-        for (auto& rez : _resources) { rez->pullFromDevice(offset, memSize); }
+        for (auto& img : _images) { img->pullFromDevice(offset, memSize); }
 	}
 	return VK_SUCCESS;
 }
 
-/** 
- * If the size parameter is the special constant VK_WHOLE_SIZE, returns the size of memory 
- * between offset and the end of the buffer, otherwise simply returns size.
- */
+// If the size parameter is the special constant VK_WHOLE_SIZE, returns the size of memory
+// between offset and the end of the allocation, otherwise simply returns size.
 VkDeviceSize MVKDeviceMemory::adjustMemorySize(VkDeviceSize size, VkDeviceSize offset) {
 	return (size == VK_WHOLE_SIZE) ? (_allocationSize - offset) : size;
 }
 
-void MVKDeviceMemory::addResource(MVKResource* rez) {
+VkResult MVKDeviceMemory::addBuffer(MVKBuffer* mvkBuff) {
 	lock_guard<mutex> lock(_rezLock);
-	_resources.push_back(rez);
+
+	if (!ensureMTLBuffer() ) {
+		return mvkNotifyErrorWithText(VK_ERROR_OUT_OF_DEVICE_MEMORY, "Could not bind a VkBuffer to a VkDeviceMemory of size %llu bytes. The maximum memory-aligned size of a VkDeviceMemory that supports a VkBuffer is %llu bytes.", _allocationSize, _device->_pMetalFeatures->maxMTLBufferSize);
+	}
+
+	_buffers.push_back(mvkBuff);
+
+	return VK_SUCCESS;
 }
 
-void MVKDeviceMemory::removeResource(MVKResource* rez) {
+void MVKDeviceMemory::removeBuffer(MVKBuffer* mvkBuff) {
 	lock_guard<mutex> lock(_rezLock);
-	mvkRemoveAllOccurances(_resources, rez);
+	mvkRemoveAllOccurances(_buffers, mvkBuff);
+}
+
+VkResult MVKDeviceMemory::addImage(MVKImage* mvkImg) {
+	lock_guard<mutex> lock(_rezLock);
+
+	_images.push_back(mvkImg);
+
+	return VK_SUCCESS;
+}
+
+void MVKDeviceMemory::removeImage(MVKImage* mvkImg) {
+	lock_guard<mutex> lock(_rezLock);
+	mvkRemoveAllOccurances(_images, mvkImg);
+}
+
+// Ensures that this instance is backed by a MTLBuffer object,
+// creating the MTLBuffer if needed, and returns whether it was successful.
+bool MVKDeviceMemory::ensureMTLBuffer() {
+
+	if (_mtlBuffer) { return true; }
+
+	NSUInteger memLen = mvkAlignByteOffset(_allocationSize, _device->_pMetalFeatures->mtlBufferAlignment);
+
+	if (memLen > _device->_pMetalFeatures->maxMTLBufferSize) { return false; }
+
+	// If host memory was already allocated, it is copied into the new MTLBuffer, and then released.
+	if (_pHostMemory) {
+		_mtlBuffer = [getMTLDevice() newBufferWithBytes: _pHostMemory length: memLen options: _mtlResourceOptions];     // retained
+		freeHostMemory();
+	} else {
+		_mtlBuffer = [getMTLDevice() newBufferWithLength: memLen options: _mtlResourceOptions];     // retained
+	}
+	_pMemory = isMemoryHostAccessible() ? _mtlBuffer.contents : nullptr;
+
+	return true;
+}
+
+// Ensures that host-accessible memory is available, allocating it if necessary.
+bool MVKDeviceMemory::ensureHostMemory() {
+
+	if (_pMemory) { return true; }
+
+	if ( !_pHostMemory) {
+		size_t memAlign = _device->_pMetalFeatures->mtlBufferAlignment;
+		NSUInteger memLen = mvkAlignByteOffset(_allocationSize, memAlign);
+		int err = posix_memalign(&_pHostMemory, memAlign, memLen);
+		if (err) { return false; }
+	}
+
+	_pMemory = _pHostMemory;
+
+	return true;
+}
+
+void MVKDeviceMemory::freeHostMemory() {
+	free(_pHostMemory);
+	_pHostMemory = nullptr;
 }
 
 MVKDeviceMemory::MVKDeviceMemory(MVKDevice* device,
 								 const VkMemoryAllocateInfo* pAllocateInfo,
 								 const VkAllocationCallbacks* pAllocator) : MVKBaseDeviceObject(device) {
-	_allocationSize = pAllocateInfo->allocationSize;
-	_mtlBuffer = nil;
-	_mapOffset = 0;
-	_mapSize = 0;
-
-    _pMappedHostAllocation = VK_NULL_HANDLE;
-    _pMappedMemory = VK_NULL_HANDLE;
-    _pLogicalMappedMemory = VK_NULL_HANDLE;
-
 	// Set Metal memory parameters
 	VkMemoryPropertyFlags vkMemProps = _device->_pMemoryProperties->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;
 	_mtlResourceOptions = mvkMTLResourceOptionsFromVkMemoryPropertyFlags(vkMemProps);
 	_mtlStorageMode = mvkMTLStorageModeFromVkMemoryPropertyFlags(vkMemProps);
 	_mtlCPUCacheMode = mvkMTLCPUCacheModeFromVkMemoryPropertyFlags(vkMemProps);
+
+	_allocationSize = pAllocateInfo->allocationSize;
+
+	// If memory needs to be coherent it must reside in an MTLBuffer, since an open-ended map() must work.
+	if (isMemoryHostCoherent() && !ensureMTLBuffer() ) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_OUT_OF_DEVICE_MEMORY, "Could not allocate a host-coherent VkDeviceMemory of size %llu bytes. The maximum memory-aligned size of a host-coherent VkDeviceMemory is %llu bytes.", _allocationSize, _device->_pMetalFeatures->maxMTLBufferSize));
+	}
 }
 
 MVKDeviceMemory::~MVKDeviceMemory() {
     // Unbind any resources that are using me. Iterate a copy of the collection,
     // to allow the resource to callback to remove itself from the collection.
-    auto rezCopies = _resources;
-    for (auto& rez : rezCopies) { rez->bindDeviceMemory(nullptr, 0); }
+    auto buffCopies = _buffers;
+    for (auto& buf : buffCopies) { buf->bindDeviceMemory(nullptr, 0); }
+	auto imgCopies = _images;
+	for (auto& img : imgCopies) { img->bindDeviceMemory(nullptr, 0); }
 
 	[_mtlBuffer release];
 	_mtlBuffer = nil;
+
+	freeHostMemory();
 }
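
Because map() above always hands out an address inside _pMemory, the standard Vulkan mapping idiom works unchanged against this implementation. A minimal sketch for the non-coherent case, hence the explicit flush (memory is assumed allocated and bound as above; srcBytes is hypothetical):

    void* pData = nullptr;
    vkMapMemory(device, memory, offset, size, 0, &pData);       // MVKDeviceMemory::map()
    memcpy(pData, srcBytes, (size_t)size);

    VkMappedMemoryRange range = {};
    range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = offset;
    range.size   = size;
    vkFlushMappedMemoryRanges(device, 1, &range);               // MVKDeviceMemory::flushToDevice()
    vkUnmapMemory(device, memory);                              // unmap() also flushes, forced if coherent
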
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
index 3e83c41..087b5f2 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
@@ -107,6 +107,9 @@
 	/** Returns the memory requirements of this resource by populating the specified structure. */
 	VkResult getMemoryRequirements(VkMemoryRequirements* pMemoryRequirements) override;
 
+	/** Binds this resource to the specified offset within the specified memory allocation. */
+	VkResult bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) override;
+
 	/** Applies the specified global memory barrier. */
     void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
                             VkPipelineStageFlags dstStageMask,
@@ -191,11 +194,13 @@
 	~MVKImage() override;
 
 protected:
+	friend class MVKDeviceMemory;
 	friend class MVKImageView;
 	using MVKResource::needsHostReadSync;
 
 	MVKImageSubresource* getSubresource(uint32_t mipLevel, uint32_t arrayLayer);
-	void initMTLTextureViewSupport();
+	bool validateLinear(const VkImageCreateInfo* pCreateInfo);
+	bool validateUseTexelBuffer();
 	void initSubresources(const VkImageCreateInfo* pCreateInfo);
 	void initSubresourceLayout(MVKImageSubresource& imgSubRez);
 	virtual id<MTLTexture> newMTLTexture();
@@ -204,9 +209,9 @@
 	MTLTextureDescriptor* getMTLTextureDescriptor();
     void updateMTLTextureContent(MVKImageSubresource& subresource, VkDeviceSize offset, VkDeviceSize size);
     void getMTLTextureContent(MVKImageSubresource& subresource, VkDeviceSize offset, VkDeviceSize size);
-    void* map(VkDeviceSize offset, VkDeviceSize size) override;
-	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) override;
-	VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) override;
+	bool shouldFlushHostMemory();
+	VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size);
+	VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size);
 	bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
 						   VkPipelineStageFlags dstStageMask,
 						   VkImageMemoryBarrier* pImageMemoryBarrier);
@@ -225,6 +230,8 @@
     bool _isDepthStencilAttachment;
 	bool _canSupportMTLTextureView;
     bool _hasExpectedTexelSize;
+	bool _usesTexelBuffer;
+	bool _isLinear;
 };
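
The new _isLinear flag above is derived from the VkImageCreateInfo. A hedged sketch of an image that satisfies every VK_IMAGE_TILING_LINEAR constraint that validateLinear() (defined in the .mm below) enforces: 2D, one mip level, one array layer, single-sampled, transfer usage only:

    VkImageCreateInfo imageInfo = {};
    imageInfo.sType         = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageInfo.imageType     = VK_IMAGE_TYPE_2D;                  // linear images must be 2D
    imageInfo.format        = VK_FORMAT_R8G8B8A8_UNORM;          // and not depth/stencil
    imageInfo.extent        = { 256, 256, 1 };
    imageInfo.mipLevels     = 1;
    imageInfo.arrayLayers   = 1;
    imageInfo.samples       = VK_SAMPLE_COUNT_1_BIT;
    imageInfo.tiling        = VK_IMAGE_TILING_LINEAR;
    imageInfo.usage         = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;

    VkImage image;
    vkCreateImage(device, &imageInfo, nullptr, &image);          // MVKImage constructor runs validateLinear()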
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index 53a47dc..552eb4e 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -132,10 +132,8 @@
 	}
 }
 
-/**
- * Returns whether the specified image memory barrier requires a sync between this
- * texture and host memory for the purpose of the host reading texture memory.
- */
+// Returns whether the specified image memory barrier requires a sync between this
+// texture and host memory for the purpose of the host reading texture memory.
 bool MVKImage::needsHostReadSync(VkPipelineStageFlags srcStageMask,
 								 VkPipelineStageFlags dstStageMask,
 								 VkImageMemoryBarrier* pImageMemoryBarrier) {
@@ -146,11 +144,11 @@
 	return ((pImageMemoryBarrier->newLayout == VK_IMAGE_LAYOUT_GENERAL) &&
 			mvkIsAnyFlagEnabled(dstStageMask, (VK_PIPELINE_STAGE_HOST_BIT)) &&
 			mvkIsAnyFlagEnabled(pImageMemoryBarrier->dstAccessMask, (VK_ACCESS_HOST_READ_BIT)) &&
-			_deviceMemory->isMemoryHostAccessible() && getMTLStorageMode() != MTLStorageModeShared);
+			isMemoryHostAccessible() && !isMemoryHostCoherent());
 #endif
 }
 
-/** Returns a pointer to the internal subresource for the specified MIP level layer. */
+// Returns a pointer to the internal subresource for the specified MIP level layer.
 MVKImageSubresource* MVKImage::getSubresource(uint32_t mipLevel, uint32_t arrayLayer) {
 	uint32_t srIdx = (mipLevel * _arrayLayers) + arrayLayer;
 	return (srIdx < _subresources.size()) ? &_subresources[srIdx] : NULL;
@@ -165,48 +163,74 @@
 	return VK_SUCCESS;
 }
 
-/** 
- * Flushes the device memory at the specified memory range into the MTLTexture. Updates
- * all subresources that overlap the specified range and are in an updatable layout state.
- */
+// Memory may have been mapped before image was bound, and needs to be loaded into the MTLTexture.
+VkResult MVKImage::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
+	if (_deviceMemory) { _deviceMemory->removeImage(this); }
+
+	MVKResource::bindDeviceMemory(mvkMem, memOffset);
+
+	_usesTexelBuffer = validateUseTexelBuffer();
+
+	flushToDevice(getDeviceMemoryOffset(), getByteCount());
+
+	return _deviceMemory ? _deviceMemory->addImage(this) : VK_SUCCESS;
+}
+
+bool MVKImage::validateUseTexelBuffer() {
+	VkExtent2D blockExt = mvkMTLPixelFormatBlockTexelSize(_mtlPixelFormat);
+	bool isUncompressed = blockExt.width == 1 && blockExt.height == 1;
+
+	bool useTexelBuffer = _device->_pMetalFeatures->texelBuffers;								// Texel buffers available
+	useTexelBuffer = useTexelBuffer && isMemoryHostAccessible() && _isLinear && isUncompressed;	// Applicable memory layout
+	useTexelBuffer = useTexelBuffer && _deviceMemory && _deviceMemory->_mtlBuffer;				// Buffer is available to overlay
+
+#if MVK_MACOS
+	useTexelBuffer = useTexelBuffer && !isMemoryHostCoherent();	// macOS cannot use shared memory for texel buffers
+#endif
+
+	return useTexelBuffer;
+}
+
+bool MVKImage::shouldFlushHostMemory() { return isMemoryHostAccessible() && !_usesTexelBuffer; }
+
+// Flushes the device memory at the specified memory range into the MTLTexture. Updates
+// all subresources that overlap the specified range and are in an updatable layout state.
 VkResult MVKImage::flushToDevice(VkDeviceSize offset, VkDeviceSize size) {
-	for (auto& subRez : _subresources) {
-		switch (subRez.layoutState) {
-			case VK_IMAGE_LAYOUT_UNDEFINED:			// TODO: VK_IMAGE_LAYOUT_UNDEFINED should be illegal
-			case VK_IMAGE_LAYOUT_PREINITIALIZED:
-			case VK_IMAGE_LAYOUT_GENERAL: {
-				updateMTLTextureContent(subRez, offset, size);
-				break;
+	if (shouldFlushHostMemory()) {
+		for (auto& subRez : _subresources) {
+			switch (subRez.layoutState) {
+				case VK_IMAGE_LAYOUT_UNDEFINED:
+				case VK_IMAGE_LAYOUT_PREINITIALIZED:
+				case VK_IMAGE_LAYOUT_GENERAL: {
+					updateMTLTextureContent(subRez, offset, size);
+					break;
+				}
+				default:
+					break;
 			}
-			default:
-				break;
 		}
 	}
 	return VK_SUCCESS;
 }
 
-/**
- * Pulls content from the MTLTexture into the device memory at the specified memory range. 
- * Pulls from all subresources that overlap the specified range and are in an updatable layout state.
- */
+// Pulls content from the MTLTexture into the device memory at the specified memory range.
+// Pulls from all subresources that overlap the specified range and are in an updatable layout state.
 VkResult MVKImage::pullFromDevice(VkDeviceSize offset, VkDeviceSize size) {
-	for (auto& subRez : _subresources) {
-		switch (subRez.layoutState) {
-			case VK_IMAGE_LAYOUT_GENERAL: {
-                getMTLTextureContent(subRez, offset, size);
-				break;
+	if (shouldFlushHostMemory()) {
+		for (auto& subRez : _subresources) {
+			switch (subRez.layoutState) {
+				case VK_IMAGE_LAYOUT_GENERAL: {
+					getMTLTextureContent(subRez, offset, size);
+					break;
+				}
+				default:
+					break;
 			}
-			default:
-				break;
 		}
 	}
 	return VK_SUCCESS;
 }
 
-void* MVKImage::map(VkDeviceSize offset, VkDeviceSize size) {
-	return _deviceMemory->allocateMappedMemory(offset, size);
-}
-
 
 #pragma mark Metal
 
@@ -246,21 +270,22 @@
     return VK_SUCCESS;
 }
 
-/**
- * Creates and returns a retained Metal texture suitable for use in this instance.
- *
- * This implementation creates a new MTLTexture from a MTLTextureDescriptor and possible IOSurface.
- * Subclasses may override this function to create the MTLTexture in a different manner.
- */
+// Creates and returns a retained Metal texture suitable for use in this instance.
+// This implementation creates a new MTLTexture from a MTLTextureDescriptor and possible IOSurface.
+// Subclasses may override this function to create the MTLTexture in a different manner.
 id<MTLTexture> MVKImage::newMTLTexture() {
     if (_ioSurface) {
         return [getMTLDevice() newTextureWithDescriptor: getMTLTextureDescriptor() iosurface: _ioSurface plane: 0];
+	} else if (_usesTexelBuffer) {
+        return [_deviceMemory->_mtlBuffer newTextureWithDescriptor: getMTLTextureDescriptor()
+															offset: getDeviceMemoryOffset()
+													   bytesPerRow: _subresources[0].layout.rowPitch];
     } else {
         return [getMTLDevice() newTextureWithDescriptor: getMTLTextureDescriptor()];
     }
 }
 
-/** Removes and releases the MTLTexture object, so that it can be lazily created by getMTLTexture(). */
+// Removes and releases the MTLTexture object, so that it can be lazily created by getMTLTexture().
 void MVKImage::resetMTLTexture() {
 	[_mtlTexture release];
 	_mtlTexture = nil;
@@ -309,7 +334,7 @@
     return VK_SUCCESS;
 }
 
-/** Returns an autoreleased Metal texture descriptor constructed from the properties of this image. */
+// Returns an autoreleased Metal texture descriptor constructed from the properties of this image.
 MTLTextureDescriptor* MVKImage::getMTLTextureDescriptor() {
 	MTLTextureDescriptor* mtlTexDesc = [[MTLTextureDescriptor alloc] init];
 	mtlTexDesc.pixelFormat = _mtlPixelFormat;
@@ -339,63 +364,63 @@
     return stgMode;
 }
 
-/**
- * Updates the contents of the underlying MTLTexture, corresponding to the 
- * specified subresource definition, from the underlying memory buffer.
- */
+// Updates the contents of the underlying MTLTexture, corresponding to the
+// specified subresource definition, from the underlying memory buffer.
 void MVKImage::updateMTLTextureContent(MVKImageSubresource& subresource,
                                        VkDeviceSize offset, VkDeviceSize size) {
-    // Check if subresource overlaps the memory range.
+
+	VkImageSubresource& imgSubRez = subresource.subresource;
+	VkSubresourceLayout& imgLayout = subresource.layout;
+
+	// Check if subresource overlaps the memory range.
     VkDeviceSize memStart = offset;
     VkDeviceSize memEnd = offset + size;
-    VkDeviceSize imgStart = subresource.layout.offset;
-    VkDeviceSize imgEnd = subresource.layout.offset + subresource.layout.size;
+    VkDeviceSize imgStart = imgLayout.offset;
+    VkDeviceSize imgEnd = imgLayout.offset + imgLayout.size;
     if (imgStart >= memEnd || imgEnd <= memStart) { return; }
 
-    VkImageSubresource& imgSubRez = subresource.subresource;
-    VkSubresourceLayout& imgLayout = subresource.layout;
+	// Don't update if host memory has not been mapped yet.
+	void* pHostMem = getHostMemoryAddress();
+	if ( !pHostMem ) { return; }
 
-    uint32_t mipLvl = imgSubRez.mipLevel;
-    uint32_t layer = imgSubRez.arrayLayer;
-
-    VkExtent3D mipExtent = getExtent3D(mipLvl);
+    VkExtent3D mipExtent = getExtent3D(imgSubRez.mipLevel);
     VkImageType imgType = getImageType();
-    void* pImgBytes = (void*)((uintptr_t)getLogicalMappedMemory() + imgLayout.offset);
+    void* pImgBytes = (void*)((uintptr_t)pHostMem + imgLayout.offset);
 
     MTLRegion mtlRegion;
     mtlRegion.origin = MTLOriginMake(0, 0, 0);
     mtlRegion.size = mvkMTLSizeFromVkExtent3D(mipExtent);
 
     [getMTLTexture() replaceRegion: mtlRegion
-                       mipmapLevel: mipLvl
-                             slice: layer
+                       mipmapLevel: imgSubRez.mipLevel
+                             slice: imgSubRez.arrayLayer
                          withBytes: pImgBytes
                        bytesPerRow: (imgType != VK_IMAGE_TYPE_1D ? imgLayout.rowPitch : 0)
                      bytesPerImage: (imgType == VK_IMAGE_TYPE_3D ? imgLayout.depthPitch : 0)];
 }
 
-/**
- * Updates the contents of the underlying memory buffer from the contents of 
- * the underlying MTLTexture, corresponding to the specified subresource definition.
- */
+// Updates the contents of the underlying memory buffer from the contents of
+// the underlying MTLTexture, corresponding to the specified subresource definition.
 void MVKImage::getMTLTextureContent(MVKImageSubresource& subresource,
                                     VkDeviceSize offset, VkDeviceSize size) {
-    // Check if subresource overlaps the memory range.
+
+	VkImageSubresource& imgSubRez = subresource.subresource;
+	VkSubresourceLayout& imgLayout = subresource.layout;
+
+	// Check if subresource overlaps the memory range.
     VkDeviceSize memStart = offset;
     VkDeviceSize memEnd = offset + size;
-    VkDeviceSize imgStart = subresource.layout.offset;
-    VkDeviceSize imgEnd = subresource.layout.offset + subresource.layout.size;
+    VkDeviceSize imgStart = imgLayout.offset;
+    VkDeviceSize imgEnd = imgLayout.offset + imgLayout.size;
     if (imgStart >= memEnd || imgEnd <= memStart) { return; }
 
-    VkImageSubresource& imgSubRez = subresource.subresource;
-    VkSubresourceLayout& imgLayout = subresource.layout;
+	// Don't update if host memory has not been mapped yet.
+	void* pHostMem = getHostMemoryAddress();
+	if ( !pHostMem ) { return; }
 
-    uint32_t mipLvl = imgSubRez.mipLevel;
-    uint32_t layer = imgSubRez.arrayLayer;
-
-    VkExtent3D mipExtent = getExtent3D(mipLvl);
+    VkExtent3D mipExtent = getExtent3D(imgSubRez.mipLevel);
     VkImageType imgType = getImageType();
-    void* pImgBytes = (void*)((uintptr_t)getLogicalMappedMemory() + imgLayout.offset);
+    void* pImgBytes = (void*)((uintptr_t)pHostMem + imgLayout.offset);
 
     MTLRegion mtlRegion;
     mtlRegion.origin = MTLOriginMake(0, 0, 0);
@@ -405,8 +430,8 @@
                   bytesPerRow: (imgType != VK_IMAGE_TYPE_1D ? imgLayout.rowPitch : 0)
                 bytesPerImage: (imgType == VK_IMAGE_TYPE_3D ? imgLayout.depthPitch : 0)
                    fromRegion: mtlRegion
-                  mipmapLevel: mipLvl
-                        slice: layer];
+                  mipmapLevel: imgSubRez.mipLevel
+                        slice: imgSubRez.arrayLayer];
 }
 
 
@@ -446,8 +471,10 @@
 
     _isDepthStencilAttachment = (mvkAreFlagsEnabled(pCreateInfo->usage, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ||
                                  mvkAreFlagsEnabled(mvkVkFormatProperties(pCreateInfo->format).optimalTilingFeatures, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT));
-
+	_canSupportMTLTextureView = !_isDepthStencilAttachment;
     _hasExpectedTexelSize = (mvkMTLPixelFormatBytesPerBlock(_mtlPixelFormat) == mvkVkFormatBytesPerBlock(pCreateInfo->format));
+	_isLinear = validateLinear(pCreateInfo);
+	_usesTexelBuffer = false;
 
    // Calc _byteCount after _mtlTexture & _byteAlignment
     for (uint32_t mipLvl = 0; mipLvl < _mipLevels; mipLvl++) {
@@ -455,10 +482,46 @@
     }
 
     initSubresources(pCreateInfo);
-	initMTLTextureViewSupport();
 }
 
-/** Initializes the subresource definitions. */
+bool MVKImage::validateLinear(const VkImageCreateInfo* pCreateInfo) {
+	if (pCreateInfo->tiling != VK_IMAGE_TILING_LINEAR) { return false; }
+
+	if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, imageType must be VK_IMAGE_TYPE_2D."));
+		return false;
+	}
+
+	if (_isDepthStencilAttachment) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, format must not be a depth/stencil format."));
+		return false;
+	}
+
+	if (_mipLevels > 1) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, mipLevels must be 1."));
+		return false;
+	}
+
+	if (_arrayLayers > 1) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, arrayLayers must be 1."));
+		return false;
+	}
+
+	if (_samples > 1) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, samples must be VK_SAMPLE_COUNT_1_BIT."));
+		return false;
+	}
+
+	if ( !mvkAreOnlyAnyFlagsEnabled(_usage, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) ) {
+		setConfigurationResult(mvkNotifyErrorWithText(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : If tiling is VK_IMAGE_TILING_LINEAR, usage must only include VK_IMAGE_USAGE_TRANSFER_SRC_BIT and/or VK_IMAGE_USAGE_TRANSFER_DST_BIT."));
+		return false;
+	}
+
+	return true;
+}
+
+// Initializes the subresource definitions.
 void MVKImage::initSubresources(const VkImageCreateInfo* pCreateInfo) {
 	_subresources.reserve(_mipLevels * _arrayLayers);
 
@@ -476,7 +539,7 @@
 	}
 }
 
-/** Initializes the layout element of the specified image subresource. */
+// Initializes the layout element of the specified image subresource.
 void MVKImage::initSubresourceLayout(MVKImageSubresource& imgSubRez) {
 	VkImageSubresource subresource = imgSubRez.subresource;
 	uint32_t currMipLevel = subresource.mipLevel;
@@ -500,15 +563,8 @@
 	layout.depthPitch = bytesPerLayerCurrLevel;
 }
 
-/**
- * Determines whether this image can support Metal texture views,
- * and sets the _canSupportMTLTextureView variable appropriately.
- */
-void MVKImage::initMTLTextureViewSupport() {
-	_canSupportMTLTextureView = !_isDepthStencilAttachment;
-}
-
 MVKImage::~MVKImage() {
+	if (_deviceMemory) { _deviceMemory->removeImage(this); }
 	resetMTLTexture();
     resetIOSurface();
 }
@@ -552,10 +608,8 @@
 	}
 }
 
-/**
- * Creates and returns a retained Metal texture as an
- * overlay on the Metal texture of the underlying image.
- */
+// Creates and returns a retained Metal texture as an
+// overlay on the Metal texture of the underlying image.
 id<MTLTexture> MVKImageView::newMTLTexture() {
     return [_image->getMTLTexture() newTextureViewWithPixelFormat: _mtlPixelFormat
                                                       textureType: _mtlTextureType
@@ -698,10 +752,8 @@
     return true;
 }
 
-/**
- * Determine whether this image view should use a Metal texture view,
- * and set the _useMTLTextureView variable appropriately.
- */
+// Determine whether this image view should use a Metal texture view,
+// and set the _useMTLTextureView variable appropriately.
 void MVKImageView::initMTLTextureViewSupport() {
 	_useMTLTextureView = _image->_canSupportMTLTextureView;
 
@@ -722,7 +774,7 @@
 #pragma mark -
 #pragma mark MVKSampler
 
-/** Returns an autoreleased Metal sampler descriptor constructed from the properties of this image. */
+// Returns an autoreleased Metal sampler descriptor constructed from the properties of this image.
 MTLSamplerDescriptor* MVKSampler::getMTLSamplerDescriptor(const VkSamplerCreateInfo* pCreateInfo) {
 
 	MTLSamplerDescriptor* mtlSampDesc = [[MTLSamplerDescriptor alloc] init];
@@ -746,7 +798,7 @@
 	return [mtlSampDesc autorelease];
 }
 
-/** Constructs an instance on the specified image. */
+// Constructs an instance on the specified image.
 MVKSampler::MVKSampler(MVKDevice* device, const VkSamplerCreateInfo* pCreateInfo) : MVKBaseDeviceObject(device) {
     _mtlSamplerState = [getMTLDevice() newSamplerStateWithDescriptor: getMTLSamplerDescriptor(pCreateInfo)];
 }
@@ -819,7 +871,7 @@
 //	MVKLogDebug("%s swapchain image %p semaphore %p in acquire with %lu other semaphores.", (_availability.isAvailable ? "Signaling" : "Tracking"), this, semaphore, _availabilitySignalers.size());
 }
 
-/** Signal either or both of the semaphore and fence in the specified tracker pair. */
+// Signal either or both of the semaphore and fence in the specified tracker pair.
 void MVKSwapchainImage::signal(MVKSwapchainSignaler& signaler) {
 	if (signaler.first) { signaler.first->signal(); }
 	if (signaler.second) { signaler.second->signal(); }
@@ -846,11 +898,8 @@
 
 #pragma mark Metal
 
-/**
- * Creates and returns a retained Metal texture suitable for use in this instance.
- *
- * This implementation retrieves a MTLTexture from the CAMetalDrawable.
- */
+// Creates and returns a retained Metal texture suitable for use in this instance.
+// This implementation retrieves a MTLTexture from the CAMetalDrawable.
 id<MTLTexture> MVKSwapchainImage::newMTLTexture() {
 	return [[getCAMetalDrawable() texture] retain];
 }
@@ -885,13 +934,13 @@
     }
 }
 
-/** Removes and releases the Metal drawable object, so that it can be lazily created by getCAMetalDrawable(). */
+// Removes and releases the Metal drawable object, so that it can be lazily created by getCAMetalDrawable().
 void MVKSwapchainImage::resetCAMetalDrawable() {
 	[_mtlDrawable release];
 	_mtlDrawable = nil;
 }
 
-/** Resets the MTLTexture and CAMetalDrawable underlying this image. */
+// Resets the MTLTexture and CAMetalDrawable underlying this image.
 void MVKSwapchainImage::resetMetalSurface() {
     resetMTLTexture();			// Release texture first so drawable will be last to release it
     resetCAMetalDrawable();
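
The _usesTexelBuffer path in newMTLTexture() above overlays the MTLTexture directly on the device memory's MTLBuffer, so host writes through the mapped pointer reach the texture without a replaceRegion: copy. The same Metal technique in isolation, as a hedged sketch (names and sizes are hypothetical; the real code takes bytesPerRow from the subresource layout):

    MTLTextureDescriptor* texDesc = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat: MTLPixelFormatRGBA8Unorm
                                                                                       width: 256
                                                                                      height: 256
                                                                                   mipmapped: NO];
    id<MTLTexture> mtlTex = [mtlBuff newTextureWithDescriptor: texDesc
                                                       offset: 0              // byte offset into the buffer
                                                  bytesPerRow: 256 * 4];      // must meet device row alignment
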
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKResource.h b/MoltenVK/MoltenVK/GPUObjects/MVKResource.h
index 0491589..900f3a4 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKResource.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKResource.h
@@ -38,36 +38,28 @@
     /** Returns the byte offset in the bound device memory. */
     inline VkDeviceSize getDeviceMemoryOffset() { return _deviceMemoryOffset; }
 
-	/** Returns the byte alignment required for this resource. */
-    inline VkDeviceSize getByteAlignment() { return _byteAlignment; }
-
 	/** Returns the memory requirements of this resource by populating the specified structure. */
 	virtual VkResult getMemoryRequirements(VkMemoryRequirements* pMemoryRequirements) = 0;
 
 	/** Binds this resource to the specified offset within the specified memory allocation. */
-	VkResult bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset);
+	virtual VkResult bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset);
 
 	/** Returns the device memory underlying this resource. */
 	inline MVKDeviceMemory* getDeviceMemory() { return _deviceMemory; }
 
 	/** Returns whether the memory is accessible from the host. */
-    inline bool isMemoryHostAccessible() {
-        return (_deviceMemory ? _deviceMemory->isMemoryHostAccessible() : false);
-    }
+	inline bool isMemoryHostAccessible() { return _deviceMemory && _deviceMemory->isMemoryHostAccessible(); }
 
-    /**
-     * Returns the host memory address that represents what would be the beginning of 
-     * the host address space that this resource is mapped to by a vkMapMemory() call.
-     *
-     * The returnd value only has physical meaning if the mapped memory overlaps the 
-     * beginning of the memory used by this resource, otherwise it is a logical address
-     * used to calculate resource offsets.
-     *
-     * This function must only be called between vkMapMemory() and vkUnmapMemory() calls.
-     */
-    inline void* getLogicalMappedMemory() {
-        return (_deviceMemory ? (void*)((uintptr_t)_deviceMemory->getLogicalMappedMemory() + _deviceMemoryOffset) : nullptr);
-    }
+	/** Returns whether the memory is automatically coherent between device and host. */
+	inline bool isMemoryHostCoherent() { return _deviceMemory && _deviceMemory->isMemoryHostCoherent(); }
+
+	/**
+	 * Returns the host memory address of this resource, or NULL if the memory
+	 * is marked as device-only and cannot be mapped to a host address.
+	 */
+	inline void* getHostMemoryAddress() {
+		void* pMemBase = _deviceMemory ? _deviceMemory->getHostMemoryAddress() : nullptr;
+		return pMemBase ? (void*)((uintptr_t)pMemBase + _deviceMemoryOffset) : nullptr;
+	}
 
 	/** Applies the specified global memory barrier. */
 	virtual void applyMemoryBarrier(VkPipelineStageFlags srcStageMask,
@@ -79,23 +71,12 @@
 	
 #pragma mark Construction
 
-	/** Constructs an instance for the specified device. */
     MVKResource(MVKDevice* device) : MVKBaseDeviceObject(device) {}
 
-	/** Destructor. */
-	~MVKResource() override;
-
 protected:
-	friend MVKDeviceMemory;
-	
-    virtual void* map(VkDeviceSize offset, VkDeviceSize size) = 0;
-	virtual VkResult flushToDevice(VkDeviceSize offset, VkDeviceSize size) = 0;
-	virtual VkResult pullFromDevice(VkDeviceSize offset, VkDeviceSize size) = 0;
 	virtual bool needsHostReadSync(VkPipelineStageFlags srcStageMask,
 								   VkPipelineStageFlags dstStageMask,
 								   VkMemoryBarrier* pMemoryBarrier);
-    bool doesOverlap(VkDeviceSize offset, VkDeviceSize size);
-    bool doesContain(VkDeviceSize offset, VkDeviceSize size);
 
 	MVKDeviceMemory* _deviceMemory = nullptr;
 	VkDeviceSize _deviceMemoryOffset = 0;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm b/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm
index c316b5b..1060985 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKResource.mm
@@ -23,20 +23,13 @@
 #pragma mark MVKResource
 
 VkResult MVKResource::bindDeviceMemory(MVKDeviceMemory* mvkMem, VkDeviceSize memOffset) {
-	if (_deviceMemory) { _deviceMemory->removeResource(this); }
-
 	_deviceMemory = mvkMem;
 	_deviceMemoryOffset = memOffset;
-
-	if (_deviceMemory) { _deviceMemory->addResource(this); }
-
 	return VK_SUCCESS;
 }
 
-/**
- * Returns whether the specified global memory barrier requires a sync between this
- * texture and host memory for the purpose of the host reading texture memory.
- */
+// Returns whether the specified global memory barrier requires a sync between this
+// resource and host memory for the purpose of the host reading resource memory.
 bool MVKResource::needsHostReadSync(VkPipelineStageFlags srcStageMask,
 									VkPipelineStageFlags dstStageMask,
 									VkMemoryBarrier* pMemoryBarrier) {
@@ -46,34 +39,7 @@
 #if MVK_MACOS
 	return (mvkIsAnyFlagEnabled(dstStageMask, (VK_PIPELINE_STAGE_HOST_BIT)) &&
 			mvkIsAnyFlagEnabled(pMemoryBarrier->dstAccessMask, (VK_ACCESS_HOST_READ_BIT)) &&
-			_deviceMemory && _deviceMemory->isMemoryHostAccessible() && !_deviceMemory->isMemoryHostCoherent());
+			isMemoryHostAccessible() && !isMemoryHostCoherent());
 #endif
 }
 
-// Check if this resource overlaps the device memory offset and range
-bool MVKResource::doesOverlap(VkDeviceSize offset, VkDeviceSize size) {
-    VkDeviceSize memStart = offset;
-    VkDeviceSize memEnd = memStart + size;
-    VkDeviceSize rezStart = _deviceMemoryOffset;
-    VkDeviceSize rezEnd = rezStart + _byteCount;
-
-    return (memStart < rezEnd && memEnd > rezStart);
-}
-
-// Check if this resource completely contains the device memory offset and range
-bool MVKResource::doesContain(VkDeviceSize offset, VkDeviceSize size) {
-    VkDeviceSize memStart = offset;
-    VkDeviceSize memEnd = memStart + size;
-    VkDeviceSize rezStart = _deviceMemoryOffset;
-    VkDeviceSize rezEnd = rezStart + _byteCount;
-
-    return (memStart >= rezStart && memEnd <= rezEnd);
-}
-
-
-#pragma mark Construction
-
-MVKResource::~MVKResource() {
-    if (_deviceMemory) { _deviceMemory->removeResource(this); }
-};
-
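
needsHostReadSync() above keys on a host-read barrier against host-accessible, non-coherent memory. A minimal sketch of the kind of barrier that trips it on macOS, recorded between a transfer write and a host read (cmdBuf is a hypothetical VkCommandBuffer):

    VkMemoryBarrier barrier = {};
    barrier.sType         = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;      // matches the dstAccessMask test above
    vkCmdPipelineBarrier(cmdBuf,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,  // srcStageMask
                         VK_PIPELINE_STAGE_HOST_BIT,      // matches the dstStageMask test above
                         0,                               // dependencyFlags
                         1, &barrier, 0, nullptr, 0, nullptr);
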
diff --git a/MoltenVK/MoltenVK/Utility/MVKFoundation.h b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
index a08dfe9..32b4043 100644
--- a/MoltenVK/MoltenVK/Utility/MVKFoundation.h
+++ b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
@@ -363,7 +363,11 @@
 template<typename T1, typename T2>
 bool mvkIsAnyFlagEnabled(T1 value, const T2 bitMask) { return !!(value & bitMask); }
 
-/** Returns whether the specified value has ONLY of the flags specified in bitMask enabled (set to 1). */
+/** Returns whether the specified value has ALL of the flags specified in bitMask enabled (set to 1), and no others. */
 template<typename T1, typename T2>
-bool mvkAreOnlyFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
+bool mvkAreOnlyAllFlagsEnabled(T1 value, const T2 bitMask) { return (value == bitMask); }
+
+/** Returns whether the specified value has ONE OR MORE of the flags specified in bitMask enabled (set to 1), and no others. */
+template<typename T1, typename T2>
+bool mvkAreOnlyAnyFlagsEnabled(T1 value, const T2 bitMask) { return (mvkIsAnyFlagEnabled(value, bitMask) && ((value | bitMask) == bitMask)); }
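
The split between the two renamed helpers is easiest to see on concrete masks. A small sketch exercising the definitions exactly as given above:

    #include <cassert>

    static void testFlagHelpers() {
        assert( mvkAreOnlyAllFlagsEnabled(0x6u, 0x6u));   // exactly the mask bits, no others
        assert(!mvkAreOnlyAllFlagsEnabled(0x2u, 0x6u));   // a strict subset is not ALL
        assert( mvkAreOnlyAnyFlagsEnabled(0x2u, 0x6u));   // a non-empty subset satisfies ANY
        assert(!mvkAreOnlyAnyFlagsEnabled(0xAu, 0x6u));   // a bit outside the mask fails both
        assert(!mvkAreOnlyAnyFlagsEnabled(0x0u, 0x6u));   // at least one mask bit is required
    }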