Use the new memory barrier methods for pipeline barrier commands.

These two new methods introduced in Metal 2.1 (macOS 10.14) are more
granular, providing more control over what exactly must be sync'd. They
also work on buffer memory, meaning that buffer dependencies work
properly here.
diff --git a/MoltenVK/MoltenVK/API/mvk_datatypes.h b/MoltenVK/MoltenVK/API/mvk_datatypes.h
index 1a936ad..eeeef02 100644
--- a/MoltenVK/MoltenVK/API/mvk_datatypes.h
+++ b/MoltenVK/MoltenVK/API/mvk_datatypes.h
@@ -390,18 +390,24 @@
 /** Returns the size, in bytes, of a vertex index of the specified type. */
 size_t mvkMTLIndexTypeSizeInBytes(MTLIndexType mtlIdxType);
 
-/** Returns the MVKShaderStage corresponding to the specified Vulkan VkShaderStageFlagBits. */
+/** Returns the MoltenVK MVKShaderStage corresponding to the specified Vulkan VkShaderStageFlagBits. */
 MVKShaderStage mvkShaderStageFromVkShaderStageFlagBits(VkShaderStageFlagBits vkStage);
 
-/** Returns the VkShaderStageFlagBits corresponding to the specified MoltenVK MVKShaderStage. */
+/** Returns the Vulkan VkShaderStageFlagBits corresponding to the specified MoltenVK MVKShaderStage. */
 VkShaderStageFlagBits mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage mvkStage);
 
-/** Returns the MTLWinding corresponding to the specified spv::ExecutionMode. */
+/** Returns the Metal MTLWinding corresponding to the specified SPIR-V spv::ExecutionMode. */
 MTLWinding mvkMTLWindingFromSpvExecutionMode(uint32_t spvMode);
 
-/** Returns the MTLTessellationPartitionMode corresponding to the specified spv::ExecutionMode. */
+/** Returns the Metal MTLTessellationPartitionMode corresponding to the specified SPIR-V spv::ExecutionMode. */
 MTLTessellationPartitionMode mvkMTLTessellationPartitionModeFromSpvExecutionMode(uint32_t spvMode);
 
+/** Returns the combination of Metal MTLRenderStage bits corresponding to the specified Vulkan VkPipelineStageFlags. */
+MTLRenderStages mvkMTLRenderStagesFromVkPipelineStageFlags(VkPipelineStageFlags vkStages);
+
+/** Returns the combination of Metal MTLBarrierScope bits corresponding to the specified Vulkan VkAccessFlags. */
+MTLBarrierScope mvkMTLBarrierScopeFromVkAccessFlags(VkAccessFlags vkAccess);
+
 #pragma mark -
 #pragma mark Geometry conversions
 
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
index afe05ef..20c9495 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
@@ -65,8 +65,36 @@
 
 #if MVK_MACOS
     // Calls below invoke MTLBlitCommandEncoder so must apply this first
-	if ( !(_memoryBarriers.empty() && _imageMemoryBarriers.empty()) ) {
-		[cmdEncoder->_mtlRenderEncoder textureBarrier];
+	if ( [cmdEncoder->_mtlRenderEncoder respondsToSelector: @selector(memoryBarrierWithScope:afterStages:beforeStages:)] ) {
+		MTLRenderStages srcStages = mvkMTLRenderStagesFromVkPipelineStageFlags(_srcStageMask);
+		MTLRenderStages dstStages = mvkMTLRenderStagesFromVkPipelineStageFlags(_dstStageMask);
+		for (auto& mb : _memoryBarriers) {
+			MTLBarrierScope scope = mvkMTLBarrierScopeFromVkAccessFlags(mb.dstAccessMask);
+			scope |= mvkMTLBarrierScopeFromVkAccessFlags(mb.srcAccessMask);
+			[cmdEncoder->_mtlRenderEncoder memoryBarrierWithScope: scope
+													  afterStages: srcStages
+													 beforeStages: dstStages];
+		}
+		std::vector<id<MTLResource>> resources;
+		resources.reserve(_bufferMemoryBarriers.size() + _imageMemoryBarriers.size());
+		for (auto& mb : _bufferMemoryBarriers) {
+			auto* mvkBuff = (MVKBuffer*)mb.buffer;
+			resources.push_back(mvkBuff->getMTLBuffer());
+		}
+		for (auto& mb : _imageMemoryBarriers) {
+			auto* mvkImg = (MVKImage*)mb.image;
+			resources.push_back(mvkImg->getMTLTexture());
+		}
+		if ( !resources.empty() ) {
+			[cmdEncoder->_mtlRenderEncoder memoryBarrierWithResources: resources.data()
+																count: resources.size()
+														  afterStages: srcStages
+														 beforeStages: dstStages];
+		}
+	} else {
+		if ( !(_memoryBarriers.empty() && _imageMemoryBarriers.empty()) ) {
+			[cmdEncoder->_mtlRenderEncoder textureBarrier];
+		}
 	}
 #endif
 
diff --git a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
index 5b8bb2e..150ace7 100644
--- a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
+++ b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
@@ -1268,6 +1268,31 @@
 	}
 }
 
+MVK_PUBLIC_SYMBOL MTLRenderStages mvkMTLRenderStagesFromVkPipelineStageFlags(VkPipelineStageFlags vkStages) {
+	MTLRenderStages mtlStages = MTLRenderStages(0);
+	if ( mvkIsAnyFlagEnabled(vkStages, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT) ) {
+		mtlStages |= MTLRenderStageVertex;
+	}
+	if ( mvkIsAnyFlagEnabled(vkStages, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT ) ) {
+		mtlStages |= MTLRenderStageFragment;
+	}
+	return mtlStages;
+}
+
+MVK_PUBLIC_SYMBOL MTLBarrierScope mvkMTLBarrierScopeFromVkAccessFlags(VkAccessFlags vkAccess) {
+	MTLBarrierScope mtlScope = MTLBarrierScope(0);
+	if ( mvkIsAnyFlagEnabled(vkAccess, VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT) ) {
+		mtlScope |= MTLBarrierScopeBuffers;
+	}
+	if ( mvkIsAnyFlagEnabled(vkAccess, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT) ) {
+		mtlScope |= MTLBarrierScopeBuffers | MTLBarrierScopeTextures;
+	}
+	if ( mvkIsAnyFlagEnabled(vkAccess, VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT) ) {
+		mtlScope |= MTLBarrierScopeRenderTargets;
+	}
+	return mtlScope;
+}
+
 
 #pragma mark -
 #pragma mark Memory options