Merge branch 'master' into Metal-3.0
diff --git a/.travis.yml b/.travis.yml
index 9b3bd0a..32a4db1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,9 +4,10 @@
 osx_image: xcode11
 
 # Build dependencies
+# Travis has trouble with python3, which SPIRV-Tools requires,
+# so skip the SPIRV-Tools build, and use templated headers instead.
 install:
-  - brew install python3
-  - ./fetchDependencies -v
+  - ./fetchDependencies -v --skip-spirv-tools-build
 
 # Cache built deps
 cache:
diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index 3a7614a..fbca82f 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -13,6 +13,17 @@
 
 
 
+MoltenVK 1.0.36
+---------------
+
+Released TBD
+
+- On iOS GPU family 2 and earlier, support immutable depth-compare samplers 
+  as constexpr samplers hardcoded in MSL.
+- Skip SPIRV-Tools build in Travis.
+
+
+
 MoltenVK 1.0.35
 ---------------
 
diff --git a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
index 5152107..31452e2 100644
--- a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
+++ b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
@@ -50,7 +50,7 @@
  */
 #define MVK_VERSION_MAJOR   1
 #define MVK_VERSION_MINOR   0
-#define MVK_VERSION_PATCH   35
+#define MVK_VERSION_PATCH   36
 
 #define MVK_MAKE_VERSION(major, minor, patch)    (((major) * 10000) + ((minor) * 100) + (patch))
 #define MVK_VERSION     MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH)
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
index 3931a70..5eff3e4 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.mm
@@ -254,6 +254,10 @@
 			descWrite.pTexelBufferView = pNewTexelBufferView;
 		}
 	}
+
+	// Validate by encoding on a null encoder
+	encode(nullptr);
+	setConfigurationResult(_pipelineLayout->getConfigurationResult());
 }
 
 void MVKCmdPushDescriptorSet::encode(MVKCommandEncoder* cmdEncoder) {
@@ -323,6 +327,10 @@
 	}
 	_pData = new char[size];
 	memcpy(_pData, pData, size);
+
+	// Validate by encoding on a null encoder
+	encode(nullptr);
+	setConfigurationResult(_pipelineLayout->getConfigurationResult());
 }
 
 void MVKCmdPushDescriptorSetWithTemplate::encode(MVKCommandEncoder* cmdEncoder) {
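Both push-descriptor commands above validate themselves at record time by calling `encode()` with a null encoder: the per-stage binding calls in the hunks below are guarded with `if (cmdEncoder)`, so only the sampler checks run, and any failure is captured through the pipeline layout's configuration result. A minimal sketch of that pattern, with `Encoder`, `Binding`, `reportError`, and `encodeBindings` as simplified stand-ins rather than MoltenVK APIs:

```cpp
// Sketch of the "validate by encoding on a null encoder" pattern (stand-in types, not MoltenVK APIs).
#include <cstdio>
#include <vector>

struct Encoder { void bindSampler(int slot) { /* issue the real Metal binding here */ } };

struct Binding {
    int  slot;
    bool usableDynamically;   // e.g. false for a depth-compare sampler on iOS GPU family 2 and earlier
};

static bool reportError(int slot) {
    std::printf("binding %d: this sampler may only be used as an immutable sampler\n", slot);
    return false;
}

// Passing a null encoder performs only the validation side effects.
static bool encodeBindings(Encoder* enc, const std::vector<Binding>& bindings) {
    bool ok = true;
    for (const auto& b : bindings) {
        if (!b.usableDynamically) { ok = reportError(b.slot); }  // record the configuration error
        if (enc) { enc->bindSampler(b.slot); }                   // skipped during the validation pass
    }
    return ok;
}
```

At record time the command calls the equivalent of `encodeBindings(nullptr, bindings)`, mirroring `encode(nullptr)` above; at execution time the same code runs again with a real encoder and actually binds the resources.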
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
index 2569f32..755199e 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
@@ -26,9 +26,6 @@
 #include <unordered_map>
 #include <vector>
 
-using namespace mvk;
-
-
 class MVKDescriptorPool;
 class MVKDescriptorBinding;
 class MVKDescriptorSet;
@@ -96,7 +93,7 @@
               MVKShaderResourceBinding& dslMTLRezIdxOffsets);
 
 	/** Populates the specified shader converter context, at the specified descriptor set binding. */
-	void populateShaderConverterContext(SPIRVToMSLConverterContext& context,
+	void populateShaderConverterContext(mvk::SPIRVToMSLConverterContext& context,
                                         MVKShaderResourceBinding& dslMTLRezIdxOffsets,
                                         uint32_t dslIndex);
 
@@ -117,6 +114,7 @@
 	void initMetalResourceIndexOffsets(MVKShaderStageResourceBinding* pBindingIndexes,
 									   MVKShaderStageResourceBinding* pDescSetCounts,
 									   const VkDescriptorSetLayoutBinding* pBinding);
+	bool validate(MVKSampler* mvkSampler);
 
 	MVKDescriptorSetLayout* _layout;
 	VkDescriptorSetLayoutBinding _info;
@@ -162,7 +160,7 @@
 
 
 	/** Populates the specified shader converter context, at the specified DSL index. */
-	void populateShaderConverterContext(SPIRVToMSLConverterContext& context,
+	void populateShaderConverterContext(mvk::SPIRVToMSLConverterContext& context,
                                         MVKShaderResourceBinding& dslMTLRezIdxOffsets,
                                         uint32_t dslIndex);
 
@@ -179,6 +177,7 @@
 	friend class MVKDescriptorSet;
 
 	void propogateDebugName() override {}
+	
 	MVKVectorInline<MVKDescriptorSetLayoutBinding, 8> _bindings;
 	std::unordered_map<uint32_t, uint32_t> _bindingToIndex;
 	MVKShaderResourceBinding _mtlResourceCounts;
@@ -264,6 +263,7 @@
 	friend class MVKDescriptorSetLayoutBinding;
 
 	void initMTLSamplers(MVKDescriptorSetLayoutBinding* pBindingLayout);
+	bool validate(MVKSampler* mvkSampler) { return _pBindingLayout->validate(mvkSampler); }
 
 	MVKDescriptorSet* _pDescSet;
 	MVKDescriptorSetLayoutBinding* _pBindingLayout;
@@ -427,11 +427,12 @@
  * If the shader stage binding has a binding defined for the specified stage, populates
  * the context at the descriptor set binding from the shader stage resource binding.
  */
-void mvkPopulateShaderConverterContext(SPIRVToMSLConverterContext& context,
+void mvkPopulateShaderConverterContext(mvk::SPIRVToMSLConverterContext& context,
 									   MVKShaderStageResourceBinding& ssRB,
 									   spv::ExecutionModel stage,
 									   uint32_t descriptorSetIndex,
-									   uint32_t bindingIndex);
+									   uint32_t bindingIndex,
+									   MVKSampler* immutableSampler);
 
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
index bfef8cd..93ab0ae 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
@@ -25,6 +25,7 @@
 #include <stdlib.h>
 
 using namespace std;
+using namespace mvk;
 
 
 #pragma mark MVKShaderStageResourceBinding
@@ -80,6 +81,7 @@
 
 MVKVulkanAPIObject* MVKDescriptorSetLayoutBinding::getVulkanAPIObject() { return _layout; };
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKDescriptorSetLayoutBinding::bind(MVKCommandEncoder* cmdEncoder,
                                          MVKDescriptorBinding& descBinding,
                                          MVKShaderResourceBinding& dslMTLRezIdxOffsets,
@@ -111,9 +113,9 @@
                     if (_applyToStage[i]) {
                         bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindBuffer(bb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
                         }
                     }
                 }
@@ -135,9 +137,9 @@
                     if (_applyToStage[i]) {
                         tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindTexture(tb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
                         }
                     }
                 }
@@ -150,9 +152,9 @@
                     if (_applyToStage[i]) {
                         sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindSamplerState(sb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
                         }
                     }
                 }
@@ -172,11 +174,11 @@
                         tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
                         sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindTexture(tb);
-                            cmdEncoder->_computeResourcesState.bindSamplerState(sb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
-                            cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
                         }
                     }
                 }
@@ -194,6 +196,7 @@
     return *(T*)((const char*)pData + stride * index);
 }
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKDescriptorSetLayoutBinding::push(MVKCommandEncoder* cmdEncoder,
                                          uint32_t& dstArrayElement,
                                          uint32_t& descriptorCount,
@@ -243,9 +246,9 @@
                     if (_applyToStage[i]) {
                         bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindBuffer(bb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindBuffer(bb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindBuffer(MVKShaderStage(i), bb); }
                         }
                     }
                 }
@@ -267,9 +270,9 @@
                     if (_applyToStage[i]) {
                         tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindTexture(tb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
                         }
                     }
                 }
@@ -285,9 +288,9 @@
                     if (_applyToStage[i]) {
                         tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindTexture(tb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
                         }
                     }
                 }
@@ -296,18 +299,20 @@
 
             case VK_DESCRIPTOR_TYPE_SAMPLER: {
                 MVKSampler* sampler;
-                if (_immutableSamplers.empty())
+				if (_immutableSamplers.empty()) {
                     sampler = (MVKSampler*)get<VkDescriptorImageInfo>(pData, stride, rezIdx - dstArrayElement).sampler;
-                else
+					validate(sampler);
+				} else {
                     sampler = _immutableSamplers[rezIdx];
+				}
                 sb.mtlSamplerState = sampler->getMTLSamplerState();
                 for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
                     if (_applyToStage[i]) {
                         sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindSamplerState(sb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
                         }
                     }
                 }
@@ -317,24 +322,30 @@
             case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
                 const auto& imageInfo = get<VkDescriptorImageInfo>(pData, stride, rezIdx - dstArrayElement);
                 MVKImageView* imageView = (MVKImageView*)imageInfo.imageView;
-                MVKSampler* sampler = _immutableSamplers.empty() ? (MVKSampler*)imageInfo.sampler : _immutableSamplers[rezIdx];
                 tb.mtlTexture = imageView->getMTLTexture();
                 if (imageView) {
                     tb.swizzle = imageView->getPackedSwizzle();
                 } else {
                     tb.swizzle = 0;
                 }
+				MVKSampler* sampler;
+				if (_immutableSamplers.empty()) {
+					sampler = (MVKSampler*)imageInfo.sampler;
+					validate(sampler);
+				} else {
+					sampler = _immutableSamplers[rezIdx];
+				}
                 sb.mtlSamplerState = sampler->getMTLSamplerState();
                 for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
                     if (_applyToStage[i]) {
                         tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
                         sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
-                            cmdEncoder->_computeResourcesState.bindTexture(tb);
-                            cmdEncoder->_computeResourcesState.bindSamplerState(sb);
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindTexture(tb); }
+							if (cmdEncoder) { cmdEncoder->_computeResourcesState.bindSamplerState(sb); }
                         } else {
-                            cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb);
-                            cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb);
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindTexture(MVKShaderStage(i), tb); }
+							if (cmdEncoder) { cmdEncoder->_graphicsResourcesState.bindSamplerState(MVKShaderStage(i), sb); }
                         }
                     }
                 }
@@ -355,10 +366,21 @@
     }
 }
 
+// If depth compare is required, but unavailable on the device, the sampler can only be used as an immutable sampler
+bool MVKDescriptorSetLayoutBinding::validate(MVKSampler* mvkSampler) {
+	if (mvkSampler->getRequiresConstExprSampler()) {
+		_layout->setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkUpdateDescriptorSets(): Depth texture samplers using a compare operation can only be used as immutable samplers on this device."));
+		return false;
+	}
+	return true;
+}
+
 void MVKDescriptorSetLayoutBinding::populateShaderConverterContext(SPIRVToMSLConverterContext& context,
                                                                    MVKShaderResourceBinding& dslMTLRezIdxOffsets,
                                                                    uint32_t dslIndex) {
 
+	MVKSampler* mvkSamp = !_immutableSamplers.empty() ? _immutableSamplers.front() : nullptr;
+
     // Establish the resource indices to use, by combining the offsets of the DSL and this DSL binding.
     MVKShaderResourceBinding mtlIdxs = _mtlResourceIndexOffsets + dslMTLRezIdxOffsets;
 
@@ -375,7 +397,8 @@
                                               mtlIdxs.stages[i],
                                               models[i],
                                               dslIndex,
-                                              _info.binding);
+                                              _info.binding,
+											  mvkSamp);
         }
     }
 }
@@ -489,6 +512,7 @@
 #pragma mark -
 #pragma mark MVKDescriptorSetLayout
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKDescriptorSetLayout::bindDescriptorSet(MVKCommandEncoder* cmdEncoder,
                                                MVKDescriptorSet* descSet,
                                                MVKShaderResourceBinding& dslMTLRezIdxOffsets,
@@ -496,6 +520,8 @@
                                                uint32_t* pDynamicOffsetIndex) {
 
     if (_isPushDescriptorLayout) return;
+
+	clearConfigurationResult();
     uint32_t bindCnt = (uint32_t)_bindings.size();
     for (uint32_t bindIdx = 0; bindIdx < bindCnt; bindIdx++) {
         _bindings[bindIdx].bind(cmdEncoder, descSet->_bindings[bindIdx],
@@ -539,11 +565,14 @@
     return pData;
 }
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
                                                MVKVector<VkWriteDescriptorSet>& descriptorWrites,
                                                MVKShaderResourceBinding& dslMTLRezIdxOffsets) {
 
     if (!_isPushDescriptorLayout) return;
+
+	clearConfigurationResult();
     for (const VkWriteDescriptorSet& descWrite : descriptorWrites) {
         uint32_t dstBinding = descWrite.dstBinding;
         uint32_t dstArrayElement = descWrite.dstArrayElement;
@@ -571,6 +600,7 @@
     }
 }
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
                                                MVKDescriptorUpdateTemplate* descUpdateTemplate,
                                                const void* pData,
@@ -579,6 +609,8 @@
     if (!_isPushDescriptorLayout ||
         descUpdateTemplate->getType() != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR)
         return;
+
+	clearConfigurationResult();
     for (uint32_t i = 0; i < descUpdateTemplate->getNumberOfEntries(); i++) {
         const VkDescriptorUpdateTemplateEntryKHR* pEntry = descUpdateTemplate->getEntry(i);
         uint32_t dstBinding = pEntry->dstBinding;
@@ -644,6 +676,7 @@
 				_imageBindings[dstIdx].imageView = nullptr;		// Sampler only. Guard against app not explicitly clearing ImageView.
 				if (_hasDynamicSamplers) {
 					auto* mvkSampler = (MVKSampler*)pImgInfo->sampler;
+					validate(mvkSampler);
 					mvkSampler->retain();
 					_mtlSamplers[dstIdx] = mvkSampler ? mvkSampler->getMTLSamplerState() : nil;
 				} else {
@@ -667,6 +700,7 @@
 				_mtlTextures[dstIdx] = mvkImageView ? mvkImageView->getMTLTexture() : nil;
 				if (_hasDynamicSamplers) {
 					auto* mvkSampler = (MVKSampler*)pImgInfo->sampler;
+					validate(mvkSampler);
 					mvkSampler->retain();
 					_mtlSamplers[dstIdx] = mvkSampler ? mvkSampler->getMTLSamplerState() : nil;
 				} else {
@@ -808,7 +842,7 @@
 
 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
 			_imageBindings.resize(descCnt, VkDescriptorImageInfo());
-			_mtlTextures.resize(descCnt, VK_NULL_HANDLE);
+			_mtlTextures.resize(descCnt, nil);
 			initMTLSamplers(pBindingLayout);
 			break;
 
@@ -816,7 +850,7 @@
 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
 			_imageBindings.resize(descCnt, VkDescriptorImageInfo());
-			_mtlTextures.resize(descCnt, VK_NULL_HANDLE);
+			_mtlTextures.resize(descCnt, nil);
 			break;
 
 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
@@ -824,14 +858,14 @@
 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
 			_bufferBindings.resize(descCnt, VkDescriptorBufferInfo());
-			_mtlBuffers.resize(descCnt, VK_NULL_HANDLE);
+			_mtlBuffers.resize(descCnt, nil);
 			_mtlBufferOffsets.resize(descCnt, 0);
 			break;
 
 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-			_texelBufferBindings.resize(descCnt, VK_NULL_HANDLE);
-            _mtlTextures.resize(descCnt, VK_NULL_HANDLE);
+			_texelBufferBindings.resize(descCnt, nil);
+            _mtlTextures.resize(descCnt, nil);
 			break;
 
 		default:
@@ -878,7 +912,7 @@
 
     _mtlSamplers.reserve(descCnt);
     for (uint32_t i = 0; i < descCnt; i++) {
-        _mtlSamplers.push_back(_hasDynamicSamplers ? VK_NULL_HANDLE : imtblSamps[i]->getMTLSamplerState());
+		_mtlSamplers.push_back(_hasDynamicSamplers ? nil : imtblSamps[i]->getMTLSamplerState());
     }
 }
 
@@ -1052,7 +1086,7 @@
 #pragma mark -
 #pragma mark Support functions
 
-/** Updates the resource bindings in the descriptor sets inditified in the specified content. */
+// Updates the resource bindings in the descriptor sets identified in the specified content.
 void mvkUpdateDescriptorSets(uint32_t writeCount,
 							 const VkWriteDescriptorSet* pDescriptorWrites,
 							 uint32_t copyCount,
@@ -1089,7 +1123,7 @@
 	}
 }
 
-/** Updates the resource bindings in the given descriptor set from the specified template. */
+// Updates the resource bindings in the given descriptor set from the specified template.
 void mvkUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
 										VkDescriptorUpdateTemplateKHR updateTemplate,
 										const void* pData) {
@@ -1112,13 +1146,19 @@
 									   MVKShaderStageResourceBinding& ssRB,
 									   spv::ExecutionModel stage,
 									   uint32_t descriptorSetIndex,
-									   uint32_t bindingIndex) {
-	MSLResourceBinding ctxRB;
-    ctxRB.stage = stage;
-    ctxRB.descriptorSet = descriptorSetIndex;
-    ctxRB.binding = bindingIndex;
-    ctxRB.mslBuffer = ssRB.bufferIndex;
-    ctxRB.mslTexture = ssRB.textureIndex;
-    ctxRB.mslSampler = ssRB.samplerIndex;
-    context.resourceBindings.push_back(ctxRB);
+									   uint32_t bindingIndex,
+									   MVKSampler* immutableSampler) {
+	MSLResourceBinding rb;
+
+	auto& rbb = rb.resourceBinding;
+	rbb.stage = stage;
+	rbb.desc_set = descriptorSetIndex;
+	rbb.binding = bindingIndex;
+	rbb.msl_buffer = ssRB.bufferIndex;
+	rbb.msl_texture = ssRB.textureIndex;
+	rbb.msl_sampler = ssRB.samplerIndex;
+
+	if (immutableSampler) { immutableSampler->getConstexprSampler(rb); }
+
+	context.resourceBindings.push_back(rb);
 }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index 0ae43ac..e136c1f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -814,35 +814,39 @@
         }
     }
 
+#define setMSLVersion(maj, min)	\
+	_metalFeatures.mslVersion = SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::make_msl_version(maj, min);
+
 	switch (_metalFeatures.mslVersionEnum) {
 		case MTLLanguageVersion2_2:
 			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(2, 2);
 			break;
 		case MTLLanguageVersion2_1:
-			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(2, 1);
+			setMSLVersion(2, 1);
 			break;
 		case MTLLanguageVersion2_0:
-			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(2, 0);
+			setMSLVersion(2, 0);
 			break;
 		case MTLLanguageVersion1_2:
-			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(1, 2);
+			setMSLVersion(1, 2);
 			break;
 		case MTLLanguageVersion1_1:
-			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(1, 1);
+			setMSLVersion(1, 1);
 			break;
 #if MVK_IOS
 		case MTLLanguageVersion1_0:
-			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(1, 0);
+			setMSLVersion(1, 0);
 			break;
 #endif
 #if MVK_MACOS
 		// Silence compiler warning catch-22 on MTLLanguageVersion1_0.
 		// But allow iOS to be explicit so it warns on future enum values
 		default:
-			_metalFeatures.mslVersion = SPIRVToMSLConverterOptions::makeMSLVersion(1, 0);
+			setMSLVersion(1, 0);
 			break;
 #endif
 	}
+
 }
 
 bool MVKPhysicalDevice::getSupportsMetalVersion(MTLSoftwareVersion mtlVersion) {
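The `setMSLVersion()` macro above packs the MSL version with the same scheme as the `MVK_MAKE_VERSION()` macro earlier in this diff: `major * 10000 + minor * 100 + patch`. A small self-contained check, using a local stand-in rather than the SPIRV-Cross call itself:

```cpp
#include <cstdint>

// Local stand-in mirroring CompilerMSL::Options::make_msl_version(); not the SPIRV-Cross API itself.
constexpr uint32_t packMSLVersion(uint32_t major, uint32_t minor, uint32_t patch = 0) {
    return (major * 10000) + (minor * 100) + patch;
}

static_assert(packMSLVersion(2, 1) == 20100, "MSL 2.1 packs to 20100");
static_assert(packMSLVersion(1, 2) == 10200, "MSL 1.2 packs to 10200");
```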
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
index 9c31d20..db47a1d 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
@@ -21,6 +21,7 @@
 #include "MVKResource.h"
 #include "MVKSync.h"
 #include "MVKVector.h"
+#include <MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h>
 #include <mutex>
 
 #import <IOSurface/IOSurfaceRef.h>
@@ -345,6 +346,15 @@
 	/** Returns the Metal sampler state. */
 	inline id<MTLSamplerState> getMTLSamplerState() { return _mtlSamplerState; }
 
+	/**
+	 * If this sampler requires hardcoding in MSL, populates the hardcoded sampler in the resource binding.
+	 * Returns whether this sampler requires hardcoding in MSL, and the constant sampler was populated.
+	 */
+	bool getConstexprSampler(mvk::MSLResourceBinding& resourceBinding);
+
+	/** Returns whether this sampler must be implemented as a hardcoded constant sampler in the shader MSL code. */
+	inline 	bool getRequiresConstExprSampler() { return _requiresConstExprSampler; }
+
 	MVKSampler(MVKDevice* device, const VkSamplerCreateInfo* pCreateInfo);
 
 	~MVKSampler() override;
@@ -352,8 +362,11 @@
 protected:
 	void propogateDebugName() override {}
 	MTLSamplerDescriptor* getMTLSamplerDescriptor(const VkSamplerCreateInfo* pCreateInfo);
+	void initConstExprSampler(const VkSamplerCreateInfo* pCreateInfo);
 
 	id<MTLSamplerState> _mtlSamplerState;
+	SPIRV_CROSS_NAMESPACE::MSLConstexprSampler _constExprSampler;
+	bool _requiresConstExprSampler;
 };
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index 4cd8aa8..96ebc14 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -30,6 +30,7 @@
 #import "MTLSamplerDescriptor+MoltenVK.h"
 
 using namespace std;
+using namespace SPIRV_CROSS_NAMESPACE;
 
 
 #pragma mark MVKImage
@@ -1019,6 +1020,7 @@
 		_subresourceRange.layerCount == (is3D ? _image->_extent.depth : _image->_arrayLayers)) {
 		_useMTLTextureView = false;
 	}
+
 	// Never use views for subsets of 3D textures. Metal doesn't support them yet.
 	if (is3D && _subresourceRange.layerCount != _image->_extent.depth) {
 		_useMTLTextureView = false;
@@ -1033,6 +1035,14 @@
 #pragma mark -
 #pragma mark MVKSampler
 
+bool MVKSampler::getConstexprSampler(mvk::MSLResourceBinding& resourceBinding) {
+	resourceBinding.requiresConstExprSampler = _requiresConstExprSampler;
+	if (_requiresConstExprSampler) {
+		resourceBinding.constExprSampler = _constExprSampler;
+	}
+	return _requiresConstExprSampler;
+}
+
 // Returns an autoreleased Metal sampler descriptor constructed from the properties of this image.
 MTLSamplerDescriptor* MVKSampler::getMTLSamplerDescriptor(const VkSamplerCreateInfo* pCreateInfo) {
 
@@ -1052,12 +1062,12 @@
 								 : 1);
 	mtlSampDesc.normalizedCoordinates = !pCreateInfo->unnormalizedCoordinates;
 
-	if (pCreateInfo->compareEnable) {
-		if (_device->_pMetalFeatures->depthSampleCompare) {
-			mtlSampDesc.compareFunctionMVK = mvkMTLCompareFunctionFromVkCompareOp(pCreateInfo->compareOp);
-		} else {
-			setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateSampler(): Depth texture samplers do not support the comparison of the pixel value against a reference value."));
-		}
+	// If compareEnable is true, but dynamic samplers with depth compare are not available
+	// on this device, this sampler must only be used as an immutable sampler, and will
+	// be automatically hardcoded into the shader MSL. An error will be triggered if this
+	// sampler is used to update or push a descriptor binding.
+	if (pCreateInfo->compareEnable && !_requiresConstExprSampler) {
+		mtlSampDesc.compareFunctionMVK = mvkMTLCompareFunctionFromVkCompareOp(pCreateInfo->compareOp);
 	}
 
 #if MVK_MACOS
@@ -1077,9 +1087,96 @@
 	return [mtlSampDesc autorelease];
 }
 
-// Constructs an instance on the specified image.
 MVKSampler::MVKSampler(MVKDevice* device, const VkSamplerCreateInfo* pCreateInfo) : MVKVulkanAPIDeviceObject(device) {
+	_requiresConstExprSampler = pCreateInfo->compareEnable && !_device->_pMetalFeatures->depthSampleCompare;
     _mtlSamplerState = [getMTLDevice() newSamplerStateWithDescriptor: getMTLSamplerDescriptor(pCreateInfo)];
+	initConstExprSampler(pCreateInfo);
+}
+
+static MSLSamplerFilter getSpvMinMagFilterFromVkFilter(VkFilter vkFilter) {
+	switch (vkFilter) {
+		case VK_FILTER_LINEAR:	return MSL_SAMPLER_FILTER_LINEAR;
+
+		case VK_FILTER_NEAREST:
+		default:
+			return MSL_SAMPLER_FILTER_NEAREST;
+	}
+}
+
+static MSLSamplerMipFilter getSpvMipFilterFromVkMipMode(VkSamplerMipmapMode vkMipMode) {
+	switch (vkMipMode) {
+		case VK_SAMPLER_MIPMAP_MODE_LINEAR:		return MSL_SAMPLER_MIP_FILTER_LINEAR;
+		case VK_SAMPLER_MIPMAP_MODE_NEAREST:	return MSL_SAMPLER_MIP_FILTER_NEAREST;
+
+		default:
+			return MSL_SAMPLER_MIP_FILTER_NONE;
+	}
+}
+
+static MSLSamplerAddress getSpvAddressModeFromVkAddressMode(VkSamplerAddressMode vkAddrMode) {
+	switch (vkAddrMode) {
+		case VK_SAMPLER_ADDRESS_MODE_REPEAT:			return MSL_SAMPLER_ADDRESS_REPEAT;
+		case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:	return MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT;
+		case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:	return MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER;
+
+		case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
+		case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
+		default:
+			return MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
+	}
+}
+
+static MSLSamplerCompareFunc getSpvCompFuncFromVkCompOp(VkCompareOp vkCompOp) {
+	switch (vkCompOp) {
+		case VK_COMPARE_OP_LESS:				return MSL_SAMPLER_COMPARE_FUNC_LESS;
+		case VK_COMPARE_OP_EQUAL:				return MSL_SAMPLER_COMPARE_FUNC_EQUAL;
+		case VK_COMPARE_OP_LESS_OR_EQUAL:		return MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL;
+		case VK_COMPARE_OP_GREATER:				return MSL_SAMPLER_COMPARE_FUNC_GREATER;
+		case VK_COMPARE_OP_NOT_EQUAL:			return MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL;
+		case VK_COMPARE_OP_GREATER_OR_EQUAL:	return MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL;
+		case VK_COMPARE_OP_ALWAYS:				return MSL_SAMPLER_COMPARE_FUNC_ALWAYS;
+
+		case VK_COMPARE_OP_NEVER:
+		default:
+			return MSL_SAMPLER_COMPARE_FUNC_NEVER;
+	}
+}
+
+static MSLSamplerBorderColor getSpvBorderColorFromVkBorderColor(VkBorderColor vkBorderColor) {
+	switch (vkBorderColor) {
+		case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
+		case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
+			return MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK;
+
+		case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
+		case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
+			return MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE;
+
+		case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
+		case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
+		default:
+			return MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK;
+	}
+}
+
+void MVKSampler::initConstExprSampler(const VkSamplerCreateInfo* pCreateInfo) {
+	if ( !_requiresConstExprSampler ) { return; }
+
+	_constExprSampler.coord = pCreateInfo->unnormalizedCoordinates ? MSL_SAMPLER_COORD_PIXEL : MSL_SAMPLER_COORD_NORMALIZED;
+	_constExprSampler.min_filter = getSpvMinMagFilterFromVkFilter(pCreateInfo->minFilter);
+	_constExprSampler.mag_filter = getSpvMinMagFilterFromVkFilter(pCreateInfo->magFilter);
+	_constExprSampler.mip_filter = getSpvMipFilterFromVkMipMode(pCreateInfo->mipmapMode);
+	_constExprSampler.s_address = getSpvAddressModeFromVkAddressMode(pCreateInfo->addressModeU);
+	_constExprSampler.t_address = getSpvAddressModeFromVkAddressMode(pCreateInfo->addressModeV);
+	_constExprSampler.r_address = getSpvAddressModeFromVkAddressMode(pCreateInfo->addressModeW);
+	_constExprSampler.compare_func = getSpvCompFuncFromVkCompOp(pCreateInfo->compareOp);
+	_constExprSampler.border_color = getSpvBorderColorFromVkBorderColor(pCreateInfo->borderColor);
+	_constExprSampler.lod_clamp_min = pCreateInfo->minLod;
+	_constExprSampler.lod_clamp_max = pCreateInfo->maxLod;
+	_constExprSampler.max_anisotropy = pCreateInfo->maxAnisotropy;
+	_constExprSampler.compare_enable = pCreateInfo->compareEnable;
+	_constExprSampler.lod_clamp_enable = false;
+	_constExprSampler.anisotropy_enable = pCreateInfo->anisotropyEnable;
 }
 
 MVKSampler::~MVKSampler() {
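From the application's side, the constexpr-sampler path introduced in this file only applies when a compare-enabled sampler is baked into the descriptor set layout as an immutable sampler; on devices without `depthSampleCompare`, supplying such a sampler dynamically is now rejected with `VK_ERROR_FEATURE_NOT_PRESENT` during descriptor updates and pushes. A hedged usage sketch in plain Vulkan (the `device` handle and the filter/compare values are placeholders):

```cpp
// Create a depth-compare (shadow) sampler. On devices lacking dynamic depth-compare
// samplers, compareEnable is what routes it onto the MSL constexpr-sampler path.
VkSamplerCreateInfo sampInfo = {};
sampInfo.sType         = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
sampInfo.magFilter     = VK_FILTER_LINEAR;
sampInfo.minFilter     = VK_FILTER_LINEAR;
sampInfo.addressModeU  = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampInfo.addressModeV  = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampInfo.addressModeW  = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampInfo.compareEnable = VK_TRUE;
sampInfo.compareOp     = VK_COMPARE_OP_LESS_OR_EQUAL;

VkSampler shadowSampler;
vkCreateSampler(device, &sampInfo, nullptr, &shadowSampler);

// Bind it immutably in the layout. Passing it later through vkUpdateDescriptorSets()
// or a push descriptor would trigger the validation error added in this change.
VkDescriptorSetLayoutBinding binding = {};
binding.binding            = 0;
binding.descriptorType     = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
binding.descriptorCount    = 1;
binding.stageFlags         = VK_SHADER_STAGE_FRAGMENT_BIT;
binding.pImmutableSamplers = &shadowSampler;

VkDescriptorSetLayoutCreateInfo dslInfo = {};
dslInfo.sType        = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dslInfo.bindingCount = 1;
dslInfo.pBindings    = &binding;

VkDescriptorSetLayout setLayout;
vkCreateDescriptorSetLayout(device, &dslInfo, nullptr, &setLayout);
```

With the sampler immutable, `populateShaderConverterContext()` passes it down to the SPIRV-to-MSL converter, which emits it as a hardcoded constexpr sampler in the generated MSL.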
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index 57dc64c..5d31a12 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -31,50 +31,65 @@
 #include <cereal/types/vector.hpp>
 
 using namespace std;
+using namespace SPIRV_CROSS_NAMESPACE;
 
 
 #pragma mark MVKPipelineLayout
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKPipelineLayout::bindDescriptorSets(MVKCommandEncoder* cmdEncoder,
                                            MVKVector<MVKDescriptorSet*>& descriptorSets,
                                            uint32_t firstSet,
                                            MVKVector<uint32_t>& dynamicOffsets) {
-
+	clearConfigurationResult();
 	uint32_t pDynamicOffsetIndex = 0;
 	uint32_t dsCnt = (uint32_t)descriptorSets.size();
 	for (uint32_t dsIdx = 0; dsIdx < dsCnt; dsIdx++) {
 		MVKDescriptorSet* descSet = descriptorSets[dsIdx];
 		uint32_t dslIdx = firstSet + dsIdx;
-        _descriptorSetLayouts[dslIdx].bindDescriptorSet(cmdEncoder, descSet,
-                                                        _dslMTLResourceIndexOffsets[dslIdx],
-                                                        dynamicOffsets, &pDynamicOffsetIndex);
+		auto& dsl = _descriptorSetLayouts[dslIdx];
+		dsl.bindDescriptorSet(cmdEncoder, descSet,
+							  _dslMTLResourceIndexOffsets[dslIdx],
+							  dynamicOffsets, &pDynamicOffsetIndex);
+		setConfigurationResult(dsl.getConfigurationResult());
 	}
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
-		cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
+	if (cmdEncoder) {
+		for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+			cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
+		}
 	}
 }
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKPipelineLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
                                           MVKVector<VkWriteDescriptorSet>& descriptorWrites,
                                           uint32_t set) {
+	clearConfigurationResult();
+	auto& dsl = _descriptorSetLayouts[set];
+	dsl.pushDescriptorSet(cmdEncoder, descriptorWrites, _dslMTLResourceIndexOffsets[set]);
+	setConfigurationResult(dsl.getConfigurationResult());
 
-    _descriptorSetLayouts[set].pushDescriptorSet(cmdEncoder, descriptorWrites,
-                                                 _dslMTLResourceIndexOffsets[set]);
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
-		cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
+	if (cmdEncoder) {
+		for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+			cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
+		}
 	}
 }
 
+// A null cmdEncoder can be passed to perform a validation pass
 void MVKPipelineLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
                                           MVKDescriptorUpdateTemplate* descUpdateTemplate,
                                           uint32_t set,
                                           const void* pData) {
+	clearConfigurationResult();
+	auto& dsl = _descriptorSetLayouts[set];
+	dsl.pushDescriptorSet(cmdEncoder, descUpdateTemplate, pData, _dslMTLResourceIndexOffsets[set]);
+	setConfigurationResult(dsl.getConfigurationResult());
 
-    _descriptorSetLayouts[set].pushDescriptorSet(cmdEncoder, descUpdateTemplate,
-                                                 pData,
-                                                 _dslMTLResourceIndexOffsets[set]);
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
-		cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
+	if (cmdEncoder) {
+		for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+			cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
+		}
 	}
 }
 
@@ -102,7 +117,8 @@
 										  _pushConstantsMTLResourceIndexes.stages[i],
 										  models[i],
 										  kPushConstDescSet,
-										  kPushConstBinding);
+										  kPushConstBinding,
+										  nullptr);
 	}
 }
 
@@ -478,16 +494,16 @@
 	// Round up to 4 elements for 3-vectors, since that reflects how Metal lays them out.
 	if (vecWidth == 3) { vecWidth = 4; }
 	switch (output.baseType) {
-		case SPIRV_CROSS_NAMESPACE::SPIRType::SByte:
-		case SPIRV_CROSS_NAMESPACE::SPIRType::UByte:
+		case SPIRType::SByte:
+		case SPIRType::UByte:
 			return 1 * vecWidth;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Short:
-		case SPIRV_CROSS_NAMESPACE::SPIRType::UShort:
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Half:
+		case SPIRType::Short:
+		case SPIRType::UShort:
+		case SPIRType::Half:
 			return 2 * vecWidth;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Int:
-		case SPIRV_CROSS_NAMESPACE::SPIRType::UInt:
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Float:
+		case SPIRType::Int:
+		case SPIRType::UInt:
+		case SPIRType::Float:
 		default:
 			return 4 * vecWidth;
 	}
@@ -495,7 +511,7 @@
 
 static VkFormat mvkFormatFromOutput(const SPIRVShaderOutput& output) {
 	switch (output.baseType) {
-		case SPIRV_CROSS_NAMESPACE::SPIRType::SByte:
+		case SPIRType::SByte:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R8_SINT;
 				case 2: return VK_FORMAT_R8G8_SINT;
@@ -503,7 +519,7 @@
 				case 4: return VK_FORMAT_R8G8B8A8_SINT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::UByte:
+		case SPIRType::UByte:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R8_UINT;
 				case 2: return VK_FORMAT_R8G8_UINT;
@@ -511,7 +527,7 @@
 				case 4: return VK_FORMAT_R8G8B8A8_UINT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Short:
+		case SPIRType::Short:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R16_SINT;
 				case 2: return VK_FORMAT_R16G16_SINT;
@@ -519,7 +535,7 @@
 				case 4: return VK_FORMAT_R16G16B16A16_SINT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::UShort:
+		case SPIRType::UShort:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R16_UINT;
 				case 2: return VK_FORMAT_R16G16_UINT;
@@ -527,7 +543,7 @@
 				case 4: return VK_FORMAT_R16G16B16A16_UINT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Half:
+		case SPIRType::Half:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R16_SFLOAT;
 				case 2: return VK_FORMAT_R16G16_SFLOAT;
@@ -535,7 +551,7 @@
 				case 4: return VK_FORMAT_R16G16B16A16_SFLOAT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Int:
+		case SPIRType::Int:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R32_SINT;
 				case 2: return VK_FORMAT_R32G32_SINT;
@@ -543,7 +559,7 @@
 				case 4: return VK_FORMAT_R32G32B32A32_SINT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::UInt:
+		case SPIRType::UInt:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R32_UINT;
 				case 2: return VK_FORMAT_R32G32_UINT;
@@ -551,7 +567,7 @@
 				case 4: return VK_FORMAT_R32G32B32A32_UINT;
 			}
 			break;
-		case SPIRV_CROSS_NAMESPACE::SPIRType::Float:
+		case SPIRType::Float:
 			switch (output.vecWidth) {
 				case 1: return VK_FORMAT_R32_SFLOAT;
 				case 2: return VK_FORMAT_R32G32_SFLOAT;
@@ -738,12 +754,12 @@
 	uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
 	shaderContext.options.entryPointStage = spv::ExecutionModelVertex;
 	shaderContext.options.entryPointName = _pVertexSS->pName;
-	shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.indirectParamsBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.outputBufferIndex = _outputBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.shouldCaptureOutput = isTessellationPipeline();
-	shaderContext.options.isRasterizationDisabled = isTessellationPipeline() || (pCreateInfo->pRasterizationState && (pCreateInfo->pRasterizationState->rasterizerDiscardEnable));
+	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
+	shaderContext.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageVertex];
+	shaderContext.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageVertex];
+	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
+	shaderContext.options.mslOptions.capture_output_to_buffer = isTessellationPipeline();
+	shaderContext.options.mslOptions.disable_rasterization = isTessellationPipeline() || (pCreateInfo->pRasterizationState && (pCreateInfo->pRasterizationState->rasterizerDiscardEnable));
     addVertexInputToShaderConverterContext(shaderContext, pCreateInfo);
 	id<MTLFunction> mtlFunction = ((MVKShaderModule*)_pVertexSS->module)->getMTLFunction(&shaderContext, _pVertexSS->pSpecializationInfo, _pipelineCache).mtlFunction;
 	if ( !mtlFunction ) {
@@ -751,7 +767,7 @@
 		return false;
 	}
 	plDesc.vertexFunction = mtlFunction;
-	plDesc.rasterizationEnabled = !shaderContext.options.isRasterizationDisabled;
+	plDesc.rasterizationEnabled = !shaderContext.options.mslOptions.disable_rasterization;
 	_needsVertexSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
 	_needsVertexBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
 	_needsVertexOutputBuffer = shaderContext.options.needsOutputBuffer;
@@ -773,16 +789,19 @@
 	return true;
 }
 
-bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext, std::vector<SPIRVShaderOutput>& vtxOutputs) {
+bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc,
+													 const VkGraphicsPipelineCreateInfo* pCreateInfo,
+													 SPIRVToMSLConverterContext& shaderContext,
+													 std::vector<SPIRVShaderOutput>& vtxOutputs) {
 	shaderContext.options.entryPointStage = spv::ExecutionModelTessellationControl;
 	shaderContext.options.entryPointName = _pTessCtlSS->pName;
-	shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.indirectParamsBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.outputBufferIndex = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.patchOutputBufferIndex = _tessCtlPatchOutputBufferIndex;
-	shaderContext.options.tessLevelBufferIndex = _tessCtlLevelBufferIndex;
-	shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.shouldCaptureOutput = true;
+	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderContext.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
+	shaderContext.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderContext.options.mslOptions.shader_patch_output_buffer_index = _tessCtlPatchOutputBufferIndex;
+	shaderContext.options.mslOptions.shader_tess_factor_buffer_index = _tessCtlLevelBufferIndex;
+	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderContext.options.mslOptions.capture_output_to_buffer = true;
 	addPrevStageOutputToShaderConverterContext(shaderContext, vtxOutputs);
 	id<MTLFunction> mtlFunction = ((MVKShaderModule*)_pTessCtlSS->module)->getMTLFunction(&shaderContext, _pTessCtlSS->pSpecializationInfo, _pipelineCache).mtlFunction;
 	if ( !mtlFunction ) {
@@ -818,13 +837,16 @@
 	return true;
 }
 
-bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext, std::vector<SPIRVShaderOutput>& tcOutputs) {
+bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
+													  const VkGraphicsPipelineCreateInfo* pCreateInfo,
+													  SPIRVToMSLConverterContext& shaderContext,
+													  std::vector<SPIRVShaderOutput>& tcOutputs) {
 	shaderContext.options.entryPointStage = spv::ExecutionModelTessellationEvaluation;
 	shaderContext.options.entryPointName = _pTessEvalSS->pName;
-	shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageTessEval];
-	shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageTessEval];
-	shaderContext.options.shouldCaptureOutput = false;
-	shaderContext.options.isRasterizationDisabled = (pCreateInfo->pRasterizationState && (pCreateInfo->pRasterizationState->rasterizerDiscardEnable));
+	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessEval];
+	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageTessEval];
+	shaderContext.options.mslOptions.capture_output_to_buffer = false;
+	shaderContext.options.mslOptions.disable_rasterization = (pCreateInfo->pRasterizationState && (pCreateInfo->pRasterizationState->rasterizerDiscardEnable));
 	addPrevStageOutputToShaderConverterContext(shaderContext, tcOutputs);
 	id<MTLFunction> mtlFunction = ((MVKShaderModule*)_pTessEvalSS->module)->getMTLFunction(&shaderContext, _pTessEvalSS->pSpecializationInfo, _pipelineCache).mtlFunction;
 	if ( !mtlFunction ) {
@@ -833,7 +855,7 @@
 	}
 	// Yeah, you read that right. Tess. eval functions are a kind of vertex function in Metal.
 	plDesc.vertexFunction = mtlFunction;
-	plDesc.rasterizationEnabled = !shaderContext.options.isRasterizationDisabled;
+	plDesc.rasterizationEnabled = !shaderContext.options.mslOptions.disable_rasterization;
 	_needsTessEvalSwizzleBuffer = shaderContext.options.needsSwizzleBuffer;
 	_needsTessEvalBufferSizeBuffer = shaderContext.options.needsBufferSizeBuffer;
 	if (!verifyImplicitBuffer(_needsTessEvalSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessEval, "swizzle", kMVKTessEvalNumReservedBuffers)) {
@@ -845,13 +867,15 @@
 	return true;
 }
 
-bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConverterContext& shaderContext) {
+bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
+													  const VkGraphicsPipelineCreateInfo* pCreateInfo,
+													  SPIRVToMSLConverterContext& shaderContext) {
 	if (_pFragmentSS) {
 		shaderContext.options.entryPointStage = spv::ExecutionModelFragment;
-		shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageFragment];
-		shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageFragment];
+		shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageFragment];
+		shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageFragment];
 		shaderContext.options.entryPointName = _pFragmentSS->pName;
-		shaderContext.options.shouldCaptureOutput = false;
+		shaderContext.options.mslOptions.capture_output_to_buffer = false;
 		id<MTLFunction> mtlFunction = ((MVKShaderModule*)_pFragmentSS->module)->getMTLFunction(&shaderContext, _pFragmentSS->pSpecializationInfo, _pipelineCache).mtlFunction;
 		if ( !mtlFunction ) {
 			setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Fragment shader function could not be compiled into pipeline. See previous logged error."));
@@ -870,7 +894,9 @@
 	return true;
 }
 
-bool MVKGraphicsPipeline::addVertexInputToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkPipelineVertexInputStateCreateInfo* pVI, const SPIRVToMSLConverterContext& shaderContext) {
+bool MVKGraphicsPipeline::addVertexInputToPipeline(MTLRenderPipelineDescriptor* plDesc,
+												   const VkPipelineVertexInputStateCreateInfo* pVI,
+												   const SPIRVToMSLConverterContext& shaderContext) {
     // Collect extension structures
     VkPipelineVertexInputDivisorStateCreateInfoEXT* pVertexInputDivisorState = nullptr;
     auto* next = (MVKVkAPIStructHeader*)pVI->pNext;
@@ -955,7 +981,9 @@
 	return true;
 }
 
-void MVKGraphicsPipeline::addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkPipelineTessellationStateCreateInfo* pTS) {
+void MVKGraphicsPipeline::addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc,
+													const SPIRVTessReflectionData& reflectData,
+													const VkPipelineTessellationStateCreateInfo* pTS) {
 
 	VkPipelineTessellationDomainOriginStateCreateInfo* pTessDomainOriginState = nullptr;
 	if (reflectData.patchKind == spv::ExecutionModeTriangles) {
@@ -988,7 +1016,10 @@
 	plDesc.tessellationPartitionMode = mvkMTLTessellationPartitionModeFromSpvExecutionMode(reflectData.partitionMode);
 }
 
-void MVKGraphicsPipeline::addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkGraphicsPipelineCreateInfo* pCreateInfo, bool isTessellationVertexPipeline) {
+void MVKGraphicsPipeline::addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc,
+													  const SPIRVTessReflectionData& reflectData,
+													  const VkGraphicsPipelineCreateInfo* pCreateInfo,
+													  bool isTessellationVertexPipeline) {
 
     // Retrieve the render subpass for which this pipeline is being constructed
     MVKRenderPass* mvkRendPass = (MVKRenderPass*)pCreateInfo->renderPass;
@@ -1069,8 +1100,8 @@
         }
     }
 
-    shaderContext.options.mslVersion = _device->_pMetalFeatures->mslVersion;
-    shaderContext.options.texelBufferTextureWidth = _device->_pMetalFeatures->maxTextureDimension;
+    shaderContext.options.mslOptions.msl_version = _device->_pMetalFeatures->mslVersion;
+    shaderContext.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension;
 
     MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
     layout->populateShaderConverterContext(shaderContext);
@@ -1081,10 +1112,10 @@
     _tessCtlPatchOutputBufferIndex = layout->getTessCtlPatchOutputBufferIndex();
     _tessCtlLevelBufferIndex = layout->getTessCtlLevelBufferIndex();
 
-    shaderContext.options.isRenderingPoints = isRenderingPoints(pCreateInfo, reflectData);
+    shaderContext.options.mslOptions.enable_point_size_builtin = isRenderingPoints(pCreateInfo, reflectData);
     shaderContext.options.shouldFlipVertexY = _device->_pMVKConfig->shaderConversionFlipVertexY;
-    shaderContext.options.shouldSwizzleTextureSamples = _fullImageViewSwizzle;
-    shaderContext.options.tessDomainOriginInLowerLeft = pTessDomainOriginState && pTessDomainOriginState->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT;
+    shaderContext.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle;
+    shaderContext.options.mslOptions.tess_domain_origin_lower_left = pTessDomainOriginState && pTessDomainOriginState->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT;
 
     shaderContext.options.tessPatchKind = reflectData.patchKind;
     shaderContext.options.numTessControlPoints = reflectData.numControlPoints;
@@ -1101,9 +1132,9 @@
 
         // Set binding and offset from Vulkan vertex attribute
         MSLVertexAttribute va;
-        va.location = pVKVA->location;
-        va.mslBuffer = _device->getMetalBufferIndexForVertexAttributeBinding(pVKVA->binding);
-        va.mslOffset = pVKVA->offset;
+        va.vertexAttribute.location = pVKVA->location;
+        va.vertexAttribute.msl_buffer = _device->getMetalBufferIndexForVertexAttributeBinding(pVKVA->binding);
+        va.vertexAttribute.msl_offset = pVKVA->offset;
 
         // Metal can't do signedness conversions on vertex buffers (rdar://45922847). If the shader
         // and the vertex attribute have mismatched signedness, we have to fix the shader
@@ -1112,11 +1143,11 @@
         // declared type. Programs that try to invoke undefined behavior are on their own.
         switch (mvkFormatTypeFromVkFormat(pVKVA->format) ) {
         case kMVKFormatColorUInt8:
-            va.format = MSLVertexFormat::UInt8;
+            va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT8;
             break;
 
         case kMVKFormatColorUInt16:
-            va.format = MSLVertexFormat::UInt16;
+            va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT16;
             break;
 
         case kMVKFormatDepthStencil:
@@ -1126,7 +1157,7 @@
             case VK_FORMAT_D16_UNORM_S8_UINT:
             case VK_FORMAT_D24_UNORM_S8_UINT:
             case VK_FORMAT_D32_SFLOAT_S8_UINT:
-                va.format = MSLVertexFormat::UInt8;
+                va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT8;
                 break;
 
             default:
@@ -1144,8 +1175,8 @@
         for (uint32_t vbIdx = 0; vbIdx < vbCnt; vbIdx++) {
             const VkVertexInputBindingDescription* pVKVB = &pCreateInfo->pVertexInputState->pVertexBindingDescriptions[vbIdx];
             if (pVKVB->binding == pVKVA->binding) {
-                va.mslStride = pVKVB->stride;
-                va.isPerInstance = (pVKVB->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE);
+                va.vertexAttribute.msl_stride = pVKVB->stride;
+                va.vertexAttribute.per_instance = (pVKVB->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE);
                 break;
             }
         }
@@ -1162,16 +1193,16 @@
     uint32_t vaCnt = (uint32_t)shaderOutputs.size();
     for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) {
         MSLVertexAttribute va;
-        va.location = shaderOutputs[vaIdx].location;
-        va.builtin = shaderOutputs[vaIdx].builtin;
+        va.vertexAttribute.location = shaderOutputs[vaIdx].location;
+        va.vertexAttribute.builtin = shaderOutputs[vaIdx].builtin;
 
         switch (mvkFormatTypeFromVkFormat(mvkFormatFromOutput(shaderOutputs[vaIdx]) ) ) {
             case kMVKFormatColorUInt8:
-                va.format = MSLVertexFormat::UInt8;
+                va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT8;
                 break;
 
             case kMVKFormatColorUInt16:
-                va.format = MSLVertexFormat::UInt16;
+                va.vertexAttribute.format = MSL_VERTEX_FORMAT_UINT16;
                 break;
 
             default:
@@ -1258,16 +1289,16 @@
     SPIRVToMSLConverterContext shaderContext;
 	shaderContext.options.entryPointName = pCreateInfo->stage.pName;
 	shaderContext.options.entryPointStage = spv::ExecutionModelGLCompute;
-    shaderContext.options.mslVersion = _device->_pMetalFeatures->mslVersion;
-    shaderContext.options.texelBufferTextureWidth = _device->_pMetalFeatures->maxTextureDimension;
-	shaderContext.options.shouldSwizzleTextureSamples = _fullImageViewSwizzle;
+    shaderContext.options.mslOptions.msl_version = _device->_pMetalFeatures->mslVersion;
+    shaderContext.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension;
+	shaderContext.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle;
 
     MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
     layout->populateShaderConverterContext(shaderContext);
     _swizzleBufferIndex = layout->getSwizzleBufferIndex();
     _bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
-    shaderContext.options.swizzleBufferIndex = _swizzleBufferIndex.stages[kMVKShaderStageCompute];
-    shaderContext.options.bufferSizeBufferIndex = _bufferSizeBufferIndex.stages[kMVKShaderStageCompute];
+    shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageCompute];
+    shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageCompute];
 
     MVKShaderModule* mvkShdrMod = (MVKShaderModule*)pSS->module;
     MVKMTLFunction func = mvkShdrMod->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache);
@@ -1316,85 +1347,6 @@
 	MVKPipelineCacheEntryTypeShaderLibrary = 1,
 } MVKPipelineCacheEntryType;
 
-// Ceral archive definitions
-namespace mvk {
-
-	template<class Archive>
-	void serialize(Archive & archive, SPIRVWorkgroupSizeDimension& wsd) {
-		archive(wsd.size,
-				wsd.specializationID,
-				wsd.isSpecialized);
-	}
-
-	template<class Archive>
-	void serialize(Archive & archive, SPIRVEntryPoint& ep) {
-		archive(ep.mtlFunctionName,
-				ep.workgroupSize.width,
-				ep.workgroupSize.height,
-				ep.workgroupSize.depth);
-	}
-
-	template<class Archive>
-	void serialize(Archive & archive, SPIRVToMSLConverterOptions& opt) {
-		archive(opt.entryPointName,
-				opt.entryPointStage,
-				opt.tessPatchKind,
-				opt.mslVersion,
-				opt.texelBufferTextureWidth,
-				opt.swizzleBufferIndex,
-				opt.indirectParamsBufferIndex,
-				opt.outputBufferIndex,
-				opt.patchOutputBufferIndex,
-				opt.tessLevelBufferIndex,
-				opt.bufferSizeBufferIndex,
-				opt.inputThreadgroupMemIndex,
-				opt.numTessControlPoints,
-				opt.shouldFlipVertexY,
-				opt.isRenderingPoints,
-				opt.shouldSwizzleTextureSamples,
-				opt.shouldCaptureOutput,
-				opt.tessDomainOriginInLowerLeft,
-				opt.isRasterizationDisabled,
-				opt.needsSwizzleBuffer,
-				opt.needsOutputBuffer,
-				opt.needsPatchOutputBuffer,
-				opt.needsBufferSizeBuffer,
-				opt.needsInputThreadgroupMem);
-	}
-
-	template<class Archive>
-	void serialize(Archive & archive, MSLVertexAttribute& va) {
-		archive(va.location,
-				va.mslBuffer,
-				va.mslOffset,
-				va.mslStride,
-				va.isPerInstance,
-				va.isUsedByShader);
-	}
-
-	template<class Archive>
-	void serialize(Archive & archive, MSLResourceBinding& rb) {
-		archive(rb.stage,
-				rb.descriptorSet,
-				rb.binding,
-				rb.mslBuffer,
-				rb.mslTexture,
-				rb.mslSampler,
-				rb.isUsedByShader);
-	}
-
-	template<class Archive>
-	void serialize(Archive & archive, SPIRVToMSLConverterContext& ctx) {
-		archive(ctx.options, ctx.vertexAttributes, ctx.resourceBindings);
-	}
-
-}
-
-template<class Archive>
-void serialize(Archive & archive, MVKShaderModuleKey& k) {
-	archive(k.codeSize, k.codeHash);
-}
-
 // Helper class to iterate through the shader libraries in a shader library cache in order to serialize them.
 // Needs to support input of null shader library cache.
 class MVKShaderCacheIterator : public MVKBaseObject {
@@ -1587,6 +1539,133 @@
 }
 
 
+#pragma mark Cereal archive definitions
+
+namespace SPIRV_CROSS_NAMESPACE {
+
+	template<class Archive>
+	void serialize(Archive & archive, CompilerMSL::Options& opt) {
+		archive(opt.platform,
+				opt.msl_version,
+				opt.texel_buffer_texture_width,
+				opt.swizzle_buffer_index,
+				opt.indirect_params_buffer_index,
+				opt.shader_output_buffer_index,
+				opt.shader_patch_output_buffer_index,
+				opt.shader_tess_factor_buffer_index,
+				opt.buffer_size_buffer_index,
+				opt.shader_input_wg_index,
+				opt.enable_point_size_builtin,
+				opt.disable_rasterization,
+				opt.capture_output_to_buffer,
+				opt.swizzle_texture_samples,
+				opt.tess_domain_origin_lower_left,
+				opt.argument_buffers,
+				opt.pad_fragment_output_components,
+				opt.texture_buffer_native);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, MSLVertexAttr& va) {
+		archive(va.location,
+				va.msl_buffer,
+				va.msl_offset,
+				va.msl_stride,
+				va.per_instance,
+				va.format,
+				va.builtin);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, MSLResourceBinding& rb) {
+		archive(rb.stage,
+				rb.desc_set,
+				rb.binding,
+				rb.msl_buffer,
+				rb.msl_texture,
+				rb.msl_sampler);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, MSLConstexprSampler& cs) {
+		archive(cs.coord,
+				cs.min_filter,
+				cs.mag_filter,
+				cs.mip_filter,
+				cs.s_address,
+				cs.t_address,
+				cs.r_address,
+				cs.compare_func,
+				cs.border_color,
+				cs.lod_clamp_min,
+				cs.lod_clamp_max,
+				cs.max_anisotropy,
+				cs.compare_enable,
+				cs.lod_clamp_enable,
+				cs.anisotropy_enable);
+	}
+
+}
+
+namespace mvk {
+
+	template<class Archive>
+	void serialize(Archive & archive, SPIRVWorkgroupSizeDimension& wsd) {
+		archive(wsd.size,
+				wsd.specializationID,
+				wsd.isSpecialized);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, SPIRVEntryPoint& ep) {
+		archive(ep.mtlFunctionName,
+				ep.workgroupSize.width,
+				ep.workgroupSize.height,
+				ep.workgroupSize.depth);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, SPIRVToMSLConverterOptions& opt) {
+		archive(opt.mslOptions,
+				opt.entryPointName,
+				opt.entryPointStage,
+				opt.tessPatchKind,
+				opt.numTessControlPoints,
+				opt.shouldFlipVertexY,
+				opt.needsSwizzleBuffer,
+				opt.needsOutputBuffer,
+				opt.needsPatchOutputBuffer,
+				opt.needsBufferSizeBuffer,
+				opt.needsInputThreadgroupMem);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, MSLVertexAttribute& va) {
+		archive(va.vertexAttribute,
+				va.isUsedByShader);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, MSLResourceBinding& rb) {
+		archive(rb.resourceBinding,
+				rb.constExprSampler,
+				rb.requiresConstExprSampler,
+				rb.isUsedByShader);
+	}
+
+	template<class Archive>
+	void serialize(Archive & archive, SPIRVToMSLConverterContext& ctx) {
+		archive(ctx.options, ctx.vertexAttributes, ctx.resourceBindings);
+	}
+
+}
+
+template<class Archive>
+void serialize(Archive & archive, MVKShaderModuleKey& k) {
+	archive(k.codeSize, k.codeHash);
+}
+
+
 #pragma mark Construction
 
 MVKPipelineCache::MVKPipelineCache(MVKDevice* device, const VkPipelineCacheCreateInfo* pCreateInfo) : MVKVulkanAPIDeviceObject(device) {
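
The Cereal serialize() definitions above are ordinary free functions declared in the namespaces of the types they stream, so cereal resolves them through argument-dependent lookup and can archive a whole converter context in one call. A minimal standalone sketch of that round trip (not MoltenVK's own archive setup; writeContext() and the use of cereal::BinaryOutputArchive are illustrative assumptions, and the serialize() overloads above are assumed visible in the translation unit):

	// Sketch only: stream a converter context into a byte blob with cereal.
	#include <sstream>
	#include <string>
	#include <cereal/archives/binary.hpp>
	#include <cereal/types/string.hpp>   // for std::string members such as entryPointName
	#include <cereal/types/vector.hpp>   // for the vertexAttributes and resourceBindings vectors
	#include "SPIRVToMSLConverter.h"

	static std::string writeContext(mvk::SPIRVToMSLConverterContext& ctx) {
		std::ostringstream out;
		{
			cereal::BinaryOutputArchive archive(out);   // picks up the serialize() overloads via ADL
			archive(ctx);                               // recurses into options, vertexAttributes, resourceBindings
		}
		return out.str();
	}
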
diff --git a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp
index 4258216..4cfc542 100644
--- a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp
+++ b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.cpp
@@ -21,11 +21,11 @@
 #include "MVKStrings.h"
 #include "FileSupport.h"
 #include "SPIRVSupport.h"
-#include <SPIRV-Cross/spirv_msl.hpp>
 #include <fstream>
 
 using namespace mvk;
 using namespace std;
+using namespace SPIRV_CROSS_NAMESPACE;
 
 
 #pragma mark -
@@ -40,24 +40,36 @@
 
 MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterOptions::matches(const SPIRVToMSLConverterOptions& other) const {
 	if (entryPointStage != other.entryPointStage) { return false; }
-    if (mslVersion != other.mslVersion) { return false; }
-	if (texelBufferTextureWidth != other.texelBufferTextureWidth) { return false; }
-	if (swizzleBufferIndex != other.swizzleBufferIndex) { return false; }
-	if (indirectParamsBufferIndex != other.indirectParamsBufferIndex) { return false; }
-	if (outputBufferIndex != other.outputBufferIndex) { return false; }
-	if (patchOutputBufferIndex != other.patchOutputBufferIndex) { return false; }
-	if (tessLevelBufferIndex != other.tessLevelBufferIndex) { return false; }
-	if (bufferSizeBufferIndex != other.bufferSizeBufferIndex) { return false; }
-	if (inputThreadgroupMemIndex != other.inputThreadgroupMemIndex) { return false; }
-    if (!!shouldFlipVertexY != !!other.shouldFlipVertexY) { return false; }
-    if (!!isRenderingPoints != !!other.isRenderingPoints) { return false; }
-	if (!!shouldSwizzleTextureSamples != !!other.shouldSwizzleTextureSamples) { return false; }
-	if (!!shouldCaptureOutput != !!other.shouldCaptureOutput) { return false; }
-	if (!!tessDomainOriginInLowerLeft != !!other.tessDomainOriginInLowerLeft) { return false; }
+	if (entryPointName != other.entryPointName) { return false; }
 	if (tessPatchKind != other.tessPatchKind) { return false; }
 	if (numTessControlPoints != other.numTessControlPoints) { return false; }
-	if (entryPointName != other.entryPointName) { return false; }
-    return true;
+	if (!!shouldFlipVertexY != !!other.shouldFlipVertexY) { return false; }
+	if (!!needsSwizzleBuffer != !!other.needsSwizzleBuffer) { return false; }
+	if (!!needsOutputBuffer != !!other.needsOutputBuffer) { return false; }
+	if (!!needsPatchOutputBuffer != !!other.needsPatchOutputBuffer) { return false; }
+	if (!!needsBufferSizeBuffer != !!other.needsBufferSizeBuffer) { return false; }
+	if (!!needsInputThreadgroupMem != !!other.needsInputThreadgroupMem) { return false; }
+
+	if (mslOptions.platform != other.mslOptions.platform) { return false; }
+	if (mslOptions.msl_version != other.mslOptions.msl_version) { return false; }
+	if (mslOptions.texel_buffer_texture_width != other.mslOptions.texel_buffer_texture_width) { return false; }
+	if (mslOptions.swizzle_buffer_index != other.mslOptions.swizzle_buffer_index) { return false; }
+	if (mslOptions.indirect_params_buffer_index != other.mslOptions.indirect_params_buffer_index) { return false; }
+	if (mslOptions.shader_output_buffer_index != other.mslOptions.shader_output_buffer_index) { return false; }
+	if (mslOptions.shader_patch_output_buffer_index != other.mslOptions.shader_patch_output_buffer_index) { return false; }
+	if (mslOptions.shader_tess_factor_buffer_index != other.mslOptions.shader_tess_factor_buffer_index) { return false; }
+	if (mslOptions.buffer_size_buffer_index != other.mslOptions.buffer_size_buffer_index) { return false; }
+	if (mslOptions.shader_input_wg_index != other.mslOptions.shader_input_wg_index) { return false; }
+	if (!!mslOptions.enable_point_size_builtin != !!other.mslOptions.enable_point_size_builtin) { return false; }
+	if (!!mslOptions.disable_rasterization != !!other.mslOptions.disable_rasterization) { return false; }
+	if (!!mslOptions.capture_output_to_buffer != !!other.mslOptions.capture_output_to_buffer) { return false; }
+	if (!!mslOptions.swizzle_texture_samples != !!other.mslOptions.swizzle_texture_samples) { return false; }
+	if (!!mslOptions.tess_domain_origin_lower_left != !!other.mslOptions.tess_domain_origin_lower_left) { return false; }
+	if (mslOptions.argument_buffers != other.mslOptions.argument_buffers) { return false; }
+	if (mslOptions.pad_fragment_output_components != other.mslOptions.pad_fragment_output_components) { return false; }
+	if (mslOptions.texture_buffer_native != other.mslOptions.texture_buffer_native) { return false; }
+
+	return true;
 }
 
 MVK_PUBLIC_SYMBOL std::string SPIRVToMSLConverterOptions::printMSLVersion(uint32_t mslVersion, bool includePatch) {
@@ -66,12 +78,12 @@
 	uint32_t major = mslVersion / 10000;
 	verStr += to_string(major);
 
-	uint32_t minor = (mslVersion - makeMSLVersion(major)) / 100;
+	uint32_t minor = (mslVersion - CompilerMSL::Options::make_msl_version(major)) / 100;
 	verStr += ".";
 	verStr += to_string(minor);
 
 	if (includePatch) {
-		uint32_t patch = mslVersion - makeMSLVersion(major, minor);
+		uint32_t patch = mslVersion - CompilerMSL::Options::make_msl_version(major, minor);
 		verStr += ".";
 		verStr += to_string(patch);
 	}
@@ -79,34 +91,56 @@
 	return verStr;
 }
 
-MVK_PUBLIC_SYMBOL mvk::SPIRVToMSLConverterOptions::Platform SPIRVToMSLConverterOptions::getNativePlatform() {
+MVK_PUBLIC_SYMBOL SPIRVToMSLConverterOptions::SPIRVToMSLConverterOptions() {
 #if MVK_MACOS
-	return SPIRVToMSLConverterOptions::macOS;
+	mslOptions.platform = CompilerMSL::Options::macOS;
 #endif
 #if MVK_IOS
-	return SPIRVToMSLConverterOptions::iOS;
+	mslOptions.platform = CompilerMSL::Options::iOS;
 #endif
 }
 
 MVK_PUBLIC_SYMBOL bool MSLVertexAttribute::matches(const MSLVertexAttribute& other) const {
-    if (location != other.location) { return false; }
-    if (mslBuffer != other.mslBuffer) { return false; }
-    if (mslOffset != other.mslOffset) { return false; }
-    if (mslStride != other.mslStride) { return false; }
-    if (format != other.format) { return false; }
-	if (builtin != other.builtin) { return false; }
-    if (!!isPerInstance != !!other.isPerInstance) { return false; }
-    return true;
+	if (vertexAttribute.location != other.vertexAttribute.location) { return false; }
+	if (vertexAttribute.msl_buffer != other.vertexAttribute.msl_buffer) { return false; }
+	if (vertexAttribute.msl_offset != other.vertexAttribute.msl_offset) { return false; }
+	if (vertexAttribute.msl_stride != other.vertexAttribute.msl_stride) { return false; }
+	if (vertexAttribute.format != other.vertexAttribute.format) { return false; }
+	if (vertexAttribute.builtin != other.vertexAttribute.builtin) { return false; }
+	if (!!vertexAttribute.per_instance != !!other.vertexAttribute.per_instance) { return false; }
+	return true;
 }
 
-MVK_PUBLIC_SYMBOL bool MSLResourceBinding::matches(const MSLResourceBinding& other) const {
-    if (stage != other.stage) { return false; }
-    if (descriptorSet != other.descriptorSet) { return false; }
-    if (binding != other.binding) { return false; }
-    if (mslBuffer != other.mslBuffer) { return false; }
-    if (mslTexture != other.mslTexture) { return false; }
-    if (mslSampler != other.mslSampler) { return false; }
-    return true;
+MVK_PUBLIC_SYMBOL bool mvk::MSLResourceBinding::matches(const MSLResourceBinding& other) const {
+	if (resourceBinding.stage != other.resourceBinding.stage) { return false; }
+	if (resourceBinding.desc_set != other.resourceBinding.desc_set) { return false; }
+	if (resourceBinding.binding != other.resourceBinding.binding) { return false; }
+	if (resourceBinding.msl_buffer != other.resourceBinding.msl_buffer) { return false; }
+	if (resourceBinding.msl_texture != other.resourceBinding.msl_texture) { return false; }
+	if (resourceBinding.msl_sampler != other.resourceBinding.msl_sampler) { return false; }
+
+	if (requiresConstExprSampler != other.requiresConstExprSampler) { return false; }
+
+	// If requiresConstExprSampler is false, constExprSampler can be ignored
+	if (requiresConstExprSampler) {
+		if (constExprSampler.coord != other.constExprSampler.coord) { return false; }
+		if (constExprSampler.min_filter != other.constExprSampler.min_filter) { return false; }
+		if (constExprSampler.mag_filter != other.constExprSampler.mag_filter) { return false; }
+		if (constExprSampler.mip_filter != other.constExprSampler.mip_filter) { return false; }
+		if (constExprSampler.s_address != other.constExprSampler.s_address) { return false; }
+		if (constExprSampler.t_address != other.constExprSampler.t_address) { return false; }
+		if (constExprSampler.r_address != other.constExprSampler.r_address) { return false; }
+		if (constExprSampler.compare_func != other.constExprSampler.compare_func) { return false; }
+		if (constExprSampler.border_color != other.constExprSampler.border_color) { return false; }
+		if (constExprSampler.lod_clamp_min != other.constExprSampler.lod_clamp_min) { return false; }
+		if (constExprSampler.lod_clamp_max != other.constExprSampler.lod_clamp_max) { return false; }
+		if (constExprSampler.max_anisotropy != other.constExprSampler.max_anisotropy) { return false; }
+		if (constExprSampler.compare_enable != other.constExprSampler.compare_enable) { return false; }
+		if (constExprSampler.lod_clamp_enable != other.constExprSampler.lod_clamp_enable) { return false; }
+		if (constExprSampler.anisotropy_enable != other.constExprSampler.anisotropy_enable) { return false; }
+	}
+
+	return true;
 }
 
 MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterContext::stageSupportsVertexAttributes() const {
@@ -118,7 +152,7 @@
 // Check them all in case inactive VA's duplicate locations used by active VA's.
 MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterContext::isVertexAttributeLocationUsed(uint32_t location) const {
     for (auto& va : vertexAttributes) {
-        if ((va.location == location) && va.isUsedByShader) { return true; }
+        if ((va.vertexAttribute.location == location) && va.isUsedByShader) { return true; }
     }
     return false;
 }
@@ -126,7 +160,7 @@
 // Check them all in case inactive VA's duplicate buffers used by active VA's.
 MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverterContext::isVertexBufferUsed(uint32_t mslBuffer) const {
     for (auto& va : vertexAttributes) {
-        if ((va.mslBuffer == mslBuffer) && va.isUsedByShader) { return true; }
+        if ((va.vertexAttribute.msl_buffer == mslBuffer) && va.isUsedByShader) { return true; }
     }
     return false;
 }
@@ -159,7 +193,7 @@
 
 MVK_PUBLIC_SYMBOL void SPIRVToMSLConverterContext::alignWith(const SPIRVToMSLConverterContext& srcContext) {
 
-	options.isRasterizationDisabled = srcContext.options.isRasterizationDisabled;
+	options.mslOptions.disable_rasterization = srcContext.options.mslOptions.disable_rasterization;
 	options.needsSwizzleBuffer = srcContext.options.needsSwizzleBuffer;
 	options.needsOutputBuffer = srcContext.options.needsOutputBuffer;
 	options.needsPatchOutputBuffer = srcContext.options.needsPatchOutputBuffer;
@@ -187,9 +221,6 @@
 #pragma mark -
 #pragma mark SPIRVToMSLConverter
 
-// Return the SPIRV-Cross platform enum corresponding to a SPIRVToMSLConverterOptions platform enum value.
-SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::Platform getCompilerMSLPlatform(SPIRVToMSLConverterOptions::Platform platform);
-
 // Populates the entry point with info extracted from the SPRI-V compiler.
 void populateEntryPoint(SPIRVEntryPoint& entryPoint, SPIRV_CROSS_NAMESPACE::Compiler* pCompiler, SPIRVToMSLConverterOptions& options);
 
@@ -241,24 +272,8 @@
 
 		// Establish the MSL options for the compiler
 		// This needs to be done in two steps...for CompilerMSL and its superclass.
-		auto mslOpts = pMSLCompiler->get_msl_options();
-		mslOpts.platform = getCompilerMSLPlatform(context.options.platform);
-		mslOpts.msl_version = context.options.mslVersion;
-		mslOpts.texel_buffer_texture_width = context.options.texelBufferTextureWidth;
-		mslOpts.swizzle_buffer_index = context.options.swizzleBufferIndex;
-		mslOpts.indirect_params_buffer_index = context.options.indirectParamsBufferIndex;
-		mslOpts.shader_output_buffer_index = context.options.outputBufferIndex;
-		mslOpts.shader_patch_output_buffer_index = context.options.patchOutputBufferIndex;
-		mslOpts.shader_tess_factor_buffer_index = context.options.tessLevelBufferIndex;
-		mslOpts.buffer_size_buffer_index = context.options.bufferSizeBufferIndex;
-		mslOpts.shader_input_wg_index = context.options.inputThreadgroupMemIndex;
-		mslOpts.enable_point_size_builtin = context.options.isRenderingPoints;
-		mslOpts.disable_rasterization = context.options.isRasterizationDisabled;
-		mslOpts.swizzle_texture_samples = context.options.shouldSwizzleTextureSamples;
-		mslOpts.capture_output_to_buffer = context.options.shouldCaptureOutput;
-		mslOpts.tess_domain_origin_lower_left = context.options.tessDomainOriginInLowerLeft;
-		mslOpts.pad_fragment_output_components = true;
-		pMSLCompiler->set_msl_options(mslOpts);
+		context.options.mslOptions.pad_fragment_output_components = true;
+		pMSLCompiler->set_msl_options(context.options.mslOptions);
 
 		auto scOpts = pMSLCompiler->get_common_options();
 		scOpts.vertex.flip_vert_y = context.options.shouldFlipVertexY;
@@ -266,39 +281,19 @@
 
 		// Add vertex attributes
 		if (context.stageSupportsVertexAttributes()) {
-			SPIRV_CROSS_NAMESPACE::MSLVertexAttr va;
-			for (auto& ctxVA : context.vertexAttributes) {
-				va.location = ctxVA.location;
-				va.builtin = ctxVA.builtin;
-				va.msl_buffer = ctxVA.mslBuffer;
-				va.msl_offset = ctxVA.mslOffset;
-				va.msl_stride = ctxVA.mslStride;
-				va.per_instance = ctxVA.isPerInstance;
-				switch (ctxVA.format) {
-					case MSLVertexFormat::Other:
-						va.format = SPIRV_CROSS_NAMESPACE::MSL_VERTEX_FORMAT_OTHER;
-						break;
-					case MSLVertexFormat::UInt8:
-						va.format = SPIRV_CROSS_NAMESPACE::MSL_VERTEX_FORMAT_UINT8;
-						break;
-					case MSLVertexFormat::UInt16:
-						va.format = SPIRV_CROSS_NAMESPACE::MSL_VERTEX_FORMAT_UINT16;
-						break;
-				}
-				pMSLCompiler->add_msl_vertex_attribute(va);
+			for (auto& va : context.vertexAttributes) {
+				pMSLCompiler->add_msl_vertex_attribute(va.vertexAttribute);
 			}
 		}
 
-		// Add resource bindings
-		SPIRV_CROSS_NAMESPACE::MSLResourceBinding rb;
-		for (auto& ctxRB : context.resourceBindings) {
-			rb.desc_set = ctxRB.descriptorSet;
-			rb.binding = ctxRB.binding;
-			rb.stage = ctxRB.stage;
-			rb.msl_buffer = ctxRB.mslBuffer;
-			rb.msl_texture = ctxRB.mslTexture;
-			rb.msl_sampler = ctxRB.mslSampler;
-			pMSLCompiler->add_msl_resource_binding(rb);
+		// Add resource bindings and hardcoded constexpr samplers
+		for (auto& rb : context.resourceBindings) {
+			auto& rbb = rb.resourceBinding;
+			pMSLCompiler->add_msl_resource_binding(rbb);
+
+			if (rb.requiresConstExprSampler) {
+				pMSLCompiler->remap_constexpr_sampler_by_binding(rbb.desc_set, rbb.binding, rb.constExprSampler);
+			}
 		}
 
 		_msl = pMSLCompiler->compile();
@@ -320,7 +315,7 @@
 	// Populate the shader context with info from the compilation run, including
 	// which vertex attributes and resource bindings are used by the shader
 	populateEntryPoint(_entryPoint, pMSLCompiler, context.options);
-	context.options.isRasterizationDisabled = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
+	context.options.mslOptions.disable_rasterization = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
 	context.options.needsSwizzleBuffer = pMSLCompiler && pMSLCompiler->needs_swizzle_buffer();
 	context.options.needsOutputBuffer = pMSLCompiler && pMSLCompiler->needs_output_buffer();
 	context.options.needsPatchOutputBuffer = pMSLCompiler && pMSLCompiler->needs_patch_output_buffer();
@@ -329,11 +324,13 @@
 
 	if (context.stageSupportsVertexAttributes()) {
 		for (auto& ctxVA : context.vertexAttributes) {
-			ctxVA.isUsedByShader = pMSLCompiler->is_msl_vertex_attribute_used(ctxVA.location);
+			ctxVA.isUsedByShader = pMSLCompiler->is_msl_vertex_attribute_used(ctxVA.vertexAttribute.location);
 		}
 	}
 	for (auto& ctxRB : context.resourceBindings) {
-		ctxRB.isUsedByShader = pMSLCompiler->is_msl_resource_binding_used(ctxRB.stage, ctxRB.descriptorSet, ctxRB.binding);
+		ctxRB.isUsedByShader = pMSLCompiler->is_msl_resource_binding_used(ctxRB.resourceBinding.stage,
+																		  ctxRB.resourceBinding.desc_set,
+																		  ctxRB.resourceBinding.binding);
 	}
 
 	delete pMSLCompiler;
@@ -369,7 +366,7 @@
 	return _wasConverted;
 }
 
-/** Appends the message text to the result log. */
+// Appends the message text to the result log.
 void SPIRVToMSLConverter::logMsg(const char* logMsg) {
 	string trimMsg = trim(logMsg);
 	if ( !trimMsg.empty() ) {
@@ -378,14 +375,14 @@
 	}
 }
 
-/** Appends the error text to the result log, sets the wasConverted property to false, and returns it. */
+// Appends the error text to the result log, sets the wasConverted property to false, and returns it.
 bool SPIRVToMSLConverter::logError(const char* errMsg) {
 	logMsg(errMsg);
 	_wasConverted = false;
 	return _wasConverted;
 }
 
-/** Appends the SPIR-V to the result log, indicating whether it is being converted or was converted. */
+// Appends the SPIR-V to the result log, indicating whether it is being converted or was converted.
 void SPIRVToMSLConverter::logSPIRV(const char* opDesc) {
 
 	string spvLog;
@@ -403,10 +400,8 @@
 //	printf("\n%s\n", getResultLog().c_str());
 }
 
-/**
- * Writes the SPIR-V code to a file. This can be useful for debugging
- * when the SPRIR-V did not originally come from a known file
- */
+// Writes the SPIR-V code to a file. This can be useful for debugging
+// when the SPIR-V did not originally come from a known file.
 void SPIRVToMSLConverter::writeSPIRVToFile(string spvFilepath) {
 	vector<char> fileContents;
 	spirvToBytes(_spirv, fileContents);
@@ -418,7 +413,7 @@
 	}
 }
 
-/** Validates that the SPIR-V code will disassemble during logging. */
+// Validates that the SPIR-V code will disassemble during logging.
 bool SPIRVToMSLConverter::validateSPIRV() {
 	if (_spirv.size() < 5) { return false; }
 	if (_spirv[0] != spv::MagicNumber) { return false; }
@@ -426,7 +421,7 @@
 	return true;
 }
 
-/** Appends the source to the result log, prepending with the operation. */
+// Appends the source to the result log, prepending with the operation.
 void SPIRVToMSLConverter::logSource(string& src, const char* srcLang, const char* opDesc) {
     _resultLog += opDesc;
     _resultLog += " ";
@@ -441,14 +436,6 @@
 
 #pragma mark Support functions
 
-// Return the SPIRV-Cross platform enum corresponding to a SPIRVToMSLConverterOptions platform enum value.
-SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::Platform getCompilerMSLPlatform(SPIRVToMSLConverterOptions::Platform platform) {
-	switch (platform) {
-		case SPIRVToMSLConverterOptions::macOS: return SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::macOS;
-		case SPIRVToMSLConverterOptions::iOS: return SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::iOS;
-	}
-}
-
 // Populate a workgroup size dimension.
 void populateWorkgroupDimension(SPIRVWorkgroupSizeDimension& wgDim, uint32_t size, SPIRV_CROSS_NAMESPACE::SpecializationConstant& spvSpecConst) {
 	wgDim.size = max(size, 1u);
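
The printMSLVersion() changes above lean on SPIRV-Cross's CompilerMSL::Options::make_msl_version(), which packs a version as major*10000 + minor*100 + patch (the same scheme the removed makeMSLVersion() helper used). A short illustrative round trip, assuming <SPIRV-Cross/spirv_msl.hpp> is included; the variable names are not MoltenVK code:

	using Opts = SPIRV_CROSS_NAMESPACE::CompilerMSL::Options;
	uint32_t packed = Opts::make_msl_version(2, 1);                    // 20100
	uint32_t major  = packed / 10000;                                  // 2
	uint32_t minor  = (packed - Opts::make_msl_version(major)) / 100;  // 1
	uint32_t patch  = packed - Opts::make_msl_version(major, minor);   // 0
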
diff --git a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h
index 0d86ffe..ecaa595 100644
--- a/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h
+++ b/MoltenVKShaderConverter/MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h
@@ -20,6 +20,7 @@
 #define __SPIRVToMSLConverter_h_ 1
 
 #include <SPIRV-Cross/spirv.hpp>
+#include <SPIRV-Cross/spirv_msl.hpp>
 #include <string>
 #include <vector>
 #include <unordered_map>
@@ -37,78 +38,33 @@
 	 * CHANGES TO THIS STRUCT SHOULD BE CAPTURED IN THE STREAMING LOGIC OF THE PIPELINE CACHE.
 	 */
 	typedef struct SPIRVToMSLConverterOptions {
-
-		enum Platform {
-			iOS = 0,
-			macOS = 1
-		};
-
+		SPIRV_CROSS_NAMESPACE::CompilerMSL::Options mslOptions;
 		std::string entryPointName;
 		spv::ExecutionModel entryPointStage = spv::ExecutionModelMax;
 		spv::ExecutionMode tessPatchKind = spv::ExecutionModeMax;
-
-        uint32_t mslVersion = makeMSLVersion(2, 1);
-		Platform platform = getNativePlatform();
-		uint32_t texelBufferTextureWidth = 4096;
-		uint32_t swizzleBufferIndex = 0;
-		uint32_t indirectParamsBufferIndex = 0;
-		uint32_t outputBufferIndex = 0;
-		uint32_t patchOutputBufferIndex = 0;
-		uint32_t tessLevelBufferIndex = 0;
-		uint32_t bufferSizeBufferIndex = 0;
-		uint32_t inputThreadgroupMemIndex = 0;
 		uint32_t numTessControlPoints = 0;
 		bool shouldFlipVertexY = true;
-		bool isRenderingPoints = false;
-		bool shouldSwizzleTextureSamples = false;
-		bool shouldCaptureOutput = false;
-		bool tessDomainOriginInLowerLeft = false;
-
-		bool isRasterizationDisabled = false;
 		bool needsSwizzleBuffer = false;
 		bool needsOutputBuffer = false;
 		bool needsPatchOutputBuffer = false;
 		bool needsBufferSizeBuffer = false;
 		bool needsInputThreadgroupMem = false;
 
-        /** 
-         * Returns whether the specified options match this one.
-         * It does if all corresponding elements are equal.
-         */
-        bool matches(const SPIRVToMSLConverterOptions& other) const;
+		/**
+		 * Returns whether the specified options match this one.
+		 * It does if all corresponding elements are equal.
+		 */
+		bool matches(const SPIRVToMSLConverterOptions& other) const;
 
 		bool hasEntryPoint() const {
 			return !entryPointName.empty() && entryPointStage != spv::ExecutionModelMax;
 		}
 
-        void setMSLVersion(uint32_t major, uint32_t minor = 0, uint32_t point = 0) {
-            mslVersion = makeMSLVersion(major, minor, point);
-        }
-
-        bool supportsMSLVersion(uint32_t major, uint32_t minor = 0, uint32_t point = 0) const {
-            return mslVersion >= makeMSLVersion(major, minor, point);
-        }
-
-        static uint32_t makeMSLVersion(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) {
-            return (major * 10000) + (minor * 100) + patch;
-        }
-
 		static std::string printMSLVersion(uint32_t mslVersion, bool includePatch = false);
 
-		static Platform getNativePlatform();
+		SPIRVToMSLConverterOptions();
 
-    } SPIRVToMSLConverterOptions;
-
-	/**
-	 * Defines the format of a vertex attribute. Currently limited to describing
-	 * whether or not the attribute is of an 8-bit unsigned format, a 16-bit
-	 * unsigned format, or some other format.
-	 */
-	enum class MSLVertexFormat {
-		Other,
-		UInt8,
-		UInt16
-	};
+	} SPIRVToMSLConverterOptions;
 
 	/**
 	 * Defines MSL characteristics of a vertex attribute at a particular location.
@@ -119,22 +75,16 @@
 	 * CHANGES TO THIS STRUCT SHOULD BE CAPTURED IN THE STREAMING LOGIC OF THE PIPELINE CACHE.
 	 */
 	typedef struct MSLVertexAttribute {
-		uint32_t location = 0;
-		uint32_t mslBuffer = 0;
-        uint32_t mslOffset = 0;
-        uint32_t mslStride = 0;
-		MSLVertexFormat format = MSLVertexFormat::Other;
-		spv::BuiltIn builtin = spv::BuiltInMax;
-        bool isPerInstance = false;
+		SPIRV_CROSS_NAMESPACE::MSLVertexAttr vertexAttribute;
 
 		bool isUsedByShader = false;
 
-        /**
-         * Returns whether the specified vertex attribute match this one.
-         * It does if all corresponding elements except isUsedByShader are equal.
-         */
-        bool matches(const MSLVertexAttribute& other) const;
-        
+		/**
+		 * Returns whether the specified vertex attribute matches this one.
+		 * It does if all corresponding elements except isUsedByShader are equal.
+		 */
+		bool matches(const MSLVertexAttribute& other) const;
+
 	} MSLVertexAttribute;
 
 	/**
@@ -144,27 +94,27 @@
 	 * or sampler elements will be populated. The isUsedByShader flag is set to true during
 	 * compilation of SPIR-V to MSL if the shader makes use of this vertex attribute.
 	 *
+	 * If requiresConstExprSampler is true, the resource is a sampler whose content must be
+	 * hardcoded into the MSL as a constexpr type, instead of passed in as a runtime-bound variable.
+	 * The content of that constexpr sampler is defined in the constExprSampler parameter.
+	 *
 	 * THIS STRUCT IS STREAMED OUT AS PART OF THE PIEPLINE CACHE.
 	 * CHANGES TO THIS STRUCT SHOULD BE CAPTURED IN THE STREAMING LOGIC OF THE PIPELINE CACHE.
 	 */
 	typedef struct MSLResourceBinding {
-		spv::ExecutionModel stage;
-		uint32_t descriptorSet = 0;
-		uint32_t binding = 0;
-
-		uint32_t mslBuffer = 0;
-		uint32_t mslTexture = 0;
-		uint32_t mslSampler = 0;
+		SPIRV_CROSS_NAMESPACE::MSLResourceBinding resourceBinding;
+		SPIRV_CROSS_NAMESPACE::MSLConstexprSampler constExprSampler;
+		bool requiresConstExprSampler = false;
 
 		bool isUsedByShader = false;
 
-        /**
-         * Returns whether the specified resource binding match this one.
-         * It does if all corresponding elements except isUsedByShader are equal.
-         */
-        bool matches(const MSLResourceBinding& other) const;
+		/**
+		 * Returns whether the specified resource binding matches this one.
+		 * It does if all corresponding elements except isUsedByShader are equal.
+		 */
+		bool matches(const MSLResourceBinding& other) const;
 
-    } MSLResourceBinding;
+	} MSLResourceBinding;
 
 	/**
 	 * Context passed to the SPIRVToMSLConverter to map SPIR-V descriptors to Metal resource indices.
@@ -233,12 +183,6 @@
 		} workgroupSize;
 	} SPIRVEntryPoint;
 
-	/** Special constant used in a MSLResourceBinding descriptorSet element to indicate the bindings for the push constants. */
-    static const uint32_t kPushConstDescSet = std::numeric_limits<uint32_t>::max();
-
-	/** Special constant used in a MSLResourceBinding binding element to indicate the bindings for the push constants. */
-	static const uint32_t kPushConstBinding = 0;
-
 
 #pragma mark -
 #pragma mark SPIRVToMSLConverter
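
With the reworked mvk::MSLResourceBinding above, a hardcoded constexpr sampler travels alongside the Metal resource indices, and the converter hands it to remap_constexpr_sampler_by_binding() during conversion. A hedged sketch of filling one in for an immutable depth-compare sampler (the descriptor set and binding numbers, the 'ctx' variable, and the chosen compare function are illustrative; the field and constant names are taken from SPIRV-Cross's spirv_msl.hpp):

	mvk::MSLResourceBinding rb;
	rb.resourceBinding.stage = spv::ExecutionModelFragment;
	rb.resourceBinding.desc_set = 0;
	rb.resourceBinding.binding = 1;
	rb.resourceBinding.msl_sampler = 1;            // Metal [[sampler(1)]] slot

	rb.requiresConstExprSampler = true;            // hardcode into MSL rather than bind at runtime
	rb.constExprSampler.compare_enable = true;     // depth-compare sampler
	rb.constExprSampler.compare_func = SPIRV_CROSS_NAMESPACE::MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL;

	ctx.resourceBindings.push_back(rb);            // 'ctx' is an assumed SPIRVToMSLConverterContext
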
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.cpp b/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.cpp
index cf441bc..b95ab6b 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.cpp
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.cpp
@@ -205,8 +205,8 @@
 
 	// Derive the context under which conversion will occur
 	SPIRVToMSLConverterContext mslContext;
-	mslContext.options.platform = _mslPlatform;
-	mslContext.options.setMSLVersion(_mslVersionMajor, _mslVersionMinor, _mslVersionPatch);
+	mslContext.options.mslOptions.platform = _mslPlatform;
+	mslContext.options.mslOptions.set_msl_version(_mslVersionMajor, _mslVersionMinor, _mslVersionPatch);
 	mslContext.options.shouldFlipVertexY = _shouldFlipVertexY;
 
 	SPIRVToMSLConverter spvConverter;
@@ -387,7 +387,7 @@
 	_mslVersionMajor = 2;
 	_mslVersionMinor = 1;
 	_mslVersionPatch = 0;
-	_mslPlatform = SPIRVToMSLConverterOptions::getNativePlatform();
+	_mslPlatform = SPIRVToMSLConverterOptions().mslOptions.platform;
 
 	_isActive = parseArgs(argc, argv);
 	if ( !_isActive ) { showUsage(); }
@@ -463,10 +463,10 @@
 
 			switch (shdrTypeStr.front()) {
 				case 'm':
-					_mslPlatform = SPIRVToMSLConverterOptions::macOS;
+					_mslPlatform = SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::macOS;
 					break;
 				case 'i':
-					_mslPlatform = SPIRVToMSLConverterOptions::iOS;
+					_mslPlatform = SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::iOS;
 					break;
 				default:
 					return false;
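
Since the tool no longer has its own Platform enum or setMSLVersion() helper, both the target platform and the MSL version are set straight on the embedded SPIRV-Cross options, with the native default coming from a default-constructed SPIRVToMSLConverterOptions. A brief sketch under those assumptions (the iOS target and 2.1 version are illustrative values):

	mvk::SPIRVToMSLConverterContext mslContext;   // platform defaults to the native platform
	mslContext.options.mslOptions.platform = SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::iOS;  // or macOS
	mslContext.options.mslOptions.set_msl_version(2, 1);                                        // packs to 20100
	mslContext.options.shouldFlipVertexY = true;
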
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.h b/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.h
index b4baa61..de571fa 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.h
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverterTool/MoltenVKShaderConverterTool.h
@@ -106,7 +106,7 @@
 		uint32_t _mslVersionMajor;
 		uint32_t _mslVersionMinor;
 		uint32_t _mslVersionPatch;
-		SPIRVToMSLConverterOptions::Platform _mslPlatform;
+		SPIRV_CROSS_NAMESPACE::CompilerMSL::Options::Platform _mslPlatform;
 		bool _isActive;
 		bool _shouldUseDirectoryRecursion;
 		bool _shouldReadGLSL;
diff --git a/README.md b/README.md
index 046524c..72d7fd1 100644
--- a/README.md
+++ b/README.md
@@ -99,7 +99,7 @@
 1. Ensure you have `cmake` and `python3` installed:
 
 		brew install cmake
-		brew install python
+		brew install python3
 
    For faster dependency builds, you can also optionally install `ninja`:
 
diff --git a/Templates/spirv-tools/build.zip b/Templates/spirv-tools/build.zip
new file mode 100644
index 0000000..2434da0
--- /dev/null
+++ b/Templates/spirv-tools/build.zip
Binary files differ
diff --git a/fetchDependencies b/fetchDependencies
index 92e2ec6..89ad70e 100755
--- a/fetchDependencies
+++ b/fetchDependencies
@@ -4,29 +4,32 @@
 #
 # fetchDependencies - Retrieves the correct versions of all dependencies
 #
-# macOS usage: ./fetchDependencies [-v] [--debug] [--v-headers-root path] [--spirv-cross-root path] [--glslang-root path]
+# macOS usage: ./fetchDependencies [-v] [--debug] [--skip-spirv-tools-build]
+#                                  [--v-headers-root path] [--spirv-cross-root path] [--glslang-root path]
 #
-#      --v-headers-root path
-#              "path" specifies a directory path to a
-#              KhronosGroup/Vulkan-Headers repository.
-#              This repository does not have to be built.
-#
-#      --spirv-cross-root path
-#              "path" specifies a directory path to a
-#              KhronosGroup/SPIRV-Cross repository.
-#              This repository does not have to be built.
+#      --debug
+#              Build the external libraries in Debug mode, which may be useful when debugging
+#              and tracing calls into those libraries.
 #
 #      --glslang-root path
-#              "path" specifies a directory path to a KhronosGroup/glslang
-#              repository.  This repository does need to be built and the
-#              build directory must be in the specified directory.
-#              It should be built the same way this script builds it.
+#              "path" specifies a directory path to a KhronosGroup/glslang repository.
+#              This repository does need to be built and the build directory must be in the
+#              specified directory. It should be built the same way this script builds it.
+#
+#      --skip-spirv-tools-build
+#              Skip the SPIRV-Tools build and instead install a template of pre-generated
+#              SPIRV-Tools header files. The primary purpose of this option is to allow Travis CI
+#              to skip the SPIRV-Tools build, because Travis has trouble with the python3 that build requires.
+#
+#      --spirv-cross-root path
+#              "path" specifies a directory path to a KhronosGroup/SPIRV-Cross repository.
+#              This repository does not have to be built.
 #
 #      -v      verbose output
 #
-#      --debug
-#              Build the external libraries in Debug mode, which may be useful
-#              when debugging and tracing calls into those libraries.
+#      --v-headers-root path
+#              "path" specifies a directory path to a KhronosGroup/Vulkan-Headers repository.
+#              This repository does not have to be built.
 #
 
 set -o errexit
@@ -38,6 +41,7 @@
 V_HEADERS_ROOT=""
 SPIRV_CROSS_ROOT=""
 GLSLANG_ROOT=""
+SKIP_SPV_TLS_BLD=""
 
 while (( "$#" )); do
   case "$1" in
@@ -49,6 +53,10 @@
          XC_BUILD_VERBOSITY=""
          shift 1
          ;;
+       --skip-spirv-tools-build)
+         SKIP_SPV_TLS_BLD="Y"
+         shift 1
+         ;;
        --v-headers-root)
          V_HEADERS_ROOT=$2
          shift 2
@@ -220,8 +228,15 @@
 	cd -  > /dev/null
 fi
 
-#Make sure the embedded spirv-tools is built
-build_repo "${REPO_NAME}/External/spirv-tools"
+# Build the embedded spirv-tools, or optionally install the pre-generated header templates instead
+SPV_TLS_DIR="${REPO_NAME}/External/spirv-tools"
+if [ ! "$SKIP_SPV_TLS_BLD" = "" ]; then
+	unzip -o -q -d "${SPV_TLS_DIR}" ../Templates/spirv-tools/build.zip
+	rm -rf "${SPV_TLS_DIR}/__MACOSX"
+else
+	build_repo "${SPV_TLS_DIR}"
+fi
+
 
 
 # ----------------- Vulkan-Tools -------------------