Merge pull request #1352 from billhollings/shader-config

Improvements to SPIRVToMSLConversionConfiguration
diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index 73f3dc1..da7173d 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -22,6 +22,8 @@
 - Expose `vkGetIOSurfaceMVK()` and `vkUseIOSurfaceMVK()` without requiring _Objective-C_.
 - Support _Xcode 12.5_ build settings, build warnings, and SDK change to availability of
   `[MTLDevice supportsBCTextureCompression]` on _Mac Catalyst_.
+- Improve cache hits when matching `SPIRVToMSLConversionConfiguration` structs to each other
+  to find a cached shader, by only considering resources from the current shader stage.
 - Rename `kMVKShaderStageMax` to `kMVKShaderStageCount`.
 - Fix internal reference from `SPIRV_CROSS_NAMESPACE_OVERRIDE` to `SPIRV_CROSS_NAMESPACE`.
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
index 2254d59..64128ea 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
@@ -68,7 +68,7 @@
  * If the shader stage binding has a binding defined for the specified stage, populates
  * the context at the descriptor set binding from the shader stage resource binding.
  */
-void mvkPopulateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+void mvkPopulateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
 									   MVKShaderStageResourceBinding& ssRB,
 									   MVKShaderStage stage,
 									   uint32_t descriptorSetIndex,
@@ -172,7 +172,7 @@
 								  MTLDataType dataType,
 								  MTLArgumentAccess access);
 	bool isUsingMetalArgumentBuffer();
-	void populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+	void populateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
 										MVKShaderResourceBinding& dslMTLRezIdxOffsets,
 										uint32_t dslIndex);
 	bool validate(MVKSampler* mvkSampler);
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm
index 7794704..74bcd1e 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm
@@ -95,7 +95,7 @@
 	}
 }
 
-void mvkPopulateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+void mvkPopulateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
 									   MVKShaderStageResourceBinding& ssRB,
 									   MVKShaderStage stage,
 									   uint32_t descriptorSetIndex,
@@ -109,7 +109,7 @@
 		mvk::MSLResourceBinding rb;													\
 		auto& rbb = rb.resourceBinding;												\
 		rbb.stage = spvExecModels[stage];											\
-		rbb.basetype = SPIRV_CROSS_NAMESPACE::SPIRType::spvRezType;		\
+		rbb.basetype = SPIRV_CROSS_NAMESPACE::SPIRType::spvRezType;					\
 		rbb.desc_set = descriptorSetIndex;											\
 		rbb.binding = bindingIndex;													\
 		rbb.count = count;															\
@@ -117,7 +117,7 @@
 		rbb.msl_texture = ssRB.textureIndex;										\
 		rbb.msl_sampler = ssRB.samplerIndex;										\
 		if (immutableSampler) { immutableSampler->getConstexprSampler(rb); }		\
-		context.resourceBindings.push_back(rb);										\
+		shaderConfig.resourceBindings.push_back(rb);								\
 	} while(false)
 
 	static const spv::ExecutionModel spvExecModels[] = {
@@ -144,7 +144,7 @@
 			db.descriptorSet = descriptorSetIndex;
 			db.binding = bindingIndex;
 			db.index = ssRB.dynamicOffsetBufferIndex;
-			context.dynamicBufferDescriptors.push_back(db);
+			shaderConfig.dynamicBufferDescriptors.push_back(db);
 			break;
 		}
 
@@ -508,7 +508,7 @@
 	[args addObject: argDesc];
 }
 
-void MVKDescriptorSetLayoutBinding::populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+void MVKDescriptorSetLayoutBinding::populateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
                                                                    MVKShaderResourceBinding& dslMTLRezIdxOffsets,
                                                                    uint32_t dslIndex) {
 
@@ -521,7 +521,7 @@
 	bool isUsingMtlArgBuff = isUsingMetalArgumentBuffer();
 	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
         if ((_applyToStage[stage] || isUsingMtlArgBuff) && descCnt > 0) {
-            mvkPopulateShaderConverterContext(context,
+            mvkPopulateShaderConversionConfig(shaderConfig,
                                               mtlIdxs.stages[stage],
                                               MVKShaderStage(stage),
                                               dslIndex,
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
index c8ec767..fdced31 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
@@ -83,8 +83,8 @@
 						   MVKShaderResourceBinding& dslMTLRezIdxOffsets);
 
 
-	/** Populates the specified shader converter context, at the specified DSL index. */
-	void populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+	/** Populates the specified shader conversion config, at the specified DSL index. */
+	void populateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
                                         MVKShaderResourceBinding& dslMTLRezIdxOffsets,
                                         uint32_t descSetIndex);
 
@@ -359,7 +359,7 @@
  * If the shader stage binding has a binding defined for the specified stage, populates
  * the context at the descriptor set binding from the shader stage resource binding.
  */
-void mvkPopulateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+void mvkPopulateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
 									   MVKShaderStageResourceBinding& ssRB,
 									   spv::ExecutionModel stage,
 									   uint32_t descriptorSetIndex,
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
index 36c1861..ff9fcd5 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
@@ -169,17 +169,17 @@
     }
 }
 
-void MVKDescriptorSetLayout::populateShaderConverterContext(mvk::SPIRVToMSLConversionConfiguration& context,
+void MVKDescriptorSetLayout::populateShaderConversionConfig(mvk::SPIRVToMSLConversionConfiguration& shaderConfig,
                                                             MVKShaderResourceBinding& dslMTLRezIdxOffsets,
 															uint32_t descSetIndex) {
 	uint32_t bindCnt = (uint32_t)_bindings.size();
 	for (uint32_t bindIdx = 0; bindIdx < bindCnt; bindIdx++) {
-		_bindings[bindIdx].populateShaderConverterContext(context, dslMTLRezIdxOffsets, descSetIndex);
+		_bindings[bindIdx].populateShaderConversionConfig(shaderConfig, dslMTLRezIdxOffsets, descSetIndex);
 	}
 
 	// Mark if Metal argument buffers are in use, but this descriptor set layout is not using them.
 	if (isUsingMetalArgumentBuffers() && !isUsingMetalArgumentBuffer()) {
-		context.discreteDescriptorSets.push_back(descSetIndex);
+		shaderConfig.discreteDescriptorSets.push_back(descSetIndex);
 	}
 }
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
index 66869bf..0f69331 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
@@ -72,8 +72,8 @@
 						   uint32_t set,
 						   const void* pData);
 
-	/** Populates the specified shader converter context. */
-	void populateShaderConverterContext(SPIRVToMSLConversionConfiguration& context);
+	/** Populates the specified shader conversion config. */
+	void populateShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig);
 
 	/** Returns the current swizzle buffer bindings. */
 	const MVKShaderImplicitRezBinding& getSwizzleBufferIndex() { return _swizzleBufferIndex; }
@@ -198,7 +198,7 @@
 	void propagateDebugName() override {}
 	template<typename CreateInfo> void addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc,
 															  const CreateInfo* pCreateInfo,
-															  SPIRVToMSLConversionConfiguration& context,
+															  SPIRVToMSLConversionConfiguration& shaderConfig,
 															  MVKShaderStage stage);
 
 	MVKPipelineCache* _pipelineCache;
@@ -322,20 +322,20 @@
     id<MTLRenderPipelineState> getOrCompilePipeline(MTLRenderPipelineDescriptor* plDesc, id<MTLRenderPipelineState>& plState);
     id<MTLComputePipelineState> getOrCompilePipeline(MTLComputePipelineDescriptor* plDesc, id<MTLComputePipelineState>& plState, const char* compilerType);
     void initMTLRenderPipelineState(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
-    void initMVKShaderConverterContext(SPIRVToMSLConversionConfiguration& _shaderContext, const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
-    void addVertexInputToShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext, const VkGraphicsPipelineCreateInfo* pCreateInfo);
-    void addPrevStageOutputToShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& outputs);
+    void initShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
+    void addVertexInputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo);
+    void addPrevStageOutputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& outputs);
     MTLRenderPipelineDescriptor* newMTLRenderPipelineDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
-    MTLComputePipelineDescriptor* newMTLTessVertexStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConversionConfiguration& shaderContext);
-	MTLComputePipelineDescriptor* newMTLTessControlStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConversionConfiguration& shaderContext);
-	MTLRenderPipelineDescriptor* newMTLTessRasterStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConversionConfiguration& shaderContext);
-	bool addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext);
-	bool addVertexShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext);
-	bool addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& prevOutput);
-	bool addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& prevOutput);
-    bool addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderContext, SPIRVShaderOutputs& prevOutput);
+    MTLComputePipelineDescriptor* newMTLTessVertexStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConversionConfiguration& shaderConfig);
+	MTLComputePipelineDescriptor* newMTLTessControlStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConversionConfiguration& shaderConfig);
+	MTLRenderPipelineDescriptor* newMTLTessRasterStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData, SPIRVToMSLConversionConfiguration& shaderConfig);
+	bool addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig);
+	bool addVertexShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig);
+	bool addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& prevOutput);
+	bool addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& prevOutput);
+    bool addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo, SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& prevOutput);
 	template<class T>
-	bool addVertexInputToPipeline(T* inputDesc, const VkPipelineVertexInputStateCreateInfo* pVI, const SPIRVToMSLConversionConfiguration& shaderContext);
+	bool addVertexInputToPipeline(T* inputDesc, const VkPipelineVertexInputStateCreateInfo* pVI, const SPIRVToMSLConversionConfiguration& shaderConfig);
 	void adjustVertexInputForMultiview(MTLVertexDescriptor* inputDesc, const VkPipelineVertexInputStateCreateInfo* pVI, uint32_t viewCount, uint32_t oldViewCount = 1);
     void addTessellationToPipeline(MTLRenderPipelineDescriptor* plDesc, const SPIRVTessReflectionData& reflectData, const VkPipelineTessellationStateCreateInfo* pTS);
     void addFragmentOutputToPipeline(MTLRenderPipelineDescriptor* plDesc, const VkGraphicsPipelineCreateInfo* pCreateInfo);
@@ -468,7 +468,7 @@
 	 */
 	VkResult writeData(size_t* pDataSize, void* pData);
 
-	/** Return a shader library from the specified shader context sourced from the specified shader module. */
+	/** Return a shader library from the shader conversion configuration and sourced from the specified shader module. */
 	MVKShaderLibrary* getShaderLibrary(SPIRVToMSLConversionConfiguration* pContext, MVKShaderModule* shaderModule);
 
 	/** Merges the contents of the specified number of pipeline caches into this cache. */
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index 8d0ff54..6cf3d3b 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -77,15 +77,15 @@
 	if (!cmdEncoder) { setConfigurationResult(dsl->getConfigurationResult()); }
 }
 
-void MVKPipelineLayout::populateShaderConverterContext(SPIRVToMSLConversionConfiguration& context) {
-	context.resourceBindings.clear();
-	context.discreteDescriptorSets.clear();
-	context.dynamicBufferDescriptors.clear();
+void MVKPipelineLayout::populateShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig) {
+	shaderConfig.resourceBindings.clear();
+	shaderConfig.discreteDescriptorSets.clear();
+	shaderConfig.dynamicBufferDescriptors.clear();
 
     // Add resource bindings defined in the descriptor set layouts
 	uint32_t dslCnt = getDescriptorSetCount();
 	for (uint32_t dslIdx = 0; dslIdx < dslCnt; dslIdx++) {
-		_descriptorSetLayouts[dslIdx]->populateShaderConverterContext(context,
+		_descriptorSetLayouts[dslIdx]->populateShaderConversionConfig(shaderConfig,
 																	  _dslMTLResourceIndexOffsets[dslIdx],
 																	  dslIdx);
 	}
@@ -93,7 +93,7 @@
 	// Add any resource bindings used by push-constants.
 	// Use VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT descriptor type as compatible with push constants in Metal.
 	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
-		mvkPopulateShaderConverterContext(context,
+		mvkPopulateShaderConversionConfig(shaderConfig,
 										  _pushConstantsMTLResourceIndexes.stages[stage],
 										  MVKShaderStage(stage),
 										  kPushConstDescSet,
@@ -187,7 +187,7 @@
 template<typename CreateInfo>
 void MVKPipeline::addMTLArgumentEncoders(MVKMTLFunction& mvkMTLFunc,
 										 const CreateInfo* pCreateInfo,
-										 SPIRVToMSLConversionConfiguration& context,
+										 SPIRVToMSLConversionConfiguration& shaderConfig,
 										 MVKShaderStage stage) {
 	if ( !isUsingMetalArgumentBuffers() ) { return; }
 
@@ -195,7 +195,7 @@
 	auto mtlFunc = mvkMTLFunc.getMTLFunction();
 	for (uint32_t dsIdx = 0; dsIdx < _descriptorSetCount; dsIdx++) {
 		auto* dsLayout = ((MVKPipelineLayout*)pCreateInfo->layout)->getDescriptorSetLayout(dsIdx);
-		bool descSetIsUsed = dsLayout->populateBindingUse(getDescriptorBindingUse(dsIdx, stage), context, stage, dsIdx);
+		bool descSetIsUsed = dsLayout->populateBindingUse(getDescriptorBindingUse(dsIdx, stage), shaderConfig, stage, dsIdx);
 		if (descSetIsUsed && needMTLArgEnc) {
 			getMTLArgumentEncoder(dsIdx, stage).init([mtlFunc newArgumentEncoderWithBufferIndex: dsIdx]);
 		}
@@ -549,12 +549,12 @@
 		// In this case, we need to create three render pipelines. But, the way Metal handles
 		// index buffers for compute stage-in means we have to defer creation of stage 1 until
 		// draw time. In the meantime, we'll create and retain a descriptor for it.
-		SPIRVToMSLConversionConfiguration shaderContext;
-		initMVKShaderConverterContext(shaderContext, pCreateInfo, reflectData);
+		SPIRVToMSLConversionConfiguration shaderConfig;
+		initShaderConversionConfig(shaderConfig, pCreateInfo, reflectData);
 
-		_mtlTessVertexStageDesc = newMTLTessVertexStageDescriptor(pCreateInfo, reflectData, shaderContext);					// retained
-		MTLComputePipelineDescriptor* tcPLDesc = newMTLTessControlStageDescriptor(pCreateInfo, reflectData, shaderContext);	// temp retained
-		MTLRenderPipelineDescriptor* rastPLDesc = newMTLTessRasterStageDescriptor(pCreateInfo, reflectData, shaderContext);	// temp retained
+		_mtlTessVertexStageDesc = newMTLTessVertexStageDescriptor(pCreateInfo, reflectData, shaderConfig);					// retained
+		MTLComputePipelineDescriptor* tcPLDesc = newMTLTessControlStageDescriptor(pCreateInfo, reflectData, shaderConfig);	// temp retained
+		MTLRenderPipelineDescriptor* rastPLDesc = newMTLTessRasterStageDescriptor(pCreateInfo, reflectData, shaderConfig);	// temp retained
 		if (_mtlTessVertexStageDesc && tcPLDesc && rastPLDesc) {
 			if (getOrCompilePipeline(tcPLDesc, _mtlTessControlStageState, "Tessellation control")) {
 				getOrCompilePipeline(rastPLDesc, _mtlPipelineState);
@@ -569,8 +569,8 @@
 // It is the responsibility of the caller to release the returned descriptor.
 MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLRenderPipelineDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo,
 																				 const SPIRVTessReflectionData& reflectData) {
-	SPIRVToMSLConversionConfiguration shaderContext;
-	initMVKShaderConverterContext(shaderContext, pCreateInfo, reflectData);
+	SPIRVToMSLConversionConfiguration shaderConfig;
+	initShaderConversionConfig(shaderConfig, pCreateInfo, reflectData);
 
 	MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new];	// retained
 
@@ -582,14 +582,14 @@
 	}
 
 	// Add shader stages. Compile vertex shader before others just in case conversion changes anything...like rasterizaion disable.
-	if (!addVertexShaderToPipeline(plDesc, pCreateInfo, shaderContext)) { return nil; }
+	if (!addVertexShaderToPipeline(plDesc, pCreateInfo, shaderConfig)) { return nil; }
 
 	// Vertex input
 	// This needs to happen before compiling the fragment shader, or we'll lose information on vertex attributes.
-	if (!addVertexInputToPipeline(plDesc.vertexDescriptor, pCreateInfo->pVertexInputState, shaderContext)) { return nil; }
+	if (!addVertexInputToPipeline(plDesc.vertexDescriptor, pCreateInfo->pVertexInputState, shaderConfig)) { return nil; }
 
 	// Fragment shader - only add if rasterization is enabled
-	if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderContext, vtxOutputs)) { return nil; }
+	if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderConfig, vtxOutputs)) { return nil; }
 
 	// Output
 	addFragmentOutputToPipeline(plDesc, pCreateInfo);
@@ -606,15 +606,15 @@
 // It is the responsibility of the caller to release the returned descriptor.
 MTLComputePipelineDescriptor* MVKGraphicsPipeline::newMTLTessVertexStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo,
 																				  const SPIRVTessReflectionData& reflectData,
-																				  SPIRVToMSLConversionConfiguration& shaderContext) {
+																				  SPIRVToMSLConversionConfiguration& shaderConfig) {
 	MTLComputePipelineDescriptor* plDesc = [MTLComputePipelineDescriptor new];	// retained
 
 	// Add shader stages.
-	if (!addVertexShaderToPipeline(plDesc, pCreateInfo, shaderContext)) { return nil; }
+	if (!addVertexShaderToPipeline(plDesc, pCreateInfo, shaderConfig)) { return nil; }
 
 	// Vertex input
 	plDesc.stageInputDescriptor = [MTLStageInputOutputDescriptor stageInputOutputDescriptor];
-	if (!addVertexInputToPipeline(plDesc.stageInputDescriptor, pCreateInfo->pVertexInputState, shaderContext)) { return nil; }
+	if (!addVertexInputToPipeline(plDesc.stageInputDescriptor, pCreateInfo->pVertexInputState, shaderConfig)) { return nil; }
 	plDesc.stageInputDescriptor.indexBufferIndex = _indirectParamsIndex.stages[kMVKShaderStageVertex];
 
 	plDesc.threadGroupSizeIsMultipleOfThreadExecutionWidth = YES;
@@ -751,7 +751,7 @@
 // It is the responsibility of the caller to release the returned descriptor.
 MTLComputePipelineDescriptor* MVKGraphicsPipeline::newMTLTessControlStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo,
 																					const SPIRVTessReflectionData& reflectData,
-																					SPIRVToMSLConversionConfiguration& shaderContext) {
+																					SPIRVToMSLConversionConfiguration& shaderConfig) {
 	MTLComputePipelineDescriptor* plDesc = [MTLComputePipelineDescriptor new];		// retained
 
 	SPIRVShaderOutputs vtxOutputs;
@@ -762,7 +762,7 @@
 	}
 
 	// Add shader stages.
-	if (!addTessCtlShaderToPipeline(plDesc, pCreateInfo, shaderContext, vtxOutputs)) {
+	if (!addTessCtlShaderToPipeline(plDesc, pCreateInfo, shaderConfig, vtxOutputs)) {
 		[plDesc release];
 		return nil;
 	}
@@ -779,7 +779,7 @@
 // It is the responsibility of the caller to release the returned descriptor.
 MTLRenderPipelineDescriptor* MVKGraphicsPipeline::newMTLTessRasterStageDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo,
 																				  const SPIRVTessReflectionData& reflectData,
-																				  SPIRVToMSLConversionConfiguration& shaderContext) {
+																				  SPIRVToMSLConversionConfiguration& shaderConfig) {
 	MTLRenderPipelineDescriptor* plDesc = [MTLRenderPipelineDescriptor new];	// retained
 
 	SPIRVShaderOutputs tcOutputs, teOutputs;
@@ -794,7 +794,7 @@
 	}
 
 	// Add shader stages. Compile tessellation evaluation shader before others just in case conversion changes anything...like rasterizaion disable.
-	if (!addTessEvalShaderToPipeline(plDesc, pCreateInfo, shaderContext, tcOutputs)) {
+	if (!addTessEvalShaderToPipeline(plDesc, pCreateInfo, shaderConfig, tcOutputs)) {
 		[plDesc release];
 		return nil;
 	}
@@ -807,7 +807,7 @@
 	const SPIRVShaderOutput* firstVertex = nullptr, * firstPatch = nullptr;
 	for (const SPIRVShaderOutput& output : tcOutputs) {
 		if (output.builtin == spv::BuiltInPointSize && !reflectData.pointMode) { continue; }
-		if (!shaderContext.isShaderInputLocationUsed(output.location)) {
+		if (!shaderConfig.isShaderInputLocationUsed(output.location)) {
 			if (output.perPatch && !(output.builtin == spv::BuiltInTessLevelOuter || output.builtin == spv::BuiltInTessLevelInner) ) {
 				if (!firstPatch) { firstPatch = &output; }
 				patchOffset += sizeOfOutput(output);
@@ -878,7 +878,7 @@
 	}
 
 	// Fragment shader - only add if rasterization is enabled
-	if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderContext, teOutputs)) {
+	if (!addFragmentShaderToPipeline(plDesc, pCreateInfo, shaderConfig, teOutputs)) {
 		[plDesc release];
 		return nil;
 	}
@@ -909,21 +909,21 @@
 // Adds a vertex shader to the pipeline description.
 bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
 													const VkGraphicsPipelineCreateInfo* pCreateInfo,
-													SPIRVToMSLConversionConfiguration& shaderContext) {
+													SPIRVToMSLConversionConfiguration& shaderConfig) {
 	uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
-	shaderContext.options.entryPointStage = spv::ExecutionModelVertex;
-	shaderContext.options.entryPointName = _pVertexSS->pName;
-	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.view_mask_buffer_index = _viewRangeBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.capture_output_to_buffer = false;
-	shaderContext.options.mslOptions.disable_rasterization = isRasterizationDisabled(pCreateInfo);
-    addVertexInputToShaderConverterContext(shaderContext, pCreateInfo);
+	shaderConfig.options.entryPointStage = spv::ExecutionModelVertex;
+	shaderConfig.options.entryPointName = _pVertexSS->pName;
+	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.view_mask_buffer_index = _viewRangeBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.capture_output_to_buffer = false;
+	shaderConfig.options.mslOptions.disable_rasterization = isRasterizationDisabled(pCreateInfo);
+    addVertexInputToShaderConversionConfig(shaderConfig, pCreateInfo);
 
-	MVKMTLFunction func = ((MVKShaderModule*)_pVertexSS->module)->getMTLFunction(&shaderContext, _pVertexSS->pSpecializationInfo, _pipelineCache);
+	MVKMTLFunction func = ((MVKShaderModule*)_pVertexSS->module)->getMTLFunction(&shaderConfig, _pVertexSS->pSpecializationInfo, _pipelineCache);
 	id<MTLFunction> mtlFunc = func.getMTLFunction();
 	if ( !mtlFunc ) {
 		setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Vertex shader function could not be compiled into pipeline. See previous logged error."));
@@ -939,7 +939,7 @@
 	_needsVertexViewRangeBuffer = funcRslts.needsViewRangeBuffer;
 	_needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
 
-	addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageVertex);
+	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);
 
 	if (funcRslts.isRasterizationDisabled) {
 		_pFragmentSS = nullptr;
@@ -973,19 +973,19 @@
 // Adds a vertex shader compiled as a compute kernel to the pipeline description.
 bool MVKGraphicsPipeline::addVertexShaderToPipeline(MTLComputePipelineDescriptor* plDesc,
 													const VkGraphicsPipelineCreateInfo* pCreateInfo,
-													SPIRVToMSLConversionConfiguration& shaderContext) {
+													SPIRVToMSLConversionConfiguration& shaderConfig) {
 	uint32_t vbCnt = pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
-	shaderContext.options.entryPointStage = spv::ExecutionModelVertex;
-	shaderContext.options.entryPointName = _pVertexSS->pName;
-	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.shader_index_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageVertex];
-	shaderContext.options.mslOptions.capture_output_to_buffer = true;
-	shaderContext.options.mslOptions.vertex_for_tessellation = true;
-	shaderContext.options.mslOptions.disable_rasterization = true;
-    addVertexInputToShaderConverterContext(shaderContext, pCreateInfo);
+	shaderConfig.options.entryPointStage = spv::ExecutionModelVertex;
+	shaderConfig.options.entryPointName = _pVertexSS->pName;
+	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.shader_index_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageVertex];
+	shaderConfig.options.mslOptions.capture_output_to_buffer = true;
+	shaderConfig.options.mslOptions.vertex_for_tessellation = true;
+	shaderConfig.options.mslOptions.disable_rasterization = true;
+    addVertexInputToShaderConversionConfig(shaderConfig, pCreateInfo);
 
 	static const CompilerMSL::Options::IndexType indexTypes[] = {
 		CompilerMSL::Options::IndexType::None,
@@ -995,8 +995,8 @@
 	// We need to compile this function three times, with no indexing, 16-bit indices, and 32-bit indices.
 	MVKMTLFunction func;
 	for (uint32_t i = 0; i < sizeof(indexTypes)/sizeof(indexTypes[0]); i++) {
-		shaderContext.options.mslOptions.vertex_index_type = indexTypes[i];
-		func = ((MVKShaderModule*)_pVertexSS->module)->getMTLFunction(&shaderContext, _pVertexSS->pSpecializationInfo, _pipelineCache);
+		shaderConfig.options.mslOptions.vertex_index_type = indexTypes[i];
+		func = ((MVKShaderModule*)_pVertexSS->module)->getMTLFunction(&shaderConfig, _pVertexSS->pSpecializationInfo, _pipelineCache);
 		id<MTLFunction> mtlFunc = func.getMTLFunction();
 		if ( !mtlFunc ) {
 			setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Vertex shader function could not be compiled into pipeline. See previous logged error."));
@@ -1011,7 +1011,7 @@
 		_needsVertexOutputBuffer = funcRslts.needsOutputBuffer;
 	}
 
-	addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageVertex);
+	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageVertex);
 
 	// If we need the swizzle buffer and there's no place to put it, we're in serious trouble.
 	if (!verifyImplicitBuffer(_needsVertexSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageVertex, "swizzle", vbCnt)) {
@@ -1029,7 +1029,7 @@
 	if (!verifyImplicitBuffer(_needsVertexOutputBuffer, _outputBufferIndex, kMVKShaderStageVertex, "output", vbCnt)) {
 		return false;
 	}
-	if (!verifyImplicitBuffer(!shaderContext.shaderInputs.empty(), _indirectParamsIndex, kMVKShaderStageVertex, "index", vbCnt)) {
+	if (!verifyImplicitBuffer(!shaderConfig.shaderInputs.empty(), _indirectParamsIndex, kMVKShaderStageVertex, "index", vbCnt)) {
 		return false;
 	}
 	return true;
@@ -1037,24 +1037,24 @@
 
 bool MVKGraphicsPipeline::addTessCtlShaderToPipeline(MTLComputePipelineDescriptor* plDesc,
 													 const VkGraphicsPipelineCreateInfo* pCreateInfo,
-													 SPIRVToMSLConversionConfiguration& shaderContext,
+													 SPIRVToMSLConversionConfiguration& shaderConfig,
 													 SPIRVShaderOutputs& vtxOutputs) {
-	shaderContext.options.entryPointStage = spv::ExecutionModelTessellationControl;
-	shaderContext.options.entryPointName = _pTessCtlSS->pName;
-	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.mslOptions.shader_input_buffer_index = kMVKTessCtlInputBufferIndex;
-	shaderContext.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.mslOptions.shader_patch_output_buffer_index = _tessCtlPatchOutputBufferIndex;
-	shaderContext.options.mslOptions.shader_tess_factor_buffer_index = _tessCtlLevelBufferIndex;
-	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageTessCtl];
-	shaderContext.options.mslOptions.capture_output_to_buffer = true;
-	shaderContext.options.mslOptions.multi_patch_workgroup = true;
-	shaderContext.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(_pTessCtlSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
-	addPrevStageOutputToShaderConverterContext(shaderContext, vtxOutputs);
+	shaderConfig.options.entryPointStage = spv::ExecutionModelTessellationControl;
+	shaderConfig.options.entryPointName = _pTessCtlSS->pName;
+	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderConfig.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
+	shaderConfig.options.mslOptions.shader_input_buffer_index = kMVKTessCtlInputBufferIndex;
+	shaderConfig.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderConfig.options.mslOptions.shader_patch_output_buffer_index = _tessCtlPatchOutputBufferIndex;
+	shaderConfig.options.mslOptions.shader_tess_factor_buffer_index = _tessCtlLevelBufferIndex;
+	shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageTessCtl];
+	shaderConfig.options.mslOptions.capture_output_to_buffer = true;
+	shaderConfig.options.mslOptions.multi_patch_workgroup = true;
+	shaderConfig.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(_pTessCtlSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
+	addPrevStageOutputToShaderConversionConfig(shaderConfig, vtxOutputs);
 
-	MVKMTLFunction func = ((MVKShaderModule*)_pTessCtlSS->module)->getMTLFunction(&shaderContext, _pTessCtlSS->pSpecializationInfo, _pipelineCache);
+	MVKMTLFunction func = ((MVKShaderModule*)_pTessCtlSS->module)->getMTLFunction(&shaderConfig, _pTessCtlSS->pSpecializationInfo, _pipelineCache);
 	id<MTLFunction> mtlFunc = func.getMTLFunction();
 	if ( !mtlFunc ) {
 		setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation control shader function could not be compiled into pipeline. See previous logged error."));
@@ -1070,7 +1070,7 @@
 	_needsTessCtlPatchOutputBuffer = funcRslts.needsPatchOutputBuffer;
 	_needsTessCtlInputBuffer = funcRslts.needsInputThreadgroupMem;
 
-	addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageTessCtl);
+	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessCtl);
 
 	if (!verifyImplicitBuffer(_needsTessCtlSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageTessCtl, "swizzle", kMVKTessCtlNumReservedBuffers)) {
 		return false;
@@ -1100,18 +1100,18 @@
 
 bool MVKGraphicsPipeline::addTessEvalShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
 													  const VkGraphicsPipelineCreateInfo* pCreateInfo,
-													  SPIRVToMSLConversionConfiguration& shaderContext,
+													  SPIRVToMSLConversionConfiguration& shaderConfig,
 													  SPIRVShaderOutputs& tcOutputs) {
-	shaderContext.options.entryPointStage = spv::ExecutionModelTessellationEvaluation;
-	shaderContext.options.entryPointName = _pTessEvalSS->pName;
-	shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessEval];
-	shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageTessEval];
-	shaderContext.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageTessEval];
-	shaderContext.options.mslOptions.capture_output_to_buffer = false;
-	shaderContext.options.mslOptions.disable_rasterization = isRasterizationDisabled(pCreateInfo);
-	addPrevStageOutputToShaderConverterContext(shaderContext, tcOutputs);
+	shaderConfig.options.entryPointStage = spv::ExecutionModelTessellationEvaluation;
+	shaderConfig.options.entryPointName = _pTessEvalSS->pName;
+	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessEval];
+	shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageTessEval];
+	shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageTessEval];
+	shaderConfig.options.mslOptions.capture_output_to_buffer = false;
+	shaderConfig.options.mslOptions.disable_rasterization = isRasterizationDisabled(pCreateInfo);
+	addPrevStageOutputToShaderConversionConfig(shaderConfig, tcOutputs);
 
-	MVKMTLFunction func = ((MVKShaderModule*)_pTessEvalSS->module)->getMTLFunction(&shaderContext, _pTessEvalSS->pSpecializationInfo, _pipelineCache);
+	MVKMTLFunction func = ((MVKShaderModule*)_pTessEvalSS->module)->getMTLFunction(&shaderConfig, _pTessEvalSS->pSpecializationInfo, _pipelineCache);
 	id<MTLFunction> mtlFunc = func.getMTLFunction();
 	if ( !mtlFunc ) {
 		setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Tessellation evaluation shader function could not be compiled into pipeline. See previous logged error."));
@@ -1126,7 +1126,7 @@
 	_needsTessEvalBufferSizeBuffer = funcRslts.needsBufferSizeBuffer;
 	_needsTessEvalDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
 
-	addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageTessEval);
+	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageTessEval);
 
 	if (funcRslts.isRasterizationDisabled) {
 		_pFragmentSS = nullptr;
@@ -1146,29 +1146,29 @@
 
 bool MVKGraphicsPipeline::addFragmentShaderToPipeline(MTLRenderPipelineDescriptor* plDesc,
 													  const VkGraphicsPipelineCreateInfo* pCreateInfo,
-													  SPIRVToMSLConversionConfiguration& shaderContext,
+													  SPIRVToMSLConversionConfiguration& shaderConfig,
 													  SPIRVShaderOutputs& shaderOutputs) {
 	if (_pFragmentSS) {
-		shaderContext.options.entryPointStage = spv::ExecutionModelFragment;
-		shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageFragment];
-		shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageFragment];
-		shaderContext.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageFragment];
-		shaderContext.options.mslOptions.view_mask_buffer_index = _viewRangeBufferIndex.stages[kMVKShaderStageFragment];
-		shaderContext.options.entryPointName = _pFragmentSS->pName;
-		shaderContext.options.mslOptions.capture_output_to_buffer = false;
-		shaderContext.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(_pFragmentSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
+		shaderConfig.options.entryPointStage = spv::ExecutionModelFragment;
+		shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageFragment];
+		shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageFragment];
+		shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageFragment];
+		shaderConfig.options.mslOptions.view_mask_buffer_index = _viewRangeBufferIndex.stages[kMVKShaderStageFragment];
+		shaderConfig.options.entryPointName = _pFragmentSS->pName;
+		shaderConfig.options.mslOptions.capture_output_to_buffer = false;
+		shaderConfig.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(_pFragmentSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
 		if (pCreateInfo->pMultisampleState) {
 			if (pCreateInfo->pMultisampleState->pSampleMask && pCreateInfo->pMultisampleState->pSampleMask[0] != 0xffffffff) {
-				shaderContext.options.mslOptions.additional_fixed_sample_mask = pCreateInfo->pMultisampleState->pSampleMask[0];
+				shaderConfig.options.mslOptions.additional_fixed_sample_mask = pCreateInfo->pMultisampleState->pSampleMask[0];
 			}
-			shaderContext.options.mslOptions.force_sample_rate_shading = pCreateInfo->pMultisampleState->sampleShadingEnable && pCreateInfo->pMultisampleState->minSampleShading != 0.0f;
+			shaderConfig.options.mslOptions.force_sample_rate_shading = pCreateInfo->pMultisampleState->sampleShadingEnable && pCreateInfo->pMultisampleState->minSampleShading != 0.0f;
 		}
 		if (std::any_of(shaderOutputs.begin(), shaderOutputs.end(), [](const SPIRVShaderOutput& output) { return output.builtin == spv::BuiltInLayer; })) {
-			shaderContext.options.mslOptions.arrayed_subpass_input = true;
+			shaderConfig.options.mslOptions.arrayed_subpass_input = true;
 		}
-		addPrevStageOutputToShaderConverterContext(shaderContext, shaderOutputs);
+		addPrevStageOutputToShaderConversionConfig(shaderConfig, shaderOutputs);
 
-		MVKMTLFunction func = ((MVKShaderModule*)_pFragmentSS->module)->getMTLFunction(&shaderContext, _pFragmentSS->pSpecializationInfo, _pipelineCache);
+		MVKMTLFunction func = ((MVKShaderModule*)_pFragmentSS->module)->getMTLFunction(&shaderConfig, _pFragmentSS->pSpecializationInfo, _pipelineCache);
 		id<MTLFunction> mtlFunc = func.getMTLFunction();
 		if ( !mtlFunc ) {
 			setConfigurationResult(reportError(VK_ERROR_INVALID_SHADER_NV, "Fragment shader function could not be compiled into pipeline. See previous logged error."));
@@ -1182,7 +1182,7 @@
 		_needsFragmentDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
 		_needsFragmentViewRangeBuffer = funcRslts.needsViewRangeBuffer;
 
-		addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageFragment);
+		addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageFragment);
 
 		if (!verifyImplicitBuffer(_needsFragmentSwizzleBuffer, _swizzleBufferIndex, kMVKShaderStageFragment, "swizzle", 0)) {
 			return false;
@@ -1203,7 +1203,7 @@
 template<class T>
 bool MVKGraphicsPipeline::addVertexInputToPipeline(T* inputDesc,
 												   const VkPipelineVertexInputStateCreateInfo* pVI,
-												   const SPIRVToMSLConversionConfiguration& shaderContext) {
+												   const SPIRVToMSLConversionConfiguration& shaderConfig) {
     // Collect extension structures
     VkPipelineVertexInputDivisorStateCreateInfoEXT* pVertexInputDivisorState = nullptr;
 	for (const auto* next = (VkBaseInStructure*)pVI->pNext; next; next = next->pNext) {
@@ -1221,7 +1221,7 @@
 	uint32_t maxBinding = 0;
     for (uint32_t i = 0; i < vbCnt; i++) {
         const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i];
-        if (shaderContext.isVertexBufferUsed(pVKVB->binding)) {
+        if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) {
 
 			// Vulkan allows any stride, but Metal only allows multiples of 4.
             // TODO: We could try to expand the buffer to the required alignment in that case.
@@ -1254,7 +1254,7 @@
         vbCnt = pVertexInputDivisorState->vertexBindingDivisorCount;
         for (uint32_t i = 0; i < vbCnt; i++) {
             const VkVertexInputBindingDivisorDescriptionEXT* pVKVB = &pVertexInputDivisorState->pVertexBindingDivisors[i];
-            if (shaderContext.isVertexBufferUsed(pVKVB->binding)) {
+            if (shaderConfig.isVertexBufferUsed(pVKVB->binding)) {
                 uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
                 if ((NSUInteger)inputDesc.layouts[vbIdx].stepFunction == MTLStepFunctionPerInstance ||
 					(NSUInteger)inputDesc.layouts[vbIdx].stepFunction == MTLStepFunctionThreadPositionInGridY) {
@@ -1272,7 +1272,7 @@
 	uint32_t vaCnt = pVI->vertexAttributeDescriptionCount;
 	for (uint32_t i = 0; i < vaCnt; i++) {
 		const VkVertexInputAttributeDescription* pVKVA = &pVI->pVertexAttributeDescriptions[i];
-		if (shaderContext.isShaderInputLocationUsed(pVKVA->location)) {
+		if (shaderConfig.isShaderInputLocationUsed(pVKVA->location)) {
 			uint32_t vaBinding = pVKVA->binding;
 			uint32_t vaOffset = pVKVA->offset;
 
@@ -1332,7 +1332,7 @@
 	// but at an offset that is one or more strides away from the original.
 	for (uint32_t i = 0; i < vbCnt; i++) {
 		const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[i];
-		uint32_t vbVACnt = shaderContext.countShaderInputsAt(pVKVB->binding);
+		uint32_t vbVACnt = shaderConfig.countShaderInputsAt(pVKVB->binding);
 		if (vbVACnt > 0) {
 			uint32_t vbIdx = getMetalBufferIndexForVertexAttributeBinding(pVKVB->binding);
 			auto vbDesc = inputDesc.layouts[vbIdx];
@@ -1502,10 +1502,10 @@
     }
 }
 
-// Initializes the context used to prepare the MSL library used by this pipeline.
-void MVKGraphicsPipeline::initMVKShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext,
-                                                        const VkGraphicsPipelineCreateInfo* pCreateInfo,
-                                                        const SPIRVTessReflectionData& reflectData) {
+// Initializes the shader conversion config used to prepare the MSL library used by this pipeline.
+void MVKGraphicsPipeline::initShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig,
+													 const VkGraphicsPipelineCreateInfo* pCreateInfo,
+													 const SPIRVTessReflectionData& reflectData) {
 
     VkPipelineTessellationDomainOriginStateCreateInfo* pTessDomainOriginState = nullptr;
     if (pCreateInfo->pTessellationState) {
@@ -1520,18 +1520,18 @@
         }
     }
 
-    shaderContext.options.mslOptions.msl_version = _device->_pMetalFeatures->mslVersion;
-    shaderContext.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension;
-    shaderContext.options.mslOptions.r32ui_linear_texture_alignment = (uint32_t)_device->getVkFormatTexelBufferAlignment(VK_FORMAT_R32_UINT, this);
-	shaderContext.options.mslOptions.texture_buffer_native = _device->_pMetalFeatures->textureBuffers;
+    shaderConfig.options.mslOptions.msl_version = _device->_pMetalFeatures->mslVersion;
+    shaderConfig.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension;
+    shaderConfig.options.mslOptions.r32ui_linear_texture_alignment = (uint32_t)_device->getVkFormatTexelBufferAlignment(VK_FORMAT_R32_UINT, this);
+	shaderConfig.options.mslOptions.texture_buffer_native = _device->_pMetalFeatures->textureBuffers;
 
 	bool useMetalArgBuff = isUsingMetalArgumentBuffers();
-	shaderContext.options.mslOptions.argument_buffers = useMetalArgBuff;
-	shaderContext.options.mslOptions.force_active_argument_buffer_resources = useMetalArgBuff;
-	shaderContext.options.mslOptions.pad_argument_buffer_resources = useMetalArgBuff;
+	shaderConfig.options.mslOptions.argument_buffers = useMetalArgBuff;
+	shaderConfig.options.mslOptions.force_active_argument_buffer_resources = useMetalArgBuff;
+	shaderConfig.options.mslOptions.pad_argument_buffer_resources = useMetalArgBuff;
 
     MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
-    layout->populateShaderConverterContext(shaderContext);
+    layout->populateShaderConversionConfig(shaderConfig);
     _swizzleBufferIndex = layout->getSwizzleBufferIndex();
     _bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
 	_dynamicOffsetBufferIndex = layout->getDynamicOffsetBufferIndex();
@@ -1546,42 +1546,42 @@
 	MVKPixelFormats* pixFmts = getPixelFormats();
     MTLPixelFormat mtlDSFormat = pixFmts->getMTLPixelFormat(mvkRenderSubpass->getDepthStencilFormat());
 
-	shaderContext.options.mslOptions.enable_frag_output_mask = 0;
+	shaderConfig.options.mslOptions.enable_frag_output_mask = 0;
 	if (pCreateInfo->pColorBlendState) {
 		for (uint32_t caIdx = 0; caIdx < pCreateInfo->pColorBlendState->attachmentCount; caIdx++) {
 			if (mvkRenderSubpass->isColorAttachmentUsed(caIdx)) {
-				mvkEnableFlags(shaderContext.options.mslOptions.enable_frag_output_mask, 1 << caIdx);
+				mvkEnableFlags(shaderConfig.options.mslOptions.enable_frag_output_mask, 1 << caIdx);
 			}
 		}
 	}
 
-	shaderContext.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
-    shaderContext.options.mslOptions.enable_point_size_builtin = isRenderingPoints(pCreateInfo) || reflectData.pointMode;
-	shaderContext.options.mslOptions.enable_frag_depth_builtin = pixFmts->isDepthFormat(mtlDSFormat);
-	shaderContext.options.mslOptions.enable_frag_stencil_ref_builtin = pixFmts->isStencilFormat(mtlDSFormat);
-    shaderContext.options.shouldFlipVertexY = mvkConfig().shaderConversionFlipVertexY;
-    shaderContext.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
-    shaderContext.options.mslOptions.tess_domain_origin_lower_left = pTessDomainOriginState && pTessDomainOriginState->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT;
-    shaderContext.options.mslOptions.multiview = mvkRendPass->isMultiview();
-    shaderContext.options.mslOptions.multiview_layered_rendering = getPhysicalDevice()->canUseInstancingForMultiview();
-    shaderContext.options.mslOptions.view_index_from_device_index = mvkAreAllFlagsEnabled(pCreateInfo->flags, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT);
+	shaderConfig.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
+    shaderConfig.options.mslOptions.enable_point_size_builtin = isRenderingPoints(pCreateInfo) || reflectData.pointMode;
+	shaderConfig.options.mslOptions.enable_frag_depth_builtin = pixFmts->isDepthFormat(mtlDSFormat);
+	shaderConfig.options.mslOptions.enable_frag_stencil_ref_builtin = pixFmts->isStencilFormat(mtlDSFormat);
+    shaderConfig.options.shouldFlipVertexY = mvkConfig().shaderConversionFlipVertexY;
+    shaderConfig.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
+    shaderConfig.options.mslOptions.tess_domain_origin_lower_left = pTessDomainOriginState && pTessDomainOriginState->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT;
+    shaderConfig.options.mslOptions.multiview = mvkRendPass->isMultiview();
+    shaderConfig.options.mslOptions.multiview_layered_rendering = getPhysicalDevice()->canUseInstancingForMultiview();
+    shaderConfig.options.mslOptions.view_index_from_device_index = mvkAreAllFlagsEnabled(pCreateInfo->flags, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT);
 #if MVK_MACOS
-    shaderContext.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->simdPermute;
+    shaderConfig.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->simdPermute;
 #endif
 #if MVK_IOS_OR_TVOS
-    shaderContext.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->quadPermute;
-    shaderContext.options.mslOptions.ios_use_simdgroup_functions = !!_device->_pMetalFeatures->simdPermute;
+    shaderConfig.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->quadPermute;
+    shaderConfig.options.mslOptions.ios_use_simdgroup_functions = !!_device->_pMetalFeatures->simdPermute;
 #endif
 
-    shaderContext.options.tessPatchKind = reflectData.patchKind;
-    shaderContext.options.numTessControlPoints = reflectData.numControlPoints;
+    shaderConfig.options.tessPatchKind = reflectData.patchKind;
+    shaderConfig.options.numTessControlPoints = reflectData.numControlPoints;
 }
 
-// Initializes the vertex attributes in a shader converter context.
-void MVKGraphicsPipeline::addVertexInputToShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext,
+// Initializes the vertex attributes in a shader conversion configuration.
+void MVKGraphicsPipeline::addVertexInputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig,
                                                                  const VkGraphicsPipelineCreateInfo* pCreateInfo) {
-    // Set the shader context vertex attribute information
-    shaderContext.shaderInputs.clear();
+    // Set the shader conversion config vertex attribute information
+    shaderConfig.shaderInputs.clear();
     uint32_t vaCnt = pCreateInfo->pVertexInputState->vertexAttributeDescriptionCount;
     for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) {
         const VkVertexInputAttributeDescription* pVKVA = &pCreateInfo->pVertexInputState->pVertexAttributeDescriptions[vaIdx];
@@ -1625,15 +1625,15 @@
 
         }
 
-        shaderContext.shaderInputs.push_back(si);
+        shaderConfig.shaderInputs.push_back(si);
     }
 }
 
-// Initializes the shader inputs in a shader converter context from the previous stage output.
-void MVKGraphicsPipeline::addPrevStageOutputToShaderConverterContext(SPIRVToMSLConversionConfiguration& shaderContext,
+// Initializes the shader inputs in a shader conversion config from the previous stage output.
+void MVKGraphicsPipeline::addPrevStageOutputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig,
                                                                      SPIRVShaderOutputs& shaderOutputs) {
-    // Set the shader context input variable information
-    shaderContext.shaderInputs.clear();
+    // Set the shader conversion config input variable information
+    shaderConfig.shaderInputs.clear();
     uint32_t siCnt = (uint32_t)shaderOutputs.size();
     for (uint32_t siIdx = 0; siIdx < siCnt; siIdx++) {
 		if (!shaderOutputs[siIdx].isUsed) { continue; }
@@ -1667,7 +1667,7 @@
                 break;
         }
 
-        shaderContext.shaderInputs.push_back(si);
+        shaderConfig.shaderInputs.push_back(si);
     }
 }
 
@@ -1770,42 +1770,42 @@
     const VkPipelineShaderStageCreateInfo* pSS = &pCreateInfo->stage;
     if ( !mvkAreAllFlagsEnabled(pSS->stage, VK_SHADER_STAGE_COMPUTE_BIT) ) { return MVKMTLFunctionNull; }
 
-    SPIRVToMSLConversionConfiguration shaderContext;
-	shaderContext.options.entryPointName = pCreateInfo->stage.pName;
-	shaderContext.options.entryPointStage = spv::ExecutionModelGLCompute;
-    shaderContext.options.mslOptions.msl_version = _device->_pMetalFeatures->mslVersion;
-    shaderContext.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension;
-    shaderContext.options.mslOptions.r32ui_linear_texture_alignment = (uint32_t)_device->getVkFormatTexelBufferAlignment(VK_FORMAT_R32_UINT, this);
-	shaderContext.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
-	shaderContext.options.mslOptions.texture_buffer_native = _device->_pMetalFeatures->textureBuffers;
-	shaderContext.options.mslOptions.dispatch_base = _allowsDispatchBase;
-	shaderContext.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
-    shaderContext.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(pSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
+    SPIRVToMSLConversionConfiguration shaderConfig;
+	shaderConfig.options.entryPointName = pCreateInfo->stage.pName;
+	shaderConfig.options.entryPointStage = spv::ExecutionModelGLCompute;
+    shaderConfig.options.mslOptions.msl_version = _device->_pMetalFeatures->mslVersion;
+    shaderConfig.options.mslOptions.texel_buffer_texture_width = _device->_pMetalFeatures->maxTextureDimension;
+    shaderConfig.options.mslOptions.r32ui_linear_texture_alignment = (uint32_t)_device->getVkFormatTexelBufferAlignment(VK_FORMAT_R32_UINT, this);
+	shaderConfig.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
+	shaderConfig.options.mslOptions.texture_buffer_native = _device->_pMetalFeatures->textureBuffers;
+	shaderConfig.options.mslOptions.dispatch_base = _allowsDispatchBase;
+	shaderConfig.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
+    shaderConfig.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(pSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
 
 	bool useMetalArgBuff = isUsingMetalArgumentBuffers();
-	shaderContext.options.mslOptions.argument_buffers = useMetalArgBuff;
-	shaderContext.options.mslOptions.force_active_argument_buffer_resources = useMetalArgBuff;
-	shaderContext.options.mslOptions.pad_argument_buffer_resources = useMetalArgBuff;
+	shaderConfig.options.mslOptions.argument_buffers = useMetalArgBuff;
+	shaderConfig.options.mslOptions.force_active_argument_buffer_resources = useMetalArgBuff;
+	shaderConfig.options.mslOptions.pad_argument_buffer_resources = useMetalArgBuff;
 
 #if MVK_MACOS
-    shaderContext.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->simdPermute;
+    shaderConfig.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->simdPermute;
 #endif
 #if MVK_IOS_OR_TVOS
-    shaderContext.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->quadPermute;
-    shaderContext.options.mslOptions.ios_use_simdgroup_functions = !!_device->_pMetalFeatures->simdPermute;
+    shaderConfig.options.mslOptions.emulate_subgroups = !_device->_pMetalFeatures->quadPermute;
+    shaderConfig.options.mslOptions.ios_use_simdgroup_functions = !!_device->_pMetalFeatures->simdPermute;
 #endif
 
     MVKPipelineLayout* layout = (MVKPipelineLayout*)pCreateInfo->layout;
-    layout->populateShaderConverterContext(shaderContext);
+    layout->populateShaderConversionConfig(shaderConfig);
     _swizzleBufferIndex = layout->getSwizzleBufferIndex();
     _bufferSizeBufferIndex = layout->getBufferSizeBufferIndex();
 	_dynamicOffsetBufferIndex = layout->getDynamicOffsetBufferIndex();
-    shaderContext.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageCompute];
-    shaderContext.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageCompute];
-	shaderContext.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageCompute];
-    shaderContext.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageCompute];
+    shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageCompute];
+    shaderConfig.options.mslOptions.buffer_size_buffer_index = _bufferSizeBufferIndex.stages[kMVKShaderStageCompute];
+	shaderConfig.options.mslOptions.dynamic_offsets_buffer_index = _dynamicOffsetBufferIndex.stages[kMVKShaderStageCompute];
+    shaderConfig.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageCompute];
 
-    MVKMTLFunction func = ((MVKShaderModule*)pSS->module)->getMTLFunction(&shaderContext, pSS->pSpecializationInfo, _pipelineCache);
+    MVKMTLFunction func = ((MVKShaderModule*)pSS->module)->getMTLFunction(&shaderConfig, pSS->pSpecializationInfo, _pipelineCache);
 
 	auto& funcRslts = func.shaderConversionResults;
 	_needsSwizzleBuffer = funcRslts.needsSwizzleBuffer;
@@ -1813,7 +1813,7 @@
 	_needsDynamicOffsetBuffer = funcRslts.needsDynamicOffsetBuffer;
     _needsDispatchBaseBuffer = funcRslts.needsDispatchBaseBuffer;
 
-	addMTLArgumentEncoders(func, pCreateInfo, shaderContext, kMVKShaderStageCompute);
+	addMTLArgumentEncoders(func, pCreateInfo, shaderConfig, kMVKShaderStageCompute);
 
 	return func;
 }
@@ -1828,7 +1828,7 @@
 #pragma mark -
 #pragma mark MVKPipelineCache
 
-// Return a shader library from the specified shader context sourced from the specified shader module.
+// Returns a shader library for the specified shader conversion configuration, sourced from the specified shader module.
 MVKShaderLibrary* MVKPipelineCache::getShaderLibrary(SPIRVToMSLConversionConfiguration* pContext, MVKShaderModule* shaderModule) {
 	lock_guard<mutex> lock(_shaderCacheLock);
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.h b/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.h
index 1fa007e..e20aea6 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.h
@@ -128,13 +128,13 @@
 	MVKVulkanAPIObject* getVulkanAPIObject() override { return _owner->getVulkanAPIObject(); };
 
 	/**
-	 * Returns a shader library from the specified shader context sourced from the specified shader module,
+	 * Returns a shader library for the specified shader conversion configuration, sourced from the specified shader module,
 	 * lazily creating the shader library from source code in the shader module, if needed.
 	 *
 	 * If pWasAdded is not nil, this function will set it to true if a new shader library was created,
 	 * and to false if an existing shader library was found and returned.
 	 */
-	MVKShaderLibrary* getShaderLibrary(SPIRVToMSLConversionConfiguration* pContext,
+	MVKShaderLibrary* getShaderLibrary(SPIRVToMSLConversionConfiguration* pShaderConfig,
 									   MVKShaderModule* shaderModule,
 									   bool* pWasAdded = nullptr);
 
@@ -147,8 +147,8 @@
 	friend MVKPipelineCache;
 	friend MVKShaderModule;
 
-	MVKShaderLibrary* findShaderLibrary(SPIRVToMSLConversionConfiguration* pContext);
-	MVKShaderLibrary* addShaderLibrary(SPIRVToMSLConversionConfiguration* pContext,
+	MVKShaderLibrary* findShaderLibrary(SPIRVToMSLConversionConfiguration* pShaderConfig);
+	MVKShaderLibrary* addShaderLibrary(SPIRVToMSLConversionConfiguration* pShaderConfig,
 									   const std::string& mslSourceCode,
 									   const SPIRVToMSLConversionResults& shaderConversionResults);
 	void merge(MVKShaderLibraryCache* other);
@@ -195,12 +195,12 @@
 	VkDebugReportObjectTypeEXT getVkDebugReportObjectType() override { return VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT; }
 
 	/** Returns the Metal shader function, possibly specialized. */
-	MVKMTLFunction getMTLFunction(SPIRVToMSLConversionConfiguration* pContext,
+	MVKMTLFunction getMTLFunction(SPIRVToMSLConversionConfiguration* pShaderConfig,
 								  const VkSpecializationInfo* pSpecializationInfo,
 								  MVKPipelineCache* pipelineCache);
 
-	/** Convert the SPIR-V to MSL, using the specified shader conversion context. */
-	bool convert(SPIRVToMSLConversionConfiguration* pContext);
+	/** Converts the SPIR-V to MSL, using the specified shader conversion configuration. */
+	bool convert(SPIRVToMSLConversionConfiguration* pShaderConfig);
 
 	/** Returns the original SPIR-V code that was specified when this object was created. */
 	const std::vector<uint32_t>& getSPIRV() { return _spvConverter.getSPIRV(); }
@@ -228,7 +228,7 @@
 	friend MVKShaderCacheIterator;
 
 	void propagateDebugName() override {}
-	MVKGLSLConversionShaderStage getMVKGLSLConversionShaderStage(SPIRVToMSLConversionConfiguration* pContext);
+	MVKGLSLConversionShaderStage getMVKGLSLConversionShaderStage(SPIRVToMSLConversionConfiguration* pShaderConfig);
 
 	MVKShaderLibraryCache _shaderLibraryCache;
 	SPIRVToMSLConverter _spvConverter;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm b/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm
index 732e4f3..7da3dca 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm
@@ -211,14 +211,14 @@
 #pragma mark -
 #pragma mark MVKShaderLibraryCache
 
-MVKShaderLibrary* MVKShaderLibraryCache::getShaderLibrary(SPIRVToMSLConversionConfiguration* pContext,
+MVKShaderLibrary* MVKShaderLibraryCache::getShaderLibrary(SPIRVToMSLConversionConfiguration* pShaderConfig,
 														  MVKShaderModule* shaderModule,
 														  bool* pWasAdded) {
 	bool wasAdded = false;
-	MVKShaderLibrary* shLib = findShaderLibrary(pContext);
+	MVKShaderLibrary* shLib = findShaderLibrary(pShaderConfig);
 	if ( !shLib ) {
-		if (shaderModule->convert(pContext)) {
-			shLib = addShaderLibrary(pContext, shaderModule->getMSL(), shaderModule->getConversionResults());
+		if (shaderModule->convert(pShaderConfig)) {
+			shLib = addShaderLibrary(pShaderConfig, shaderModule->getMSL(), shaderModule->getConversionResults());
 			wasAdded = true;
 		}
 	}
@@ -228,24 +228,24 @@
 	return shLib;
 }
 
-// Finds and returns a shader library matching the specified context, or returns nullptr if it doesn't exist.
-// If a match is found, the specified context is aligned with the context of the matching library.
-MVKShaderLibrary* MVKShaderLibraryCache::findShaderLibrary(SPIRVToMSLConversionConfiguration* pContext) {
+// Finds and returns a shader library matching the shader config, or returns nullptr if it doesn't exist.
+// If a match is found, the shader config is aligned with the shader config of the matching library.
+MVKShaderLibrary* MVKShaderLibraryCache::findShaderLibrary(SPIRVToMSLConversionConfiguration* pShaderConfig) {
 	for (auto& slPair : _shaderLibraries) {
-		if (slPair.first.matches(*pContext)) {
-			pContext->alignWith(slPair.first);
+		if (slPair.first.matches(*pShaderConfig)) {
+			pShaderConfig->alignWith(slPair.first);
 			return slPair.second;
 		}
 	}
 	return nullptr;
 }
 
-// Adds and returns a new shader library configured from the specified context.
-MVKShaderLibrary* MVKShaderLibraryCache::addShaderLibrary(SPIRVToMSLConversionConfiguration* pContext,
+// Adds and returns a new shader library configured from the specified conversion configuration.
+MVKShaderLibrary* MVKShaderLibraryCache::addShaderLibrary(SPIRVToMSLConversionConfiguration* pShaderConfig,
 														  const string& mslSourceCode,
 														  const SPIRVToMSLConversionResults& shaderConversionResults) {
 	MVKShaderLibrary* shLib = new MVKShaderLibrary(_owner, mslSourceCode, shaderConversionResults);
-	_shaderLibraries.emplace_back(*pContext, shLib);
+	_shaderLibraries.emplace_back(*pShaderConfig, shLib);
 	return shLib;
 }
 
@@ -268,7 +268,7 @@
 #pragma mark -
 #pragma mark MVKShaderModule
 
-MVKMTLFunction MVKShaderModule::getMTLFunction(SPIRVToMSLConversionConfiguration* pContext,
+MVKMTLFunction MVKShaderModule::getMTLFunction(SPIRVToMSLConversionConfiguration* pShaderConfig,
 											   const VkSpecializationInfo* pSpecializationInfo,
 											   MVKPipelineCache* pipelineCache) {
 	lock_guard<mutex> lock(_accessLock);
@@ -277,20 +277,20 @@
 	if ( !mvkLib ) {
 		uint64_t startTime = _device->getPerformanceTimestamp();
 		if (pipelineCache) {
-			mvkLib = pipelineCache->getShaderLibrary(pContext, this);
+			mvkLib = pipelineCache->getShaderLibrary(pShaderConfig, this);
 		} else {
-			mvkLib = _shaderLibraryCache.getShaderLibrary(pContext, this);
+			mvkLib = _shaderLibraryCache.getShaderLibrary(pShaderConfig, this);
 		}
 		_device->addActivityPerformance(_device->_performanceStatistics.shaderCompilation.shaderLibraryFromCache, startTime);
 	} else {
-		mvkLib->setEntryPointName(pContext->options.entryPointName);
-		pContext->markAllInputsAndResourcesUsed();
+		mvkLib->setEntryPointName(pShaderConfig->options.entryPointName);
+		pShaderConfig->markAllInputsAndResourcesUsed();
 	}
 
 	return mvkLib ? mvkLib->getMTLFunction(pSpecializationInfo, this) : MVKMTLFunctionNull;
 }
 
-bool MVKShaderModule::convert(SPIRVToMSLConversionConfiguration* pContext) {
+bool MVKShaderModule::convert(SPIRVToMSLConversionConfiguration* pShaderConfig) {
 	bool shouldLogCode = mvkConfig().debugMode;
 	bool shouldLogEstimatedGLSL = shouldLogCode;
 
@@ -299,7 +299,7 @@
 	if ( !_spvConverter.hasSPIRV() && _glslConverter.hasGLSL() ) {
 
 		uint64_t startTime = _device->getPerformanceTimestamp();
-		bool wasConverted = _glslConverter.convert(getMVKGLSLConversionShaderStage(pContext), shouldLogCode, false);
+		bool wasConverted = _glslConverter.convert(getMVKGLSLConversionShaderStage(pShaderConfig), shouldLogCode, false);
 		_device->addActivityPerformance(_device->_performanceStatistics.shaderCompilation.glslToSPRIV, startTime);
 
 		if (wasConverted) {
@@ -312,7 +312,7 @@
 	}
 
 	uint64_t startTime = _device->getPerformanceTimestamp();
-	bool wasConverted = _spvConverter.convert(*pContext, shouldLogCode, shouldLogCode, shouldLogEstimatedGLSL);
+	bool wasConverted = _spvConverter.convert(*pShaderConfig, shouldLogCode, shouldLogCode, shouldLogEstimatedGLSL);
 	_device->addActivityPerformance(_device->_performanceStatistics.shaderCompilation.spirvToMSL, startTime);
 
 	if (wasConverted) {
@@ -323,9 +323,9 @@
 	return wasConverted;
 }
 
-// Returns the MVKGLSLConversionShaderStage corresponding to the shader stage in the SPIR-V converter context.
-MVKGLSLConversionShaderStage MVKShaderModule::getMVKGLSLConversionShaderStage(SPIRVToMSLConversionConfiguration* pContext) {
-	switch (pContext->options.entryPointStage) {
+// Returns the MVKGLSLConversionShaderStage corresponding to the shader stage in the SPIR-V conversion configuration.
+MVKGLSLConversionShaderStage MVKShaderModule::getMVKGLSLConversionShaderStage(SPIRVToMSLConversionConfiguration* pShaderConfig) {
+	switch (pShaderConfig->options.entryPointStage) {
 		case spv::ExecutionModelVertex:						return kMVKGLSLConversionShaderStageVertex;
 		case spv::ExecutionModelTessellationControl:		return kMVKGLSLConversionShaderStageTessControl;
 		case spv::ExecutionModelTessellationEvaluation:		return kMVKGLSLConversionShaderStageTessEval;
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp
index 7142113..17c7939 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.cpp
@@ -47,18 +47,16 @@
 }
 
 MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionOptions::matches(const SPIRVToMSLConversionOptions& other) const {
+	if (memcmp(&mslOptions, &other.mslOptions, sizeof(mslOptions)) != 0) { return false; }
 	if (entryPointStage != other.entryPointStage) { return false; }
 	if (entryPointName != other.entryPointName) { return false; }
 	if (tessPatchKind != other.tessPatchKind) { return false; }
 	if (numTessControlPoints != other.numTessControlPoints) { return false; }
 	if (shouldFlipVertexY != other.shouldFlipVertexY) { return false; }
-
-	if (memcmp(&mslOptions, &other.mslOptions, sizeof(mslOptions)) != 0) { return false; }
-
 	return true;
 }
 
-MVK_PUBLIC_SYMBOL std::string SPIRVToMSLConversionOptions::printMSLVersion(uint32_t mslVersion, bool includePatch) {
+MVK_PUBLIC_SYMBOL string SPIRVToMSLConversionOptions::printMSLVersion(uint32_t mslVersion, bool includePatch) {
 	string verStr;
 
 	uint32_t major = mslVersion / 10000;
@@ -97,60 +95,37 @@
 }
 
 MVK_PUBLIC_SYMBOL bool mvk::MSLShaderInput::matches(const mvk::MSLShaderInput& other) const {
-	if (shaderInput.location != other.shaderInput.location) { return false; }
-	if (shaderInput.format != other.shaderInput.format) { return false; }
-	if (shaderInput.builtin != other.shaderInput.builtin) { return false; }
-	if (shaderInput.vecsize != other.shaderInput.vecsize) { return false; }
+	if (memcmp(&shaderInput, &other.shaderInput, sizeof(shaderInput)) != 0) { return false; }
 	if (binding != other.binding) { return false; }
 	return true;
 }
 
+MVK_PUBLIC_SYMBOL mvk::MSLShaderInput::MSLShaderInput() {
+	// Explicitly set shaderInput to defaults over cleared memory to ensure all instances
+	// have exactly the same memory layout when using memory comparison in matches().
+	memset(&shaderInput, 0, sizeof(shaderInput));
+	shaderInput = SPIRV_CROSS_NAMESPACE::MSLShaderInput();
+}
+
+// If requiresConstExprSampler is false, constExprSampler can be ignored
 MVK_PUBLIC_SYMBOL bool mvk::MSLResourceBinding::matches(const MSLResourceBinding& other) const {
-	if (resourceBinding.stage != other.resourceBinding.stage) { return false; }
-	if (resourceBinding.basetype != other.resourceBinding.basetype) { return false; }
-	if (resourceBinding.desc_set != other.resourceBinding.desc_set) { return false; }
-	if (resourceBinding.binding != other.resourceBinding.binding) { return false; }
-	if (resourceBinding.count != other.resourceBinding.count) { return false; }
-	if (resourceBinding.msl_buffer != other.resourceBinding.msl_buffer) { return false; }
-	if (resourceBinding.msl_texture != other.resourceBinding.msl_texture) { return false; }
-	if (resourceBinding.msl_sampler != other.resourceBinding.msl_sampler) { return false; }
+	if (memcmp(&resourceBinding, &other.resourceBinding, sizeof(resourceBinding)) != 0) { return false; }
 	if (requiresConstExprSampler != other.requiresConstExprSampler) { return false; }
-
-	// If requiresConstExprSampler is false, constExprSampler can be ignored
 	if (requiresConstExprSampler) {
-		if (constExprSampler.coord != other.constExprSampler.coord) { return false; }
-		if (constExprSampler.min_filter != other.constExprSampler.min_filter) { return false; }
-		if (constExprSampler.mag_filter != other.constExprSampler.mag_filter) { return false; }
-		if (constExprSampler.mip_filter != other.constExprSampler.mip_filter) { return false; }
-		if (constExprSampler.s_address != other.constExprSampler.s_address) { return false; }
-		if (constExprSampler.t_address != other.constExprSampler.t_address) { return false; }
-		if (constExprSampler.r_address != other.constExprSampler.r_address) { return false; }
-		if (constExprSampler.compare_func != other.constExprSampler.compare_func) { return false; }
-		if (constExprSampler.border_color != other.constExprSampler.border_color) { return false; }
-		if (constExprSampler.lod_clamp_min != other.constExprSampler.lod_clamp_min) { return false; }
-		if (constExprSampler.lod_clamp_max != other.constExprSampler.lod_clamp_max) { return false; }
-		if (constExprSampler.max_anisotropy != other.constExprSampler.max_anisotropy) { return false; }
-
-		if (constExprSampler.planes != other.constExprSampler.planes) { return false; }
-		if (constExprSampler.resolution != other.constExprSampler.resolution) { return false; }
-		if (constExprSampler.chroma_filter != other.constExprSampler.chroma_filter) { return false; }
-		if (constExprSampler.x_chroma_offset != other.constExprSampler.x_chroma_offset) { return false; }
-		if (constExprSampler.y_chroma_offset != other.constExprSampler.y_chroma_offset) { return false; }
-		for(uint32_t i = 0; i < 4; ++i)
-			if (constExprSampler.swizzle[i] != other.constExprSampler.swizzle[i]) { return false; }
-		if (constExprSampler.ycbcr_model != other.constExprSampler.ycbcr_model) { return false; }
-		if (constExprSampler.ycbcr_range != other.constExprSampler.ycbcr_range) { return false; }
-		if (constExprSampler.bpc != other.constExprSampler.bpc) { return false; }
-
-		if (constExprSampler.compare_enable != other.constExprSampler.compare_enable) { return false; }
-		if (constExprSampler.lod_clamp_enable != other.constExprSampler.lod_clamp_enable) { return false; }
-		if (constExprSampler.anisotropy_enable != other.constExprSampler.anisotropy_enable) { return false; }
-		if (constExprSampler.ycbcr_conversion_enable != other.constExprSampler.ycbcr_conversion_enable) { return false; }
+		if (memcmp(&constExprSampler, &other.constExprSampler, sizeof(constExprSampler)) != 0) { return false; }
 	}
-
 	return true;
 }
 
+MVK_PUBLIC_SYMBOL mvk::MSLResourceBinding::MSLResourceBinding() {
+	// Explicitly set resourceBinding and constExprSampler to defaults over cleared memory to ensure
+	// all instances have exactly the same memory layout when using memory comparison in matches().
+	memset(&resourceBinding, 0, sizeof(resourceBinding));
+	resourceBinding = SPIRV_CROSS_NAMESPACE::MSLResourceBinding();
+	memset(&constExprSampler, 0, sizeof(constExprSampler));
+	constExprSampler = SPIRV_CROSS_NAMESPACE::MSLConstexprSampler();
+}
+
 MVK_PUBLIC_SYMBOL bool mvk::DescriptorBinding::matches(const mvk::DescriptorBinding& other) const {
 	if (stage != other.stage) { return false; }
 	if (descriptorSet != other.descriptorSet) { return false; }
@@ -196,6 +171,11 @@
 	for (auto& rb : resourceBindings) { rb.outIsUsedByShader = true; }
 }
 
+// A single SPIRVToMSLConversionConfiguration instance is used for all pipeline shader stages,
+// and the resources can be spread across these shader stages. To improve cache hits when using
+// this function to find a cached shader for a particular shader stage, only consider the resources
+// that are used in that shader stage. By contrast, discreteDescriptorSets applies across all stages,
+// and shaderInputs are populated before each stage, so neither needs to be filtered by stage here.
 MVK_PUBLIC_SYMBOL bool SPIRVToMSLConversionConfiguration::matches(const SPIRVToMSLConversionConfiguration& other) const {
 
     if ( !options.matches(other.options) ) { return false; }
@@ -205,17 +185,20 @@
 	}
 
     for (const auto& rb : resourceBindings) {
-        if (rb.outIsUsedByShader && !containsMatching(other.resourceBindings, rb)) { return false; }
+        if (rb.resourceBinding.stage == options.entryPointStage &&
+			rb.outIsUsedByShader &&
+			!containsMatching(other.resourceBindings, rb)) { return false; }
     }
 
+	for (const auto& db : dynamicBufferDescriptors) {
+		if (db.stage == options.entryPointStage &&
+			!containsMatching(other.dynamicBufferDescriptors, db)) { return false; }
+	}
+
 	for (uint32_t dsIdx : discreteDescriptorSets) {
 		if ( !contains(other.discreteDescriptorSets, dsIdx)) { return false; }
 	}
 
-	for (const auto& db : dynamicBufferDescriptors) {
-		if ( !containsMatching(other.dynamicBufferDescriptors, db)) { return false; }
-	}
-
     return true;
 }
 
@@ -251,7 +234,7 @@
 	}
 }
 
-MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfiguration& context,
+MVK_PUBLIC_SYMBOL bool SPIRVToMSLConverter::convert(SPIRVToMSLConversionConfiguration& shaderConfig,
 													bool shouldLogSPIRV,
 													bool shouldLogMSL,
                                                     bool shouldLogGLSL) {
@@ -275,36 +258,36 @@
 #endif
 		pMSLCompiler = new CompilerMSL(_spirv);
 
-		if (context.options.hasEntryPoint()) {
-			pMSLCompiler->set_entry_point(context.options.entryPointName, context.options.entryPointStage);
+		if (shaderConfig.options.hasEntryPoint()) {
+			pMSLCompiler->set_entry_point(shaderConfig.options.entryPointName, shaderConfig.options.entryPointStage);
 		}
 
 		// Set up tessellation parameters if needed.
-		if (context.options.entryPointStage == ExecutionModelTessellationControl ||
-			context.options.entryPointStage == ExecutionModelTessellationEvaluation) {
-			if (context.options.tessPatchKind != ExecutionModeMax) {
-				pMSLCompiler->set_execution_mode(context.options.tessPatchKind);
+		if (shaderConfig.options.entryPointStage == ExecutionModelTessellationControl ||
+			shaderConfig.options.entryPointStage == ExecutionModelTessellationEvaluation) {
+			if (shaderConfig.options.tessPatchKind != ExecutionModeMax) {
+				pMSLCompiler->set_execution_mode(shaderConfig.options.tessPatchKind);
 			}
-			if (context.options.numTessControlPoints != 0) {
-				pMSLCompiler->set_execution_mode(ExecutionModeOutputVertices, context.options.numTessControlPoints);
+			if (shaderConfig.options.numTessControlPoints != 0) {
+				pMSLCompiler->set_execution_mode(ExecutionModeOutputVertices, shaderConfig.options.numTessControlPoints);
 			}
 		}
 
 		// Establish the MSL options for the compiler
 		// This needs to be done in two steps...for CompilerMSL and its superclass.
-		pMSLCompiler->set_msl_options(context.options.mslOptions);
+		pMSLCompiler->set_msl_options(shaderConfig.options.mslOptions);
 
 		auto scOpts = pMSLCompiler->get_common_options();
-		scOpts.vertex.flip_vert_y = context.options.shouldFlipVertexY;
+		scOpts.vertex.flip_vert_y = shaderConfig.options.shouldFlipVertexY;
 		pMSLCompiler->set_common_options(scOpts);
 
 		// Add shader inputs
-		for (auto& si : context.shaderInputs) {
+		for (auto& si : shaderConfig.shaderInputs) {
 			pMSLCompiler->add_msl_shader_input(si.shaderInput);
 		}
 
 		// Add resource bindings and hardcoded constexpr samplers
-		for (auto& rb : context.resourceBindings) {
+		for (auto& rb : shaderConfig.resourceBindings) {
 			auto& rbb = rb.resourceBinding;
 			pMSLCompiler->add_msl_resource_binding(rbb);
 
@@ -315,15 +298,15 @@
 
 		// Add any descriptor sets that are not using Metal argument buffers.
 		// This only has an effect if SPIRVToMSLConversionConfiguration::options::mslOptions::argument_buffers is enabled.
-		for (uint32_t dsIdx : context.discreteDescriptorSets) {
+		for (uint32_t dsIdx : shaderConfig.discreteDescriptorSets) {
 			pMSLCompiler->add_discrete_descriptor_set(dsIdx);
 		}
 
 		// Add any dynamic buffer bindings.
 		// This only has an applies if SPIRVToMSLConversionConfiguration::options::mslOptions::argument_buffers is enabled.
-		if (context.options.mslOptions.argument_buffers) {
-			for (auto& db : context.dynamicBufferDescriptors) {
-				if (db.stage == context.options.entryPointStage) {
+		if (shaderConfig.options.mslOptions.argument_buffers) {
+			for (auto& db : shaderConfig.dynamicBufferDescriptors) {
+				if (db.stage == shaderConfig.options.entryPointStage) {
 					pMSLCompiler->add_dynamic_buffer(db.descriptorSet, db.binding, db.index);
 				}
 			}
@@ -346,7 +329,7 @@
 
 	// Populate the shader conversion results with info from the compilation run,
 	// and mark which vertex attributes and resource bindings are used by the shader
-	populateEntryPoint(pMSLCompiler, context.options);
+	populateEntryPoint(pMSLCompiler, shaderConfig.options);
 	_shaderConversionResults.isRasterizationDisabled = pMSLCompiler && pMSLCompiler->get_is_rasterization_disabled();
 	_shaderConversionResults.isPositionInvariant = pMSLCompiler && pMSLCompiler->is_position_invariant();
 	_shaderConversionResults.needsSwizzleBuffer = pMSLCompiler && pMSLCompiler->needs_swizzle_buffer();
@@ -360,19 +343,19 @@
 	// When using Metal argument buffers, if the shader is provided with dynamic buffer offsets,
 	// then it needs a buffer to hold these dynamic offsets.
 	_shaderConversionResults.needsDynamicOffsetBuffer = false;
-	if (context.options.mslOptions.argument_buffers) {
-		for (auto& db : context.dynamicBufferDescriptors) {
-			if (db.stage == context.options.entryPointStage) {
+	if (shaderConfig.options.mslOptions.argument_buffers) {
+		for (auto& db : shaderConfig.dynamicBufferDescriptors) {
+			if (db.stage == shaderConfig.options.entryPointStage) {
 				_shaderConversionResults.needsDynamicOffsetBuffer = true;
 			}
 		}
 	}
 
-	for (auto& ctxSI : context.shaderInputs) {
+	for (auto& ctxSI : shaderConfig.shaderInputs) {
 		ctxSI.outIsUsedByShader = pMSLCompiler->is_msl_shader_input_used(ctxSI.shaderInput.location);
 	}
-	for (auto& ctxRB : context.resourceBindings) {
-		if (ctxRB.resourceBinding.stage == context.options.entryPointStage) {
+	for (auto& ctxRB : shaderConfig.resourceBindings) {
+		if (ctxRB.resourceBinding.stage == shaderConfig.options.entryPointStage) {
 			ctxRB.outIsUsedByShader = pMSLCompiler->is_msl_resource_binding_used(ctxRB.resourceBinding.stage,
 																				 ctxRB.resourceBinding.desc_set,
 																				 ctxRB.resourceBinding.binding);
diff --git a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h
index 18f260f..688acd0 100644
--- a/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h
+++ b/MoltenVKShaderConverter/MoltenVKShaderConverter/SPIRVToMSLConverter.h
@@ -83,6 +83,8 @@
 		 */
 		bool matches(const MSLShaderInput& other) const;
 
+		MSLShaderInput();
+
 	} MSLShaderInput;
 
 	/**
@@ -116,6 +118,8 @@
 		 */
 		bool matches(const MSLResourceBinding& other) const;
 
+		MSLResourceBinding();
+
 	} MSLResourceBinding;
 
 	/**
@@ -167,15 +171,15 @@
 		void markAllInputsAndResourcesUsed();
 
         /**
-         * Returns whether this configuration matches the other context. It does if the
-		 * respective options match and any vertex attributes and resource bindings used
+         * Returns whether this configuration matches the other configuration. It does if
+		 * the respective options match and any vertex attributes and resource bindings used
 		 * by this configuration can be found in the other configuration. Vertex attributes
 		 * and resource bindings that are in the other configuration but are not used by
 		 * the shader that created this configuration, are ignored.
          */
         bool matches(const SPIRVToMSLConversionConfiguration& other) const;
 
-        /** Aligns certain aspects of this configuration with the source context. */
+        /** Aligns certain aspects of this configuration with the source configuration. */
         void alignWith(const SPIRVToMSLConversionConfiguration& srcContext);
 
 	} SPIRVToMSLConversionConfiguration;
@@ -270,7 +274,7 @@
          * and optionally, the original GLSL (as converted from the SPIR_V), should be logged 
          * to the result log of this converter. This can be useful during shader debugging.
 		 */
-		bool convert(SPIRVToMSLConversionConfiguration& context,
+		bool convert(SPIRVToMSLConversionConfiguration& shaderConfig,
                      bool shouldLogSPIRV = false,
                      bool shouldLogMSL = false,
                      bool shouldLogGLSL = false);