Merge pull request #1348 from billhollings/maintainability-changes

Miscellaneous non-functional maintainability updates.
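
As a rough illustration, the hypothetical caller sketch below (not code from this PR; it assumes the internal headers `MVKEnvironment.h` and `mvk_datatypes.h` are available on the include path) shows how call sites read after the two most pervasive changes: `mvkConfig()` now returning a `const MVKConfiguration&` instead of a pointer, and `kMVKShaderStageMax` renamed to `kMVKShaderStageCount`, with the old name retained as a public-API alias.

    // Hypothetical caller sketch (assumption: MoltenVK internal headers are available).
    #include <cstdint>
    #include "MVKEnvironment.h"   // declares mvkConfig(), which now returns const MVKConfiguration&
    #include "mvk_datatypes.h"    // declares the MVKShaderStage enum, including kMVKShaderStageCount

    // Before this PR: const MVKConfiguration* pCfg = mvkConfig(); bool dbg = pCfg->debugMode;
    // After this PR: member access replaces the pointer dereference.
    static bool isDebugMode() { return mvkConfig().debugMode; }

    // Per-stage loops are now bounded by kMVKShaderStageCount;
    // kMVKShaderStageMax still compiles because it is an alias with the same value.
    static void visitAllStages() {
        for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
            // ... per-stage work ...
        }
    }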
diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index 1296ce6..73f3dc1 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -13,6 +13,20 @@
 
 
 
+MoltenVK 1.1.4
+--------------
+
+Released TBD
+
+- Make `vkGetPastPresentationTimingGOOGLE()` queuing behavior compliant with Vulkan spec.
+- Expose `vkGetIOSurfaceMVK()` and `vkUseIOSurfaceMVK()` without requiring _Objective-C_.
+- Support _Xcode 12.5_ build settings, build warnings, and the SDK change to the availability
+  of `[MTLDevice supportsBCTextureCompression]` on _Mac Catalyst_.
+- Rename `kMVKShaderStageMax` to `kMVKShaderStageCount`.
+- Fix internal reference from `SPIRV_CROSS_NAMESPACE_OVERRIDE` to `SPIRV_CROSS_NAMESPACE`.
+
+
+
 MoltenVK 1.1.3
 --------------
 
diff --git a/MoltenVK/MoltenVK/API/mvk_datatypes.h b/MoltenVK/MoltenVK/API/mvk_datatypes.h
index 2ac2f49..08b42b4 100644
--- a/MoltenVK/MoltenVK/API/mvk_datatypes.h
+++ b/MoltenVK/MoltenVK/API/mvk_datatypes.h
@@ -323,7 +323,8 @@
 	kMVKShaderStageTessEval,
 	kMVKShaderStageFragment,
 	kMVKShaderStageCompute,
-	kMVKShaderStageMax
+	kMVKShaderStageCount,
+	kMVKShaderStageMax = kMVKShaderStageCount	// Public API legacy value
 } MVKShaderStage;
 
 /** Returns the Metal MTLColorWriteMask corresponding to the specified Vulkan VkColorComponentFlags. */
diff --git a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
index d99d9c6..a78e93c 100644
--- a/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
+++ b/MoltenVK/MoltenVK/API/vk_mvk_moltenvk.h
@@ -50,7 +50,7 @@
  */
 #define MVK_VERSION_MAJOR   1
 #define MVK_VERSION_MINOR   1
-#define MVK_VERSION_PATCH   3
+#define MVK_VERSION_PATCH   4
 
 #define MVK_MAKE_VERSION(major, minor, patch)    (((major) * 10000) + ((minor) * 100) + (patch))
 #define MVK_VERSION     MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH)
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
index f862b61..2254d59 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.h
@@ -49,7 +49,7 @@
 
 /** Indicates the Metal resource indexes used by each shader stage in a descriptor. */
 typedef struct MVKShaderResourceBinding {
-	MVKShaderStageResourceBinding stages[kMVKShaderStageMax];
+	MVKShaderStageResourceBinding stages[kMVKShaderStageCount];
 
 	uint16_t getMaxResourceIndex();
 	uint16_t getMaxBufferIndex();
@@ -183,7 +183,7 @@
 	MVKSmallVector<MVKSampler*> _immutableSamplers;
 	MVKShaderResourceBinding _mtlResourceIndexOffsets;
 	uint32_t _descriptorIndex;
-	bool _applyToStage[kMVKShaderStageMax];
+	bool _applyToStage[kMVKShaderStageCount];
 };
 
 
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm
index 8a199c5..7794704 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptor.mm
@@ -69,27 +69,27 @@
 
 MVKShaderResourceBinding MVKShaderResourceBinding::operator+ (const MVKShaderResourceBinding& rhs) {
 	MVKShaderResourceBinding rslt;
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		rslt.stages[i] = this->stages[i] + rhs.stages[i];
 	}
 	return rslt;
 }
 
 MVKShaderResourceBinding& MVKShaderResourceBinding::operator+= (const MVKShaderResourceBinding& rhs) {
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		this->stages[i] += rhs.stages[i];
 	}
 	return *this;
 }
 
 void MVKShaderResourceBinding::clearArgumentBufferResources() {
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		stages[i].clearArgumentBufferResources();
 	}
 }
 
 void MVKShaderResourceBinding::addArgumentBuffers(uint32_t count) {
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		stages[i].bufferIndex += count;
 		stages[i].resourceIndex += count;
 	}
@@ -272,7 +272,7 @@
                 else
                     bb.size = (uint32_t)bufferInfo.range;
 
-                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
                     if (_applyToStage[i]) {
                         bb.index = mtlIdxs.stages[i].bufferIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
@@ -290,7 +290,7 @@
                 bb.mtlBytes = inlineUniformBlock.pData;
                 bb.size = inlineUniformBlock.dataSize;
                 bb.isInline = true;
-                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
                     if (_applyToStage[i]) {
                         bb.index = mtlIdxs.stages[i].bufferIndex;
                         if (i == kMVKShaderStageCompute) {
@@ -319,7 +319,7 @@
                         bb.offset = mtlTex.bufferOffset;
                         bb.size = (uint32_t)(mtlTex.height * mtlTex.bufferBytesPerRow);
                     }
-                    for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+                    for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
                         if (_applyToStage[i]) {
                             tb.index = mtlIdxs.stages[i].textureIndex + rezIdx + planeIndex;
                             if (i == kMVKShaderStageCompute) {
@@ -352,7 +352,7 @@
                     bb.offset = mtlTex.bufferOffset;
                     bb.size = (uint32_t)(mtlTex.height * mtlTex.bufferBytesPerRow);
                 }
-                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
                     if (_applyToStage[i]) {
                         tb.index = mtlIdxs.stages[i].textureIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
@@ -382,7 +382,7 @@
                     sampler = _immutableSamplers[rezIdx];
 				}
                 sb.mtlSamplerState = sampler->getMTLSamplerState();
-                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+                for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
                     if (_applyToStage[i]) {
                         sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
                         if (i == kMVKShaderStageCompute) {
@@ -410,7 +410,7 @@
                         sampler = _immutableSamplers[rezIdx];
                     }
                     sb.mtlSamplerState = sampler->getMTLSamplerState();
-                    for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+                    for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
                         if (_applyToStage[i]) {
                             tb.index = mtlIdxs.stages[i].textureIndex + rezIdx + planeIndex;
                             sb.index = mtlIdxs.stages[i].samplerIndex + rezIdx;
@@ -519,7 +519,7 @@
 
 	uint32_t descCnt = getDescriptorCount();
 	bool isUsingMtlArgBuff = isUsingMetalArgumentBuffer();
-	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageMax; stage++) {
+	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
         if ((_applyToStage[stage] || isUsingMtlArgBuff) && descCnt > 0) {
             mvkPopulateShaderConverterContext(context,
                                               mtlIdxs.stages[stage],
@@ -544,7 +544,7 @@
 
 MTLRenderStages MVKDescriptorSetLayoutBinding::getMTLRenderStages() {
 	MTLRenderStages mtlStages = 0;
-	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageMax; stage++) {
+	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
 		if (_applyToStage[stage]) {
 			switch (stage) {
 				case kMVKShaderStageVertex:
@@ -579,7 +579,7 @@
 	_info.pImmutableSamplers = nullptr;     // Remove dangling pointer
 
 	// Determine if this binding is used by this shader stage, and initialize resource indexes.
-	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageMax; stage++) {
+	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
 		_applyToStage[stage] = mvkAreAllFlagsEnabled(pBinding->stageFlags, mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(stage)));
 		initMetalResourceIndexOffsets(pBinding, stage);
 	}
@@ -606,7 +606,7 @@
 	_immutableSamplers(binding._immutableSamplers),
 	_mtlResourceIndexOffsets(binding._mtlResourceIndexOffsets) {
 
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
         _applyToStage[i] = binding._applyToStage[i];
     }
 	for (MVKSampler* sampler : _immutableSamplers) {
@@ -763,7 +763,7 @@
 		else
 			bb.size = (uint32_t)_buffRange;
 	}
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		if (stages[i]) {
 			bb.index = mtlIndexes.stages[i].bufferIndex + elementIndex;
 			if (i == kMVKShaderStageCompute) {
@@ -853,7 +853,7 @@
 		bb.size = mvkDSLBind->_info.descriptorCount;
 	}
 
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		if (stages[i]) {
 			bb.index = mtlIndexes.stages[i].bufferIndex;
 			if (i == kMVKShaderStageCompute) {
@@ -957,7 +957,7 @@
             bb.offset = mtlTex.bufferOffset;
             bb.size = (uint32_t)(mtlTex.height * mtlTex.bufferBytesPerRow);
         }
-        for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+        for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
             if (stages[i]) {
                 tb.index = mtlIndexes.stages[i].textureIndex + elementIndex + planeIndex;
                 if (i == kMVKShaderStageCompute) {
@@ -1070,7 +1070,7 @@
 	sb.mtlSamplerState = (mvkSamp
 						  ? mvkSamp->getMTLSamplerState()
 						  : cmdEncoder->getDevice()->getDefaultMTLSamplerState());
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		if (stages[i]) {
 			sb.index = mtlIndexes.stages[i].samplerIndex + elementIndex;
 			if (i == kMVKShaderStageCompute) {
@@ -1261,7 +1261,7 @@
 			bb.size = (uint32_t)(mtlTex.height * mtlTex.bufferBytesPerRow);
 		}
 	}
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		if (stages[i]) {
 			tb.index = mtlIndexes.stages[i].textureIndex + elementIndex;
 			if (i == kMVKShaderStageCompute) {
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h
index 23e22d4..d2afd9e 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.h
@@ -327,7 +327,7 @@
 	inline bool mslVersionIsAtLeast(MTLLanguageVersion minVer) { return _metalFeatures.mslVersionEnum >= minVer; }
 
 	/** Returns whether this device is using Metal argument buffers. */
-	inline bool isUsingMetalArgumentBuffers() const  { return _metalFeatures.argumentBuffers && mvkConfig()->useMetalArgumentBuffers; };
+	inline bool isUsingMetalArgumentBuffers() const  { return _metalFeatures.argumentBuffers && mvkConfig().useMetalArgumentBuffers; };
 
 
 #pragma mark Construction
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index 2a4fd6e..6a9317b 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -240,7 +240,7 @@
 				portabilityFeatures->events = true;
 				portabilityFeatures->imageViewFormatReinterpretation = true;
 				portabilityFeatures->imageViewFormatSwizzle = (_metalFeatures.nativeTextureSwizzle ||
-															   mvkConfig()->fullImageViewSwizzle);
+															   mvkConfig().fullImageViewSwizzle);
 				portabilityFeatures->imageView2DOn3DImage = false;
 				portabilityFeatures->multisampleArrayImage = _metalFeatures.multisampleArrayTextures;
 				portabilityFeatures->mutableComparisonSamplers = _metalFeatures.depthSampleCompare;
@@ -563,7 +563,7 @@
 		case VK_IMAGE_TYPE_1D:
 			maxExt.height = 1;
 			maxExt.depth = 1;
-			if (!mvkConfig()->texture1DAs2D) {
+			if (!mvkConfig().texture1DAs2D) {
 				maxExt.width = pLimits->maxImageDimension1D;
 				maxLevels = 1;
 				sampleCounts = VK_SAMPLE_COUNT_1_BIT;
@@ -1021,7 +1021,7 @@
 MVKArrayRef<MVKQueueFamily*> MVKPhysicalDevice::getQueueFamilies() {
 	if (_queueFamilies.empty()) {
 		VkQueueFamilyProperties qfProps;
-		bool specialize = mvkConfig()->specializedQueueFamilies;
+		bool specialize = mvkConfig().specializedQueueFamilies;
 		uint32_t qfIdx = 0;
 
 		qfProps.queueCount = kMVKQueueCountPerQueueFamily;
@@ -1150,7 +1150,7 @@
 void MVKPhysicalDevice::initProperties() {
 	mvkClear(&_properties);	// Start with everything cleared
 
-	_properties.apiVersion = mvkConfig()->apiVersionToAdvertise;
+	_properties.apiVersion = mvkConfig().apiVersionToAdvertise;
 	_properties.driverVersion = MVK_VERSION;
 
 	initGPUInfoProperties();
@@ -1231,7 +1231,7 @@
 
 	if ( mvkOSVersionIsAtLeast(13.0) ) {
 		_metalFeatures.mslVersionEnum = MTLLanguageVersion2_2;
-		_metalFeatures.placementHeaps = mvkConfig()->useMTLHeap;
+		_metalFeatures.placementHeaps = mvkConfig().useMTLHeap;
 		_metalFeatures.nativeTextureSwizzle = true;
 		if (supportsMTLGPUFamily(Apple3)) {
 			_metalFeatures.native3DCompressedTextures = true;
@@ -1323,7 +1323,7 @@
 
 	if ( mvkOSVersionIsAtLeast(13.0) ) {
 		_metalFeatures.mslVersionEnum = MTLLanguageVersion2_2;
-		_metalFeatures.placementHeaps = mvkConfig()->useMTLHeap;
+		_metalFeatures.placementHeaps = mvkConfig().useMTLHeap;
 		_metalFeatures.nativeTextureSwizzle = true;
 		if (supportsMTLGPUFamily(Apple3)) {
 			_metalFeatures.native3DCompressedTextures = true;
@@ -1421,7 +1421,7 @@
         }
 		if (supportsMTLGPUFamily(Mac2)) {
 			_metalFeatures.nativeTextureSwizzle = true;
-			_metalFeatures.placementHeaps = mvkConfig()->useMTLHeap;
+			_metalFeatures.placementHeaps = mvkConfig().useMTLHeap;
 			_metalFeatures.renderWithoutAttachments = true;
 		}
 	}
@@ -3121,7 +3121,7 @@
 	// MTLCreateSystemDefaultDevice function, and if that GPU is the same as the
 	// selected GPU, update the MTLDevice instance used by the MVKPhysicalDevice.
 	id<MTLDevice> mtlDevice = _physicalDevice->getMTLDevice();
-	if (mvkConfig()->switchSystemGPU && !(mtlDevice.isLowPower || mtlDevice.isHeadless) ) {
+	if (mvkConfig().switchSystemGPU && !(mtlDevice.isLowPower || mtlDevice.isHeadless) ) {
 		id<MTLDevice> sysMTLDevice = MTLCreateSystemDefaultDevice();
 		if (mvkGetRegistryID(sysMTLDevice) == mvkGetRegistryID(mtlDevice)) {
 			_physicalDevice->replaceMTLDevice(sysMTLDevice);
@@ -3357,7 +3357,7 @@
 
 MVKDescriptorPool* MVKDevice::createDescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo,
 												   const VkAllocationCallbacks* pAllocator) {
-	return new MVKDescriptorPool(this, pCreateInfo, mvkConfig()->preallocateDescriptors);
+	return new MVKDescriptorPool(this, pCreateInfo, mvkConfig().preallocateDescriptors);
 }
 
 void MVKDevice::destroyDescriptorPool(MVKDescriptorPool* mvkDP,
@@ -3403,7 +3403,7 @@
 
 MVKCommandPool* MVKDevice::createCommandPool(const VkCommandPoolCreateInfo* pCreateInfo,
 											const VkAllocationCallbacks* pAllocator) {
-	return new MVKCommandPool(this, pCreateInfo, mvkConfig()->useCommandPooling);
+	return new MVKCommandPool(this, pCreateInfo, mvkConfig().useCommandPooling);
 }
 
 void MVKDevice::destroyCommandPool(MVKCommandPool* mvkCmdPool,
@@ -3680,7 +3680,7 @@
 MTLCompileOptions* MVKDevice::getMTLCompileOptions(bool useFastMath, bool preserveInvariance) {
 	MTLCompileOptions* mtlCompOpt = [MTLCompileOptions new];
 	mtlCompOpt.languageVersion = _pMetalFeatures->mslVersionEnum;
-	mtlCompOpt.fastMathEnabled = useFastMath && mvkConfig()->fastMathEnabled;
+	mtlCompOpt.fastMathEnabled = useFastMath && mvkConfig().fastMathEnabled;
 #if MVK_XCODE_12
 	if ([mtlCompOpt respondsToSelector: @selector(setPreserveInvariance:)]) {
 		[mtlCompOpt setPreserveInvariance: preserveInvariance];
@@ -3691,7 +3691,7 @@
 
 // Can't use prefilled Metal command buffers if any of the resource descriptors can be updated after binding.
 bool MVKDevice::shouldPrefillMTLCommandBuffers() {
-	return (mvkConfig()->prefillMetalCommandBuffers &&
+	return (mvkConfig().prefillMetalCommandBuffers &&
 			!(_enabledDescriptorIndexingFeatures.descriptorBindingUniformBufferUpdateAfterBind ||
 			  _enabledDescriptorIndexingFeatures.descriptorBindingSampledImageUpdateAfterBind ||
 			  _enabledDescriptorIndexingFeatures.descriptorBindingStorageImageUpdateAfterBind ||
@@ -3703,7 +3703,7 @@
 
 void MVKDevice::startAutoGPUCapture(MVKConfigAutoGPUCaptureScope autoGPUCaptureScope, id mtlCaptureObject) {
 
-	if (_isCurrentlyAutoGPUCapturing || (mvkConfig()->autoGPUCaptureScope != autoGPUCaptureScope)) { return; }
+	if (_isCurrentlyAutoGPUCapturing || (mvkConfig().autoGPUCaptureScope != autoGPUCaptureScope)) { return; }
 
 	_isCurrentlyAutoGPUCapturing = true;
 
@@ -3715,7 +3715,7 @@
 		captureDesc.captureObject = mtlCaptureObject;
 		captureDesc.destination = MTLCaptureDestinationDeveloperTools;
 
-		const char* filePath = mvkConfig()->autoGPUCaptureOutputFilepath;
+		const char* filePath = mvkConfig().autoGPUCaptureOutputFilepath;
 		if (strlen(filePath)) {
 			if ([captureMgr respondsToSelector: @selector(supportsDestination:)] &&
 				[captureMgr supportsDestination: MTLCaptureDestinationGPUTraceDocument] ) {
@@ -3751,7 +3751,7 @@
 }
 
 void MVKDevice::stopAutoGPUCapture(MVKConfigAutoGPUCaptureScope autoGPUCaptureScope) {
-	if (_isCurrentlyAutoGPUCapturing && mvkConfig()->autoGPUCaptureScope == autoGPUCaptureScope) {
+	if (_isCurrentlyAutoGPUCapturing && mvkConfig().autoGPUCaptureScope == autoGPUCaptureScope) {
 		[[MTLCaptureManager sharedCaptureManager] stopCapture];
 		_isCurrentlyAutoGPUCapturing = false;
 	}
@@ -3810,8 +3810,8 @@
 
 void MVKDevice::initPerformanceTracking() {
 
-	_isPerformanceTracking = mvkConfig()->performanceTracking;
-	_logActivityPerformanceInline = mvkConfig()->logActivityPerformanceInline;
+	_isPerformanceTracking = mvkConfig().performanceTracking;
+	_logActivityPerformanceInline = mvkConfig().logActivityPerformanceInline;
 
 	MVKPerformanceTracker initPerf;
     initPerf.count = 0;
@@ -3862,8 +3862,8 @@
 	_pMemoryProperties = &_physicalDevice->_memoryProperties;
 
 	// Indicate whether semaphores should use a MTLFence or MTLEvent if they are available.
-	_useMTLFenceForSemaphores = _pMetalFeatures->fences && mvkConfig()->semaphoreUseMTLFence;
-	_useMTLEventForSemaphores = _pMetalFeatures->events && mvkConfig()->semaphoreUseMTLEvent;
+	_useMTLFenceForSemaphores = _pMetalFeatures->fences && mvkConfig().semaphoreUseMTLFence;
+	_useMTLEventForSemaphores = _pMetalFeatures->events && mvkConfig().semaphoreUseMTLEvent;
 
 	MVKLogInfo("Using %s for Vulkan semaphores.", _useMTLFenceForSemaphores ? "MTLFence" : (_useMTLEventForSemaphores ? "MTLEvent" : "emulation"));
 }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index 5029698..4c2af5c 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -983,7 +983,7 @@
 	if (validSamples == VK_SAMPLE_COUNT_1_BIT) { return validSamples; }
 
 	// Don't use getImageType() because it hasn't been set yet.
-	if ( !((pCreateInfo->imageType == VK_IMAGE_TYPE_2D) || ((pCreateInfo->imageType == VK_IMAGE_TYPE_1D) && mvkConfig()->texture1DAs2D)) ) {
+	if ( !((pCreateInfo->imageType == VK_IMAGE_TYPE_2D) || ((pCreateInfo->imageType == VK_IMAGE_TYPE_1D) && mvkConfig().texture1DAs2D)) ) {
 		setConfigurationResult(reportError(VK_ERROR_FEATURE_NOT_PRESENT, "vkCreateImage() : Under Metal, multisampling can only be used with a 2D image type. Setting sample count to 1."));
 		validSamples = VK_SAMPLE_COUNT_1_BIT;
 	}
@@ -1476,7 +1476,7 @@
 																				  _imageView->_usage,
 																				  _imageView,
 																				  _device->_pMetalFeatures->nativeTextureSwizzle,
-																				  mvkConfig()->fullImageViewSwizzle,
+																				  mvkConfig().fullImageViewSwizzle,
 																				  _mtlPixFmt,
 																				  useSwizzle));
     _packedSwizzle = (useSwizzle) ? mvkPackSwizzle(pCreateInfo->components) : 0;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.h b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.h
index 5b7e402..78a2076 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.h
@@ -49,7 +49,7 @@
 	bool isEnabled(uint32_t enabledVersion, const MVKExtensionList& extList) {
 		return ((isCore() && MVK_VULKAN_API_VERSION_CONFORM(enabledVersion) >= apiVersion) ||
 				(extList.isEnabled(ext1Name) || extList.isEnabled(ext2Name) ||
-				 !mvkIsAnyFlagEnabled(mvkConfig()->advertiseExtensions,
+				 !mvkIsAnyFlagEnabled(mvkConfig().advertiseExtensions,
 									  MVK_CONFIG_ADVERTISE_EXTENSIONS_ALL)));
 	}
 } MVKEntryPoint;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm
index c206921..4bac21d 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKInstance.mm
@@ -292,7 +292,7 @@
 #if MVK_MACOS
 	NSArray* rawMTLDevs = [MTLCopyAllDevices() autorelease];
 	if (rawMTLDevs) {
-		bool forceLowPower = mvkConfig()->forceLowPowerGPU;
+		bool forceLowPower = mvkConfig().forceLowPowerGPU;
 
 		// Populate the array of appropriate MTLDevices
 		for (id<MTLDevice> md in rawMTLDevs) {
@@ -684,7 +684,7 @@
 	MVKExtensionList allExtns(this, true);
 	MVKLogInfo("MoltenVK version %s, supporting Vulkan version %s.\n\tThe following %d Vulkan extensions are supported:%s",
 			   mvkGetMoltenVKVersionString(MVK_VERSION).c_str(),
-			   mvkGetVulkanVersionString(mvkConfig()->apiVersionToAdvertise).c_str(),
+			   mvkGetVulkanVersionString(mvkConfig().apiVersionToAdvertise).c_str(),
 			   allExtns.getEnabledCount(),
 			   allExtns.enabledNamesString("\n\t\t", true).c_str());
 }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
index 913ae01..66869bf 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
@@ -40,7 +40,7 @@
 #pragma mark MVKPipelineLayout
 
 struct MVKShaderImplicitRezBinding {
-	uint32_t stages[kMVKShaderStageMax];
+	uint32_t stages[kMVKShaderStageCount];
 };
 
 /** Represents a Vulkan pipeline layout. */
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index e9b8839..8d0ff54 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -92,7 +92,7 @@
 
 	// Add any resource bindings used by push-constants.
 	// Use VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT descriptor type as compatible with push constants in Metal.
-	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageMax; stage++) {
+	for (uint32_t stage = kMVKShaderStageVertex; stage < kMVKShaderStageCount; stage++) {
 		mvkPopulateShaderConverterContext(context,
 										  _pushConstantsMTLResourceIndexes.stages[stage],
 										  MVKShaderStage(stage),
@@ -149,7 +149,7 @@
 	// FIXME: Many of these are optional. We shouldn't set the ones that aren't
 	// present--or at least, we should move the ones that are down to avoid running over
 	// the limit of available buffers. But we can't know that until we compile the shaders.
-	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		_dynamicOffsetBufferIndex.stages[i] = _pushConstantsMTLResourceIndexes.stages[i].bufferIndex + 1;
 		_bufferSizeBufferIndex.stages[i] = _dynamicOffsetBufferIndex.stages[i] + 1;
 		_swizzleBufferIndex.stages[i] = _bufferSizeBufferIndex.stages[i] + 1;
@@ -176,7 +176,7 @@
 
 void MVKPipeline::bindPushConstants(MVKCommandEncoder* cmdEncoder) {
 	if (cmdEncoder) {
-		for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageMax; i++) {
+		for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 			cmdEncoder->getPushConstants(mvkVkShaderStageFlagBitsFromMVKShaderStage(MVKShaderStage(i)))->setMTLBufferIndex(_pushConstantsMTLResourceIndexes.stages[i].bufferIndex);
 		}
 	}
@@ -206,7 +206,7 @@
 	MVKVulkanAPIDeviceObject(device),
 	_pipelineCache(pipelineCache),
 	_pushConstantsMTLResourceIndexes(layout->getPushConstantBindings()),
-	_fullImageViewSwizzle(mvkConfig()->fullImageViewSwizzle),
+	_fullImageViewSwizzle(mvkConfig().fullImageViewSwizzle),
 	_descriptorSetCount(layout->getDescriptorSetCount()) {}
 
 
@@ -1555,11 +1555,11 @@
 		}
 	}
 
-	shaderContext.options.mslOptions.texture_1D_as_2D = mvkConfig()->texture1DAs2D;
+	shaderContext.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
     shaderContext.options.mslOptions.enable_point_size_builtin = isRenderingPoints(pCreateInfo) || reflectData.pointMode;
 	shaderContext.options.mslOptions.enable_frag_depth_builtin = pixFmts->isDepthFormat(mtlDSFormat);
 	shaderContext.options.mslOptions.enable_frag_stencil_ref_builtin = pixFmts->isStencilFormat(mtlDSFormat);
-    shaderContext.options.shouldFlipVertexY = mvkConfig()->shaderConversionFlipVertexY;
+    shaderContext.options.shouldFlipVertexY = mvkConfig().shaderConversionFlipVertexY;
     shaderContext.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
     shaderContext.options.mslOptions.tess_domain_origin_lower_left = pTessDomainOriginState && pTessDomainOriginState->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT;
     shaderContext.options.mslOptions.multiview = mvkRendPass->isMultiview();
@@ -1779,7 +1779,7 @@
 	shaderContext.options.mslOptions.swizzle_texture_samples = _fullImageViewSwizzle && !getDevice()->_pMetalFeatures->nativeTextureSwizzle;
 	shaderContext.options.mslOptions.texture_buffer_native = _device->_pMetalFeatures->textureBuffers;
 	shaderContext.options.mslOptions.dispatch_base = _allowsDispatchBase;
-	shaderContext.options.mslOptions.texture_1D_as_2D = mvkConfig()->texture1DAs2D;
+	shaderContext.options.mslOptions.texture_1D_as_2D = mvkConfig().texture1DAs2D;
     shaderContext.options.mslOptions.fixed_subgroup_size = mvkIsAnyFlagEnabled(pSS->flags, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) ? 0 : _device->_pMetalFeatures->maxSubgroupSize;
 
 	bool useMetalArgBuff = isUsingMetalArgumentBuffers();
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm b/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
index 351e321..f3006f8 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueryPool.mm
@@ -330,7 +330,7 @@
 MVKOcclusionQueryPool::MVKOcclusionQueryPool(MVKDevice* device,
                                              const VkQueryPoolCreateInfo* pCreateInfo) : MVKQueryPool(device, pCreateInfo, 1) {
 
-    if (mvkConfig()->supportLargeQueryPools) {
+    if (mvkConfig().supportLargeQueryPools) {
         _queryIndexOffset = 0;
 
         // Ensure we don't overflow the maximum number of queries
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
index b198718..0c19888 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.mm
@@ -38,7 +38,7 @@
 	id<MTLCommandQueue> mtlQ = _mtlQueues[queueIndex];
 	if ( !mtlQ ) {
 		@autoreleasepool {		// Catch any autoreleased objects created during MTLCommandQueue creation
-			uint32_t maxCmdBuffs = mvkConfig()->maxActiveMetalCommandBuffersPerQueue;
+			uint32_t maxCmdBuffs = mvkConfig().maxActiveMetalCommandBuffersPerQueue;
 			mtlQ = [_physicalDevice->getMTLDevice() newCommandQueueWithMaxCommandBufferCount: maxCmdBuffs];		// retained
 			_mtlQueues[queueIndex] = mtlQ;
 		}
@@ -150,7 +150,7 @@
 	if ([_mtlQueue respondsToSelector: @selector(commandBufferWithDescriptor:)]) {
 		MTLCommandBufferDescriptor* mtlCmdBuffDesc = [MTLCommandBufferDescriptor new];	// temp retain
 		mtlCmdBuffDesc.retainedReferences = retainRefs;
-		if (mvkConfig()->debugMode) {
+		if (mvkConfig().debugMode) {
 			mtlCmdBuffDesc.errorOptions |= MTLCommandBufferErrorOptionEncoderExecutionStatus;
 		}
 		id<MTLCommandBuffer> cmdBuff = [_mtlQueue commandBufferWithDescriptor: mtlCmdBuffDesc];
@@ -192,7 +192,7 @@
 
 void MVKQueue::initExecQueue() {
 	_execQueue = nil;
-	if ( !mvkConfig()->synchronousQueueSubmits ) {
+	if ( !mvkConfig().synchronousQueueSubmits ) {
 		// Determine the dispatch queue priority
 		dispatch_qos_class_t dqQOS = MVK_DISPATCH_QUEUE_QOS_CLASS;
 		int dqPriority = (1.0 - _priority) * QOS_MIN_RELATIVE_PRIORITY;
@@ -214,9 +214,8 @@
 void MVKQueue::initGPUCaptureScopes() {
 	_submissionCaptureScope = new MVKGPUCaptureScope(this);
 
-	const MVKConfiguration* pMVKConfig = mvkConfig();
-	if (_queueFamily->getIndex() == pMVKConfig->defaultGPUCaptureScopeQueueFamilyIndex &&
-		_index == pMVKConfig->defaultGPUCaptureScopeQueueIndex) {
+	if (_queueFamily->getIndex() == mvkConfig().defaultGPUCaptureScopeQueueFamilyIndex &&
+		_index == mvkConfig().defaultGPUCaptureScopeQueueIndex) {
 
 		getDevice()->startAutoGPUCapture(MVK_CONFIG_AUTO_GPU_CAPTURE_SCOPE_FRAME, _mtlQueue);
 		_submissionCaptureScope->makeDefault();
@@ -339,11 +338,11 @@
 					mvkDev->getPhysicalDevice()->setConfigurationResult(VK_ERROR_DEVICE_LOST);
 					break;
 				default:
-					if (mvkConfig()->resumeLostDevice) { mvkDev->clearConfigurationResult(); }
+					if (mvkConfig().resumeLostDevice) { mvkDev->clearConfigurationResult(); }
 					break;
 			}
 #if MVK_XCODE_12
-			if (mvkConfig()->debugMode) {
+			if (mvkConfig().debugMode) {
 				if (&MTLCommandBufferEncoderInfoErrorKey != nullptr) {
 					if (NSArray<id<MTLCommandBufferEncoderInfo>>* mtlEncInfo = mtlCB.error.userInfo[MTLCommandBufferEncoderInfoErrorKey]) {
 						MVKLogInfo("Encoders for %p \"%s\":", mtlCB, mtlCB.label ? mtlCB.label.UTF8String : "");
@@ -362,7 +361,7 @@
 #endif
 		}
 #if MVK_XCODE_12
-		if (mvkConfig()->debugMode) {
+		if (mvkConfig().debugMode) {
 			bool isFirstMsg = true;
 			for (id<MTLFunctionLog> log in mtlCB.logs) {
 				if (isFirstMsg) {
@@ -476,9 +475,8 @@
 }
 
 void MVKQueuePresentSurfaceSubmission::stopAutoGPUCapture() {
-	const MVKConfiguration* pMVKConfig = mvkConfig();
-	if (_queue->_queueFamily->getIndex() == pMVKConfig->defaultGPUCaptureScopeQueueFamilyIndex &&
-		_queue->_index == pMVKConfig->defaultGPUCaptureScopeQueueIndex) {
+	if (_queue->_queueFamily->getIndex() == mvkConfig().defaultGPUCaptureScopeQueueFamilyIndex &&
+		_queue->_index == mvkConfig().defaultGPUCaptureScopeQueueIndex) {
 		_queue->getDevice()->stopAutoGPUCapture(MVK_CONFIG_AUTO_GPU_CAPTURE_SCOPE_FRAME);
 	}
 }
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm b/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm
index 8fa6594..732e4f3 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKShaderModule.mm
@@ -291,7 +291,7 @@
 }
 
 bool MVKShaderModule::convert(SPIRVToMSLConversionConfiguration* pContext) {
-	bool shouldLogCode = mvkConfig()->debugMode;
+	bool shouldLogCode = mvkConfig().debugMode;
 	bool shouldLogEstimatedGLSL = shouldLogCode;
 
 	// If the SPIR-V converter does not have any code, but the GLSL converter does,
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
index 63700f8..c804576 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSwapchain.mm
@@ -123,7 +123,7 @@
 
 // If the product has not been fully licensed, renders the watermark image to the surface.
 void MVKSwapchain::renderWatermark(id<MTLTexture> mtlTexture, id<MTLCommandBuffer> mtlCmdBuff) {
-    if (mvkConfig()->displayWatermark) {
+    if (mvkConfig().displayWatermark) {
         if ( !_licenseWatermark ) {
             _licenseWatermark = new MVKWatermarkRandom(getMTLDevice(),
                                                        __watermarkTextureContent,
@@ -144,7 +144,7 @@
 
 // Calculates and remembers the time interval between frames.
 void MVKSwapchain::markFrameInterval() {
-	if ( !(mvkConfig()->performanceTracking || _licenseWatermark) ) { return; }
+	if ( !(mvkConfig().performanceTracking || _licenseWatermark) ) { return; }
 
 	uint64_t prevFrameTime = _lastFrameTime;
 	_lastFrameTime = mvkGetTimestamp();
@@ -153,7 +153,7 @@
 
 	_device->addActivityPerformance(_device->_performanceStatistics.queue.frameInterval, prevFrameTime, _lastFrameTime);
 
-	uint32_t perfLogCntLimit = mvkConfig()->performanceLoggingFrameCount;
+	uint32_t perfLogCntLimit = mvkConfig().performanceLoggingFrameCount;
 	if ((perfLogCntLimit > 0) && (++_currentPerfLogFrameCount >= perfLogCntLimit)) {
 		_currentPerfLogFrameCount = 0;
 		MVKLogInfo("Performance statistics reporting every: %d frames, avg FPS: %.2f, elapsed time: %.3f seconds:",
@@ -271,7 +271,7 @@
 	_mtlLayer.pixelFormat = getPixelFormats()->getMTLPixelFormat(pCreateInfo->imageFormat);
 	_mtlLayer.maximumDrawableCountMVK = imgCnt;
 	_mtlLayer.displaySyncEnabledMVK = (pCreateInfo->presentMode != VK_PRESENT_MODE_IMMEDIATE_KHR);
-	_mtlLayer.magnificationFilter = mvkConfig()->swapchainMagFilterUseNearest ? kCAFilterNearest : kCAFilterLinear;
+	_mtlLayer.magnificationFilter = mvkConfig().swapchainMagFilterUseNearest ? kCAFilterNearest : kCAFilterLinear;
 	_mtlLayer.framebufferOnly = !mvkIsAnyFlagEnabled(pCreateInfo->imageUsage, (VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
 																			   VK_IMAGE_USAGE_TRANSFER_DST_BIT |
 																			   VK_IMAGE_USAGE_SAMPLED_BIT |
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm b/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm
index ddfc6ff..41cde47 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKSync.mm
@@ -521,7 +521,7 @@
 	dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ @autoreleasepool { block(); } });
 
 	// Limit timeout to avoid overflow since wait_for() uses wait_until()
-	chrono::nanoseconds nanoTimeout(min(mvkConfig()->metalCompileTimeout, kMVKUndefinedLargeUInt64));
+	chrono::nanoseconds nanoTimeout(min(mvkConfig().metalCompileTimeout, kMVKUndefinedLargeUInt64));
 	_blocker.wait_for(lock, nanoTimeout, [this]{ return _isCompileDone; });
 
 	if ( !_isCompileDone ) {
diff --git a/MoltenVK/MoltenVK/Layers/MVKExtensions.mm b/MoltenVK/MoltenVK/Layers/MVKExtensions.mm
index 1e866f1..b3518b1 100644
--- a/MoltenVK/MoltenVK/Layers/MVKExtensions.mm
+++ b/MoltenVK/MoltenVK/Layers/MVKExtensions.mm
@@ -51,7 +51,7 @@
 
 	// If the config indicates that not all supported extensions should be advertised,
 	// only advertise those supported extensions that have been specifically configured.
-	auto advExtns = mvkConfig()->advertiseExtensions;
+	auto advExtns = mvkConfig().advertiseExtensions;
 	if ( !mvkIsAnyFlagEnabled(advExtns, MVK_CONFIG_ADVERTISE_EXTENSIONS_ALL) ) {
 		if (mvkIsAnyFlagEnabled(advExtns, MVK_CONFIG_ADVERTISE_EXTENSIONS_MOLTENVK)) {
 			MVK_EXTENSION_MIN_OS(MVK_MOLTENVK,                         10.11,  8.0)
diff --git a/MoltenVK/MoltenVK/Layers/MVKLayers.mm b/MoltenVK/MoltenVK/Layers/MVKLayers.mm
index 8a26260..5a2eb24 100644
--- a/MoltenVK/MoltenVK/Layers/MVKLayers.mm
+++ b/MoltenVK/MoltenVK/Layers/MVKLayers.mm
@@ -45,7 +45,7 @@
 	strcpy(_layerProperties.layerName, "MoltenVK");
 	mvkClear(_layerProperties.description, VK_MAX_DESCRIPTION_SIZE);
 	strcpy(_layerProperties.description, "MoltenVK driver layer");
-	_layerProperties.specVersion = mvkConfig()->apiVersionToAdvertise;
+	_layerProperties.specVersion = mvkConfig().apiVersionToAdvertise;
 	_layerProperties.implementationVersion = MVK_VERSION;
 
 	((MVKExtensionList*)&_supportedInstanceExtensions)->disableAllButEnabledInstanceExtensions();
diff --git a/MoltenVK/MoltenVK/Utility/MVKBaseObject.mm b/MoltenVK/MoltenVK/Utility/MVKBaseObject.mm
index 1337573..1902911 100644
--- a/MoltenVK/MoltenVK/Utility/MVKBaseObject.mm
+++ b/MoltenVK/MoltenVK/Utility/MVKBaseObject.mm
@@ -79,7 +79,7 @@
 	MVKVulkanAPIObject* mvkAPIObj = mvkObj ? mvkObj->getVulkanAPIObject() : nullptr;
 	MVKInstance* mvkInst = mvkAPIObj ? mvkAPIObj->getInstance() : nullptr;
 	bool hasDebugCallbacks = mvkInst && mvkInst->hasDebugCallbacks();
-	bool shouldLog = (aslLvl < (mvkConfig()->logLevel << 2));
+	bool shouldLog = (aslLvl < (mvkConfig().logLevel << 2));
 
 	// Fail fast to avoid further unnecessary processing.
 	if ( !(shouldLog || hasDebugCallbacks) ) { return; }
diff --git a/MoltenVK/MoltenVK/Utility/MVKEnvironment.cpp b/MoltenVK/MoltenVK/Utility/MVKEnvironment.cpp
index c3b892a..aae324e 100644
--- a/MoltenVK/MoltenVK/Utility/MVKEnvironment.cpp
+++ b/MoltenVK/MoltenVK/Utility/MVKEnvironment.cpp
@@ -63,7 +63,7 @@
 	MVK_SET_FROM_ENV_OR_BUILD_BOOL  (evCfg.resumeLostDevice,                       MVK_CONFIG_RESUME_LOST_DEVICE);
 	MVK_SET_FROM_ENV_OR_BUILD_BOOL  (evCfg.useMetalArgumentBuffers,                MVK_CONFIG_USE_METAL_ARGUMENT_BUFFERS);
 
-	mvkSetConfig(&evCfg);
+	mvkSetConfig(evCfg);
 }
 
 static MVKConfiguration _mvkConfig;
@@ -72,18 +72,18 @@
 // Returns the MoltenVK config, lazily initializing it if necessary.
 // We initialize lazily instead of in a library constructor function to
 // ensure the NSProcessInfo environment is available when called upon.
-const MVKConfiguration* mvkConfig() {
+const MVKConfiguration& mvkConfig() {
 	if ( !_mvkConfigInitialized ) {
 		mvkInitConfigFromEnvVars();
 	}
-	return &_mvkConfig;
+	return _mvkConfig;
 }
 
 // Sets config content, and updates any content that needs baking, including copying the contents
 // of strings from the incoming MVKConfiguration member to a corresponding std::string, and then
 // repointing the MVKConfiguration member to the contents of the std::string.
-void mvkSetConfig(MVKConfiguration* pMVKConfig) {
-	_mvkConfig = *pMVKConfig;
+void mvkSetConfig(const MVKConfiguration& mvkConfig) {
+	_mvkConfig = mvkConfig;
 
 	// Ensure the API version is supported, and add the VK_HEADER_VERSION.
 	_mvkConfig.apiVersionToAdvertise = std::min(_mvkConfig.apiVersionToAdvertise, MVK_VULKAN_API_VERSION);
diff --git a/MoltenVK/MoltenVK/Utility/MVKEnvironment.h b/MoltenVK/MoltenVK/Utility/MVKEnvironment.h
index 5f6c821..4cb724a 100644
--- a/MoltenVK/MoltenVK/Utility/MVKEnvironment.h
+++ b/MoltenVK/MoltenVK/Utility/MVKEnvironment.h
@@ -76,10 +76,10 @@
 #pragma mark Global Configuration
 
 /** Global function to access MoltenVK configuration info. */
-const MVKConfiguration* mvkConfig();
+const MVKConfiguration& mvkConfig();
 
 /** Global function to update MoltenVK configuration info. */
-void mvkSetConfig(MVKConfiguration* pMVKConfig);
+void mvkSetConfig(const MVKConfiguration& mvkConfig);
 
 /** Flip the vertex coordinate in shaders. Enabled by default. */
 #ifndef MVK_CONFIG_SHADER_CONVERSION_FLIP_VERTEX_Y
diff --git a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
index 1a9751c..e68c4a6 100644
--- a/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
+++ b/MoltenVK/MoltenVK/Vulkan/mvk_datatypes.mm
@@ -144,7 +144,7 @@
 																  bool isMultisample) {
 	switch (vkImageType) {
 		case VK_IMAGE_TYPE_3D: return MTLTextureType3D;
-		case VK_IMAGE_TYPE_1D: return (mvkConfig()->texture1DAs2D
+		case VK_IMAGE_TYPE_1D: return (mvkConfig().texture1DAs2D
 									   ? mvkMTLTextureTypeFromVkImageType(VK_IMAGE_TYPE_2D, arraySize, isMultisample)
 									   : (arraySize > 1 ? MTLTextureType1DArray : MTLTextureType1D));
 		case VK_IMAGE_TYPE_2D:
@@ -176,8 +176,8 @@
 		case VK_IMAGE_VIEW_TYPE_3D:			return MTLTextureType3D;
 		case VK_IMAGE_VIEW_TYPE_CUBE:		return MTLTextureTypeCube;
 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	return MTLTextureTypeCubeArray;
-		case VK_IMAGE_VIEW_TYPE_1D:			return mvkConfig()->texture1DAs2D ? mvkMTLTextureTypeFromVkImageViewType(VK_IMAGE_VIEW_TYPE_2D, isMultisample) : MTLTextureType1D;
-		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	return mvkConfig()->texture1DAs2D ? mvkMTLTextureTypeFromVkImageViewType(VK_IMAGE_VIEW_TYPE_2D_ARRAY, isMultisample) : MTLTextureType1DArray;
+		case VK_IMAGE_VIEW_TYPE_1D:			return mvkConfig().texture1DAs2D ? mvkMTLTextureTypeFromVkImageViewType(VK_IMAGE_VIEW_TYPE_2D, isMultisample) : MTLTextureType1D;
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	return mvkConfig().texture1DAs2D ? mvkMTLTextureTypeFromVkImageViewType(VK_IMAGE_VIEW_TYPE_2D_ARRAY, isMultisample) : MTLTextureType1DArray;
 
 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
 #if MVK_MACOS
@@ -638,7 +638,7 @@
 		case VK_SHADER_STAGE_COMPUTE_BIT:					return kMVKShaderStageCompute;
 		default:
 			MVKBaseObject::reportError(mvkObj, VK_ERROR_FORMAT_NOT_SUPPORTED, "VkShaderStage %x is not supported.", vkStage);
-			return kMVKShaderStageMax;
+			return kMVKShaderStageCount;
 	}
 }
 
@@ -650,8 +650,8 @@
 		/* FIXME: kMVKShaderStageGeometry */
 		case kMVKShaderStageFragment:	return VK_SHADER_STAGE_FRAGMENT_BIT;
 		case kMVKShaderStageCompute:	return VK_SHADER_STAGE_COMPUTE_BIT;
-		case kMVKShaderStageMax:
-			assert(!"This function should never be called with kMVKShaderStageMax!");
+		case kMVKShaderStageCount:
+			assert(!"This function should never be called with kMVKShaderStageCount!");
 			return VK_SHADER_STAGE_ALL;
 	}
 }
diff --git a/MoltenVK/MoltenVK/Vulkan/vk_mvk_moltenvk.mm b/MoltenVK/MoltenVK/Vulkan/vk_mvk_moltenvk.mm
index 7de9788..6d3f522 100644
--- a/MoltenVK/MoltenVK/Vulkan/vk_mvk_moltenvk.mm
+++ b/MoltenVK/MoltenVK/Vulkan/vk_mvk_moltenvk.mm
@@ -53,7 +53,7 @@
 	MVKConfiguration*                           pConfiguration,
 	size_t*                                     pConfigurationSize) {
 
-	return mvkCopy(pConfiguration, mvkConfig(), pConfigurationSize);
+	return mvkCopy(pConfiguration, &mvkConfig(), pConfigurationSize);
 }
 
 MVK_PUBLIC_SYMBOL VkResult vkSetMoltenVKConfigurationMVK(
@@ -61,10 +61,10 @@
 	const MVKConfiguration*                     pConfiguration,
 	size_t*                                     pConfigurationSize) {
 
-	// Start with current config, in case incoming is not fully copied
-	MVKConfiguration mvkCfg = *mvkConfig();
+	// Start with copy of current config, in case incoming is not fully copied
+	MVKConfiguration mvkCfg = mvkConfig();
 	VkResult rslt = mvkCopy(&mvkCfg, pConfiguration, pConfigurationSize);
-	mvkSetConfig(&mvkCfg);
+	mvkSetConfig(mvkCfg);
 	return rslt;
 }
 
@@ -99,7 +99,7 @@
 	len = mvkVer.copy(pMoltenVersionStringBuffer, moltenVersionStringBufferLength - 1);
 	pMoltenVersionStringBuffer[len] = 0;    // terminator
 
-	string vkVer = mvkGetVulkanVersionString(mvkConfig()->apiVersionToAdvertise);
+	string vkVer = mvkGetVulkanVersionString(mvkConfig().apiVersionToAdvertise);
 	len = vkVer.copy(pVulkanVersionStringBuffer, vulkanVersionStringBufferLength - 1);
 	pVulkanVersionStringBuffer[len] = 0;    // terminator
 }
diff --git a/MoltenVK/MoltenVK/Vulkan/vulkan.mm b/MoltenVK/MoltenVK/Vulkan/vulkan.mm
index 014ae8a..2546e84 100644
--- a/MoltenVK/MoltenVK/Vulkan/vulkan.mm
+++ b/MoltenVK/MoltenVK/Vulkan/vulkan.mm
@@ -50,7 +50,7 @@
 
 // Optionally log start of function calls to stderr
 static inline uint64_t MVKTraceVulkanCallStartImpl(const char* funcName) {
-	MVKConfigTraceVulkanCalls traceLvl = mvkConfig()->traceVulkanCalls;
+	MVKConfigTraceVulkanCalls traceLvl = mvkConfig().traceVulkanCalls;
 
 	if (traceLvl == MVK_CONFIG_TRACE_VULKAN_CALLS_NONE ||
 		traceLvl > MVK_CONFIG_TRACE_VULKAN_CALLS_DURATION) { return 0; }
@@ -72,7 +72,7 @@
 
 // Optionally log end of function calls and timings to stderr
 static inline void MVKTraceVulkanCallEndImpl(const char* funcName, uint64_t startTime) {
-	switch(mvkConfig()->traceVulkanCalls) {
+	switch(mvkConfig().traceVulkanCalls) {
 		case MVK_CONFIG_TRACE_VULKAN_CALLS_DURATION:
 			fprintf(stderr, "[mvk-trace] } %s [%.4f ms]\n", funcName, mvkGetElapsedMilliseconds(startTime));
 			break;
@@ -1908,7 +1908,7 @@
     uint32_t*                                   pApiVersion) {
 
     MVKTraceVulkanCallStart();
-    *pApiVersion = mvkConfig()->apiVersionToAdvertise;
+    *pApiVersion = mvkConfig().apiVersionToAdvertise;
     MVKTraceVulkanCallEnd();
     return VK_SUCCESS;
 }