Merge pull request #1596 from spnda/google_display_timing_osx

Implement vkGetRefreshCycleDurationGOOGLE() for macOS
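
For context on the changes below: they expose `VK_KHR_fragment_shader_barycentric` / `VK_NV_fragment_shader_barycentric` and fix a regression in vertex buffer binding counts. A minimal app-side sketch (hypothetical helper, not part of this patch) of querying the new feature bit through the standard `vkGetPhysicalDeviceFeatures2()` path:

```cpp
#include <vulkan/vulkan.h>

// Hypothetical app-side helper: returns true if the device advertises
// fragmentShaderBarycentric via the feature struct handled in MVKDevice.mm below.
static bool supportsFragmentShaderBarycentric(VkPhysicalDevice physDev) {
    VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR baryFeats{};
    baryFeats.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR;

    VkPhysicalDeviceFeatures2 feats2{};
    feats2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    feats2.pNext = &baryFeats;

    vkGetPhysicalDeviceFeatures2(physDev, &feats2);
    return baryFeats.fragmentShaderBarycentric == VK_TRUE;
}
```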
diff --git a/Docs/MoltenVK_Runtime_UserGuide.md b/Docs/MoltenVK_Runtime_UserGuide.md
index ca1c436..23c4bf6 100644
--- a/Docs/MoltenVK_Runtime_UserGuide.md
+++ b/Docs/MoltenVK_Runtime_UserGuide.md
@@ -271,6 +271,7 @@
 - `VK_KHR_device_group_creation`
 - `VK_KHR_driver_properties`
 - `VK_KHR_dynamic_rendering`
+- `VK_KHR_fragment_shader_barycentric` *(requires Metal 2.2 on Mac or Metal 2.3 on iOS)*
 - `VK_KHR_get_memory_requirements2`
 - `VK_KHR_get_physical_device_properties2`
 - `VK_KHR_get_surface_capabilities2`
@@ -328,6 +329,7 @@
 - `VK_AMD_shader_trinary_minmax` *(requires Metal 2.1)*
 - `VK_IMG_format_pvrtc` *(requires Apple GPU)*
 - `VK_INTEL_shader_integer_functions2`
+- `VK_NV_fragment_shader_barycentric` *(requires Metal 2.2 on Mac or Metal 2.3 on iOS)*
 - `VK_NV_glsl_shader`
 
 In order to visibly display your content on *macOS*, *iOS*, or *tvOS*, you must enable the
diff --git a/Docs/Whats_New.md b/Docs/Whats_New.md
index d35900a..34e7e5f 100644
--- a/Docs/Whats_New.md
+++ b/Docs/Whats_New.md
@@ -25,11 +25,13 @@
 	- `VK_KHR_dynamic_rendering`
 	- `VK_KHR_separate_depth_stencil_layouts`
 	- `VK_EXT_separate_stencil_usage`
+	- `VK_KHR_fragment_shader_barycentric`
 - Support attachment clearing when some clearing formats are not specified.
 - Fix error where previously bound push constants can override a descriptor buffer binding 
   used by a subsequent pipeline that does not use push constants.
 - Fix error on some Apple GPUs where a `vkCmdTimestampQuery()` after a renderpass was 
   writing timestamp before renderpass activity was complete.
+- Fix regression in vertex buffer binding counts when establishing implicit buffer binding indexes.
 - Work around zombie memory bug in Intel Iris Plus Graphics driver when repeatedly retrieving GPU counter sets.
 - Update to latest SPIRV-Cross:
 	- MSL: Emit interface block members of array length 1 as arrays instead of scalars.
diff --git a/ExternalRevisions/Vulkan-Headers_repo_revision b/ExternalRevisions/Vulkan-Headers_repo_revision
index 30e1cdb..c14cbd0 100644
--- a/ExternalRevisions/Vulkan-Headers_repo_revision
+++ b/ExternalRevisions/Vulkan-Headers_repo_revision
@@ -1 +1 @@
-76f00ef6cbb1886eb1162d1fa39bee8b51e22ee8
+245d25ce8c3337919dc7916d0e62e31a0d8748ab
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm
index 8a826fa..3f9a52e 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.mm
@@ -193,7 +193,7 @@
                 if (pipeline->needsVertexOutputBuffer()) {
                     [mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
                                           offset: vtxOutBuff->_offset
-                                         atIndex: kMVKTessCtlInputBufferIndex];
+                                         atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
                 }
 				
 				NSUInteger sgSize = pipeline->getTessControlStageState().threadExecutionWidth;
@@ -221,16 +221,16 @@
                     if (pipeline->needsTessCtlOutputBuffer()) {
                         [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
                                                                 offset: tcOutBuff->_offset
-                                                               atIndex: kMVKTessEvalInputBufferIndex];
+                                                               atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
                     }
                     if (pipeline->needsTessCtlPatchOutputBuffer()) {
                         [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
                                                                 offset: tcPatchOutBuff->_offset
-                                                               atIndex: kMVKTessEvalPatchInputBufferIndex];
+                                                               atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
                     }
                     [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
                                                             offset: tcLevelBuff->_offset
-                                                           atIndex: kMVKTessEvalLevelBufferIndex];
+                                                           atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
                     [cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
                                                                         offset: tcLevelBuff->_offset
                                                                 instanceStride: 0];
@@ -395,7 +395,7 @@
                 if (pipeline->needsVertexOutputBuffer()) {
                     [mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
                                           offset: vtxOutBuff->_offset
-                                         atIndex: kMVKTessCtlInputBufferIndex];
+                                         atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
                 }
 				// The vertex shader produced output in the correct order, so there's no need to use
 				// an index buffer here.
@@ -424,16 +424,16 @@
                     if (pipeline->needsTessCtlOutputBuffer()) {
                         [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
                                                                 offset: tcOutBuff->_offset
-                                                               atIndex: kMVKTessEvalInputBufferIndex];
+                                                               atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
                     }
                     if (pipeline->needsTessCtlPatchOutputBuffer()) {
                         [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
                                                                 offset: tcPatchOutBuff->_offset
-                                                               atIndex: kMVKTessEvalPatchInputBufferIndex];
+                                                               atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
                     }
                     [cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
                                                             offset: tcLevelBuff->_offset
-                                                           atIndex: kMVKTessEvalLevelBufferIndex];
+                                                           atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
                     [cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
                                                                         offset: tcLevelBuff->_offset
                                                                 instanceStride: 0];
@@ -741,7 +741,7 @@
                     if (pipeline->needsVertexOutputBuffer()) {
                         [mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
                                               offset: vtxOutBuff->_offset
-                                             atIndex: kMVKTessCtlInputBufferIndex];
+                                             atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
                     }
                     [mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: mtlIndBuff
                                                          indirectBufferOffset: mtlIndBuffOfst
@@ -757,16 +757,16 @@
 							if (pipeline->needsTessCtlOutputBuffer()) {
 								[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
 																		offset: tcOutBuff->_offset
-																	   atIndex: kMVKTessEvalInputBufferIndex];
+																	   atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
 							}
 							if (pipeline->needsTessCtlPatchOutputBuffer()) {
 								[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
 																		offset: tcPatchOutBuff->_offset
-																	   atIndex: kMVKTessEvalPatchInputBufferIndex];
+																	   atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
 							}
 							[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
 																	offset: tcLevelBuff->_offset
-																   atIndex: kMVKTessEvalLevelBufferIndex];
+																   atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
 							[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
 																				offset: tcLevelBuff->_offset
 																		instanceStride: 0];
@@ -1076,7 +1076,7 @@
                     if (pipeline->needsVertexOutputBuffer()) {
                         [mtlTessCtlEncoder setBuffer: vtxOutBuff->_mtlBuffer
                                               offset: vtxOutBuff->_offset
-                                             atIndex: kMVKTessCtlInputBufferIndex];
+                                             atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding)];
                     }
                     [mtlTessCtlEncoder dispatchThreadgroupsWithIndirectBuffer: mtlIndBuff
                                                          indirectBufferOffset: mtlTempIndBuffOfst
@@ -1092,16 +1092,16 @@
 							if (pipeline->needsTessCtlOutputBuffer()) {
 								[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcOutBuff->_mtlBuffer
 																		offset: tcOutBuff->_offset
-																	   atIndex: kMVKTessEvalInputBufferIndex];
+																	   atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding)];
 							}
 							if (pipeline->needsTessCtlPatchOutputBuffer()) {
 								[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcPatchOutBuff->_mtlBuffer
 																		offset: tcPatchOutBuff->_offset
-																	   atIndex: kMVKTessEvalPatchInputBufferIndex];
+																	   atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding)];
 							}
 							[cmdEncoder->_mtlRenderEncoder setVertexBuffer: tcLevelBuff->_mtlBuffer
 																	offset: tcLevelBuff->_offset
-																   atIndex: kMVKTessEvalLevelBufferIndex];
+																   atIndex: cmdEncoder->getDevice()->getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding)];
 							[cmdEncoder->_mtlRenderEncoder setTessellationFactorBuffer: tcLevelBuff->_mtlBuffer
 																				offset: tcLevelBuff->_offset
 																		instanceStride: 0];
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h
index d7d2c48..c6e7280 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h
@@ -407,6 +407,7 @@
 
     // Template function that executes a lambda expression on each dirty element of
     // a vector of bindings, and marks the bindings and the vector as no longer dirty.
+	// Clear the isDirty flag before running the operation, so the operation can re-mark the binding dirty to defer it.
 	template<class T, class V>
 	void encodeBinding(V& bindings,
 					   bool& bindingsDirtyFlag,
@@ -415,8 +416,9 @@
 			bindingsDirtyFlag = false;
 			for (auto& b : bindings) {
 				if (b.isDirty) {
-					mtlOperation(_cmdEncoder, b);
 					b.isDirty = false;
+					mtlOperation(_cmdEncoder, b);
+					if (b.isDirty) { bindingsDirtyFlag = true; }
 				}
 			}
 		}
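
The reordering above lets the per-binding operation mark a binding dirty again, which `MVKCommandEncoderState.mm` below uses to defer vertex buffers it declines to bind. A condensed, self-contained sketch of the same control flow (hypothetical `Binding` type, not MoltenVK code):

```cpp
#include <functional>
#include <vector>

// Hypothetical stand-in for MVKMTLBufferBinding.
struct Binding { int index = 0; bool isDirty = true; };

// Same pattern as the template above: clear isDirty first, run the operation,
// then re-raise the vector-level dirty flag if the operation marked the
// binding dirty again (i.e. deferred it to a later encoding pass).
void encodeBindings(std::vector<Binding>& bindings, bool& bindingsDirty,
                    const std::function<void(Binding&)>& op) {
    if (!bindingsDirty) { return; }
    bindingsDirty = false;
    for (auto& b : bindings) {
        if (b.isDirty) {
            b.isDirty = false;
            op(b);
            if (b.isDirty) { bindingsDirty = true; }   // Operation deferred this binding.
        }
    }
}
```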
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
index 7d47598..dfb8c1c 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
@@ -775,26 +775,33 @@
 	} else if (!forTessellation && stage == kMVKGraphicsStageRasterization) {
         encodeBindings(kMVKShaderStageVertex, "vertex", fullImageViewSwizzle,
                        [pipeline](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b)->void {
-					       if (b.isInline) {
-                               cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
-                                                          b.mtlBytes,
-                                                          b.size,
-                                                          b.index);
-					       } else {
-                               [cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
-                                                                       offset: b.offset
-                                                                      atIndex: b.index];
+                           // The app may have bound more vertex attribute buffers than used by the pipeline.
+                           // We must not bind those extra buffers to the shader because they might overwrite
+                           // any implicit buffers used by the pipeline.
+                           if (pipeline->isValidVertexBufferIndex(kMVKShaderStageVertex, b.index)) {
+                               if (b.isInline) {
+                                   cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
+                                                              b.mtlBytes,
+                                                              b.size,
+                                                              b.index);
+                               } else {
+                                   [cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
+                                                                           offset: b.offset
+                                                                          atIndex: b.index];
 
-							   // Add any translated vertex bindings for this binding
-							   auto xltdVtxBindings = pipeline->getTranslatedVertexBindings();
-							   for (auto& xltdBind : xltdVtxBindings) {
-								   if (b.index == pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.binding)) {
-									   [cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
-																			   offset: b.offset + xltdBind.translationOffset
-																			  atIndex: pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding)];
-								   }
-							   }
-					       }
+                                   // Add any translated vertex bindings for this binding
+                                   auto xltdVtxBindings = pipeline->getTranslatedVertexBindings();
+                                   for (auto& xltdBind : xltdVtxBindings) {
+                                       if (b.index == pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.binding)) {
+                                           [cmdEncoder->_mtlRenderEncoder setVertexBuffer: b.mtlBuffer
+                                                                                   offset: b.offset + xltdBind.translationOffset
+                                                                                  atIndex: pipeline->getMetalBufferIndexForVertexAttributeBinding(xltdBind.translationBinding)];
+                                       }
+                                   }
+                               }
+                           } else {
+                               b.isDirty = true;	// We haven't written it out, so leave dirty until next time.
+                           }
                        },
                        [](MVKCommandEncoder* cmdEncoder, MVKMTLBufferBinding& b, const MVKArrayRef<uint32_t> s)->void {
                            cmdEncoder->setVertexBytes(cmdEncoder->_mtlRenderEncoder,
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
index a929eeb..0693632 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDevice.mm
@@ -287,6 +287,11 @@
 				separateDepthStencilLayoutsFeatures->separateDepthStencilLayouts = true;
 				break;
 			}
+            case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR: {
+                auto* barycentricFeatures = (VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR*)next;
+                barycentricFeatures->fragmentShaderBarycentric = true;
+                break;
+            }
 			default:
 				break;
 		}
@@ -480,6 +485,11 @@
 				sampLocnProps->variableSampleLocations = VK_FALSE;
 				break;
 			}
+            case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR: {
+                auto* barycentricProperties = (VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR*)next;
+                barycentricProperties->triStripVertexOrderIndependentOfProvokingVertex = false;
+                break;
+            }
 			default:
 				break;
 		}
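
The property above is reported as `false`, i.e. on Metal the barycentric vertex order for triangle strips is not guaranteed to be independent of the provoking vertex. A hypothetical app-side query, mirroring the feature query sketched earlier:

```cpp
#include <vulkan/vulkan.h>

// Hypothetical helper: query the barycentric property handled in MVKDevice.mm above.
static VkBool32 triStripOrderIndependentOfProvokingVertex(VkPhysicalDevice physDev) {
    VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR baryProps{};
    baryProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR;

    VkPhysicalDeviceProperties2 props2{};
    props2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
    props2.pNext = &baryProps;

    vkGetPhysicalDeviceProperties2(physDev, &props2);
    return baryProps.triStripVertexOrderIndependentOfProvokingVertex;   // VK_FALSE on MoltenVK.
}
```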
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
index 21ffc44..88b71b1 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
@@ -112,13 +112,13 @@
 #pragma mark -
 #pragma mark MVKPipeline
 
-static const uint32_t kMVKTessCtlInputBufferIndex = 30;
 static const uint32_t kMVKTessCtlNumReservedBuffers = 1;
+static const uint32_t kMVKTessCtlInputBufferBinding = 0;
 
-static const uint32_t kMVKTessEvalInputBufferIndex = 30;
-static const uint32_t kMVKTessEvalPatchInputBufferIndex = 29;
-static const uint32_t kMVKTessEvalLevelBufferIndex = 28;
 static const uint32_t kMVKTessEvalNumReservedBuffers = 3;
+static const uint32_t kMVKTessEvalInputBufferBinding = 0;
+static const uint32_t kMVKTessEvalPatchInputBufferBinding = 1;
+static const uint32_t kMVKTessEvalLevelBufferBinding = 2;
 
 /** Represents an abstract Vulkan pipeline. */
 class MVKPipeline : public MVKVulkanAPIDeviceObject {
@@ -259,6 +259,13 @@
 	/** Returns whether this pipeline has custom sample positions enabled. */
 	bool isUsingCustomSamplePositions() { return _isUsingCustomSamplePositions; }
 
+	/**
+	 * Returns whether the specified MTLBuffer index is valid as a vertex shader buffer index for a stage of this pipeline.
+	 * The index is valid if it is a descriptor binding within the descriptor binding range,
+	 * or a vertex attribute binding above all of the implicit buffer bindings.
+	 */
+	bool isValidVertexBufferIndex(MVKShaderStage stage, uint32_t mtlBufferIndex);
+
 	/** Returns the custom samples used by this pipeline. */
 	MVKArrayRef<MTLSamplePosition> getCustomSamplePositions() { return _customSamplePositions.contents(); }
 
@@ -293,6 +300,7 @@
 	void initCustomSamplePositions(const VkGraphicsPipelineCreateInfo* pCreateInfo);
     void initMTLRenderPipelineState(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
     void initShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
+	void initReservedVertexAttributeBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo);
     void addVertexInputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, const VkGraphicsPipelineCreateInfo* pCreateInfo);
     void addPrevStageOutputToShaderConversionConfig(SPIRVToMSLConversionConfiguration& shaderConfig, SPIRVShaderOutputs& outputs);
     MTLRenderPipelineDescriptor* newMTLRenderPipelineDescriptor(const VkGraphicsPipelineCreateInfo* pCreateInfo, const SPIRVTessReflectionData& reflectData);
@@ -313,8 +321,7 @@
     bool isRasterizationDisabled(const VkGraphicsPipelineCreateInfo* pCreateInfo);
 	bool verifyImplicitBuffer(bool needsBuffer, MVKShaderImplicitRezBinding& index, MVKShaderStage stage, const char* name);
 	uint32_t getTranslatedVertexBinding(uint32_t binding, uint32_t translationOffset, uint32_t maxBinding);
-	uint32_t getImplicitBufferIndex(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage, uint32_t bufferIndexOffset);
-	uint32_t getReservedBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage);
+	uint32_t getImplicitBufferIndex(MVKShaderStage stage, uint32_t bufferIndexOffset);
 
 	const VkPipelineShaderStageCreateInfo* _pVertexSS = nullptr;
 	const VkPipelineShaderStageCreateInfo* _pTessCtlSS = nullptr;
@@ -351,6 +358,7 @@
 
     float _blendConstants[4] = { 0.0, 0.0, 0.0, 1.0 };
     uint32_t _outputControlPointCount;
+	MVKShaderImplicitRezBinding _reservedVertexAttributeBufferCount;
 	MVKShaderImplicitRezBinding _viewRangeBufferIndex;
 	MVKShaderImplicitRezBinding _outputBufferIndex;
 	uint32_t _tessCtlPatchOutputBufferIndex = 0;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index bb50b01..f801d3f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -869,7 +869,7 @@
 				}
 				innerLoc = location;
 			}
-			plDesc.vertexDescriptor.attributes[location].bufferIndex = kMVKTessEvalLevelBufferIndex;
+			plDesc.vertexDescriptor.attributes[location].bufferIndex = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding);
 			if (reflectData.patchKind == spv::ExecutionModeTriangles || output.builtin == spv::BuiltInTessLevelOuter) {
 				plDesc.vertexDescriptor.attributes[location].offset = 0;
 				plDesc.vertexDescriptor.attributes[location].format = MTLVertexFormatHalf4;	// FIXME Should use Float4
@@ -879,7 +879,7 @@
 			}
 		} else if (output.perPatch) {
 			patchOffset = (uint32_t)mvkAlignByteCount(patchOffset, getShaderOutputAlignment(output));
-			plDesc.vertexDescriptor.attributes[output.location].bufferIndex = kMVKTessEvalPatchInputBufferIndex;
+			plDesc.vertexDescriptor.attributes[output.location].bufferIndex = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding);
 			plDesc.vertexDescriptor.attributes[output.location].format = getPixelFormats()->getMTLVertexFormat(mvkFormatFromOutput(output));
 			plDesc.vertexDescriptor.attributes[output.location].offset = patchOffset;
 			patchOffset += getShaderOutputSize(output);
@@ -887,7 +887,7 @@
 			usedPerPatch = true;
 		} else {
 			offset = (uint32_t)mvkAlignByteCount(offset, getShaderOutputAlignment(output));
-			plDesc.vertexDescriptor.attributes[output.location].bufferIndex = kMVKTessEvalInputBufferIndex;
+			plDesc.vertexDescriptor.attributes[output.location].bufferIndex = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding);
 			plDesc.vertexDescriptor.attributes[output.location].format = getPixelFormats()->getMTLVertexFormat(mvkFormatFromOutput(output));
 			plDesc.vertexDescriptor.attributes[output.location].offset = offset;
 			offset += getShaderOutputSize(output);
@@ -896,16 +896,19 @@
 		}
 	}
 	if (usedPerVertex) {
-		plDesc.vertexDescriptor.layouts[kMVKTessEvalInputBufferIndex].stepFunction = MTLVertexStepFunctionPerPatchControlPoint;
-		plDesc.vertexDescriptor.layouts[kMVKTessEvalInputBufferIndex].stride = mvkAlignByteCount(offset, getShaderOutputAlignment(*firstVertex));
+		uint32_t mtlVBIdx = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalInputBufferBinding);
+		plDesc.vertexDescriptor.layouts[mtlVBIdx].stepFunction = MTLVertexStepFunctionPerPatchControlPoint;
+		plDesc.vertexDescriptor.layouts[mtlVBIdx].stride = mvkAlignByteCount(offset, getShaderOutputAlignment(*firstVertex));
 	}
 	if (usedPerPatch) {
-		plDesc.vertexDescriptor.layouts[kMVKTessEvalPatchInputBufferIndex].stepFunction = MTLVertexStepFunctionPerPatch;
-		plDesc.vertexDescriptor.layouts[kMVKTessEvalPatchInputBufferIndex].stride = mvkAlignByteCount(patchOffset, getShaderOutputAlignment(*firstPatch));
+		uint32_t mtlVBIdx = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalPatchInputBufferBinding);
+		plDesc.vertexDescriptor.layouts[mtlVBIdx].stepFunction = MTLVertexStepFunctionPerPatch;
+		plDesc.vertexDescriptor.layouts[mtlVBIdx].stride = mvkAlignByteCount(patchOffset, getShaderOutputAlignment(*firstPatch));
 	}
 	if (outerLoc != (uint32_t)(-1) || innerLoc != (uint32_t)(-1)) {
-		plDesc.vertexDescriptor.layouts[kMVKTessEvalLevelBufferIndex].stepFunction = MTLVertexStepFunctionPerPatch;
-		plDesc.vertexDescriptor.layouts[kMVKTessEvalLevelBufferIndex].stride =
+		uint32_t mtlVBIdx = getMetalBufferIndexForVertexAttributeBinding(kMVKTessEvalLevelBufferBinding);
+		plDesc.vertexDescriptor.layouts[mtlVBIdx].stepFunction = MTLVertexStepFunctionPerPatch;
+		plDesc.vertexDescriptor.layouts[mtlVBIdx].stride =
 			reflectData.patchKind == spv::ExecutionModeTriangles ? sizeof(MTLTriangleTessellationFactorsHalf) :
 																   sizeof(MTLQuadTessellationFactorsHalf);
 	}
@@ -1074,7 +1077,7 @@
 	shaderConfig.options.entryPointName = _pTessCtlSS->pName;
 	shaderConfig.options.mslOptions.swizzle_buffer_index = _swizzleBufferIndex.stages[kMVKShaderStageTessCtl];
 	shaderConfig.options.mslOptions.indirect_params_buffer_index = _indirectParamsIndex.stages[kMVKShaderStageTessCtl];
-	shaderConfig.options.mslOptions.shader_input_buffer_index = kMVKTessCtlInputBufferIndex;
+	shaderConfig.options.mslOptions.shader_input_buffer_index = getMetalBufferIndexForVertexAttributeBinding(kMVKTessCtlInputBufferBinding);
 	shaderConfig.options.mslOptions.shader_output_buffer_index = _outputBufferIndex.stages[kMVKShaderStageTessCtl];
 	shaderConfig.options.mslOptions.shader_patch_output_buffer_index = _tessCtlPatchOutputBufferIndex;
 	shaderConfig.options.mslOptions.shader_tess_factor_buffer_index = _tessCtlLevelBufferIndex;
@@ -1571,16 +1574,17 @@
 	// FIXME: Many of these are optional. We shouldn't set the ones that aren't
 	// present--or at least, we should move the ones that are down to avoid running over
 	// the limit of available buffers. But we can't know that until we compile the shaders.
+	initReservedVertexAttributeBufferCount(pCreateInfo);
 	for (uint32_t i = kMVKShaderStageVertex; i < kMVKShaderStageCount; i++) {
 		MVKShaderStage stage = (MVKShaderStage)i;
-		_dynamicOffsetBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 0);
-		_bufferSizeBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 1);
-		_swizzleBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 2);
-		_indirectParamsIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 3);
-		_outputBufferIndex.stages[stage] = getImplicitBufferIndex(pCreateInfo, stage, 4);
+		_dynamicOffsetBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 0);
+		_bufferSizeBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 1);
+		_swizzleBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 2);
+		_indirectParamsIndex.stages[stage] = getImplicitBufferIndex(stage, 3);
+		_outputBufferIndex.stages[stage] = getImplicitBufferIndex(stage, 4);
 		if (stage == kMVKShaderStageTessCtl) {
-			_tessCtlPatchOutputBufferIndex = getImplicitBufferIndex(pCreateInfo, stage, 5);
-			_tessCtlLevelBufferIndex = getImplicitBufferIndex(pCreateInfo, stage, 6);
+			_tessCtlPatchOutputBufferIndex = getImplicitBufferIndex(stage, 5);
+			_tessCtlLevelBufferIndex = getImplicitBufferIndex(stage, 6);
 		}
 	}
 	// Since we currently can't use multiview with tessellation or geometry shaders,
@@ -1630,17 +1634,48 @@
     shaderConfig.options.numTessControlPoints = reflectData.numControlPoints;
 }
 
-uint32_t MVKGraphicsPipeline::getImplicitBufferIndex(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage, uint32_t bufferIndexOffset) {
-	return _device->_pMetalFeatures->maxPerStageBufferCount - (getReservedBufferCount(pCreateInfo, stage) + bufferIndexOffset + 1);
+uint32_t MVKGraphicsPipeline::getImplicitBufferIndex(MVKShaderStage stage, uint32_t bufferIndexOffset) {
+	return getMetalBufferIndexForVertexAttributeBinding(_reservedVertexAttributeBufferCount.stages[stage] + bufferIndexOffset);
 }
 
-uint32_t MVKGraphicsPipeline::getReservedBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo, MVKShaderStage stage) {
-	switch (stage) {
-		case kMVKShaderStageVertex:		return pCreateInfo->pVertexInputState->vertexBindingDescriptionCount;
-		case kMVKShaderStageTessCtl:	return kMVKTessCtlNumReservedBuffers;
-		case kMVKShaderStageTessEval:	return kMVKTessEvalNumReservedBuffers;
-		default:						return 0;
+// Set the number of vertex attribute buffers consumed by this pipeline at each stage.
+// Any implicit buffers needed by this pipeline will be assigned Metal buffer indexes below the
+// block of indexes this count reserves at the top of the per-stage Metal buffer binding range.
+// Must be called before any calls to getImplicitBufferIndex().
+void MVKGraphicsPipeline::initReservedVertexAttributeBufferCount(const VkGraphicsPipelineCreateInfo* pCreateInfo) {
+	int32_t maxBinding = -1;
+	uint32_t xltdBuffCnt = 0;
+
+	const VkPipelineVertexInputStateCreateInfo* pVI = pCreateInfo->pVertexInputState;
+	uint32_t vaCnt = pVI->vertexAttributeDescriptionCount;
+	uint32_t vbCnt = pVI->vertexBindingDescriptionCount;
+
+	// Determine the highest binding number used by the vertex buffers
+	for (uint32_t vbIdx = 0; vbIdx < vbCnt; vbIdx++) {
+		const VkVertexInputBindingDescription* pVKVB = &pVI->pVertexBindingDescriptions[vbIdx];
+		maxBinding = max<int32_t>(pVKVB->binding, maxBinding);
+
+		// Iterate through the vertex attributes and determine if any need a synthetic binding buffer to
+		// accommodate offsets that are outside the stride, which Vulkan supports, but Metal does not.
+		// This value will be worst case, as some synthetic buffers may end up being shared.
+		for (uint32_t vaIdx = 0; vaIdx < vaCnt; vaIdx++) {
+			const VkVertexInputAttributeDescription* pVKVA = &pVI->pVertexAttributeDescriptions[vaIdx];
+			if ((pVKVA->binding == pVKVB->binding) && (pVKVA->offset + getPixelFormats()->getBytesPerBlock(pVKVA->format) > pVKVB->stride)) {
+				xltdBuffCnt++;
+			}
+		}
 	}
+
+	// The number of reserved bindings we need for the vertex stage is determined from the largest vertex
+	// attribute binding number, plus any synthetic buffer bindings created to support translated offsets.
+	mvkClear<uint32_t>(_reservedVertexAttributeBufferCount.stages, kMVKShaderStageCount);
+	_reservedVertexAttributeBufferCount.stages[kMVKShaderStageVertex] = (maxBinding + 1) + xltdBuffCnt;
+	_reservedVertexAttributeBufferCount.stages[kMVKShaderStageTessCtl] = kMVKTessCtlNumReservedBuffers;
+	_reservedVertexAttributeBufferCount.stages[kMVKShaderStageTessEval] = kMVKTessEvalNumReservedBuffers;
+}
+
+bool MVKGraphicsPipeline::isValidVertexBufferIndex(MVKShaderStage stage, uint32_t mtlBufferIndex) {
+	return mtlBufferIndex < _descriptorBufferCounts.stages[stage] || mtlBufferIndex > getImplicitBufferIndex(stage, 0);
 }
 
 // Initializes the vertex attributes in a shader conversion configuration.
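
For a concrete sense of the index remapping: assuming the common Metal per-stage limit of 31 buffers and that `getMetalBufferIndexForVertexAttributeBinding(binding)` returns `(maxPerStageBufferCount - 1) - binding` (consistent with the removed constants 30, 29, 28 mapping to the new bindings 0, 1, 2), the new binding constants resolve to the same Metal indexes as before, and `isValidVertexBufferIndex()` partitions the index range as sketched below (illustrative values only):

```cpp
#include <cassert>
#include <cstdint>

// Assumed mapping, for illustration only: vertex attribute bindings count down
// from the top of the per-stage Metal buffer range (31 buffers assumed here).
constexpr uint32_t kMaxPerStageBufferCount = 31;
constexpr uint32_t metalBufferIndexForVertexAttributeBinding(uint32_t binding) {
    return (kMaxPerStageBufferCount - 1) - binding;
}

int main() {
    // The new tess-eval binding constants land on the same Metal indexes that
    // the removed hard-coded constants used (30, 29, 28).
    static_assert(metalBufferIndexForVertexAttributeBinding(0) == 30, "");  // kMVKTessEvalInputBufferBinding
    static_assert(metalBufferIndexForVertexAttributeBinding(1) == 29, "");  // kMVKTessEvalPatchInputBufferBinding
    static_assert(metalBufferIndexForVertexAttributeBinding(2) == 28, "");  // kMVKTessEvalLevelBufferBinding

    // Vertex stage example: 2 vertex buffer bindings (max binding = 1), no synthetic
    // translated buffers, 4 descriptor buffers. Implicit buffers start just below the
    // reserved vertex-attribute block, so indexes 4..28 are not valid vertex buffer targets.
    uint32_t reservedVtxAttrBuffCnt = 2;                 // (maxBinding + 1) + xltdBuffCnt
    uint32_t descriptorBufferCount  = 4;
    uint32_t firstImplicitBufferIdx = metalBufferIndexForVertexAttributeBinding(reservedVtxAttrBuffCnt);  // 28
    auto isValidVertexBufferIndex = [&](uint32_t idx) {
        return idx < descriptorBufferCount || idx > firstImplicitBufferIdx;
    };
    assert(isValidVertexBufferIndex(3));    // descriptor buffer
    assert(isValidVertexBufferIndex(30));   // vertex attribute binding 0
    assert(!isValidVertexBufferIndex(28));  // first implicit buffer index
    return 0;
}
```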
diff --git a/MoltenVK/MoltenVK/Layers/MVKExtensions.def b/MoltenVK/MoltenVK/Layers/MVKExtensions.def
index 019b23b..927e233 100644
--- a/MoltenVK/MoltenVK/Layers/MVKExtensions.def
+++ b/MoltenVK/MoltenVK/Layers/MVKExtensions.def
@@ -58,6 +58,7 @@
 MVK_EXTENSION(KHR_external_memory_capabilities,    KHR_EXTERNAL_MEMORY_CAPABILITIES,     INSTANCE, 10.11,  8.0)
 MVK_EXTENSION(KHR_external_semaphore,              KHR_EXTERNAL_SEMAPHORE,               DEVICE,   10.11,  8.0)
 MVK_EXTENSION(KHR_external_semaphore_capabilities, KHR_EXTERNAL_SEMAPHORE_CAPABILITIES,  INSTANCE, 10.11,  8.0)
+MVK_EXTENSION(KHR_fragment_shader_barycentric,     KHR_FRAGMENT_SHADER_BARYCENTRIC,      DEVICE,   10.15, 14.0)
 MVK_EXTENSION(KHR_get_memory_requirements2,        KHR_GET_MEMORY_REQUIREMENTS_2,        DEVICE,   10.11,  8.0)
 MVK_EXTENSION(KHR_get_physical_device_properties2, KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2, INSTANCE, 10.11,  8.0)
 MVK_EXTENSION(KHR_get_surface_capabilities2,       KHR_GET_SURFACE_CAPABILITIES_2,       INSTANCE, 10.11,  8.0)
@@ -117,6 +118,7 @@
 MVK_EXTENSION(MVK_ios_surface,                     MVK_IOS_SURFACE,                      INSTANCE, MVK_NA, 8.0)
 MVK_EXTENSION(MVK_macos_surface,                   MVK_MACOS_SURFACE,                    INSTANCE, 10.11, MVK_NA)
 MVK_EXTENSION(MVK_moltenvk,                        MVK_MOLTENVK,                         INSTANCE, 10.11,  8.0)
+MVK_EXTENSION(NV_fragment_shader_barycentric,      NV_FRAGMENT_SHADER_BARYCENTRIC,       DEVICE,   10.15, 14.0)
 MVK_EXTENSION_LAST(NV_glsl_shader,                 NV_GLSL_SHADER,                       DEVICE,   10.11,  8.0)
 
 #undef MVK_EXTENSION
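
With the extension rows above in place, an application would enable the extension at device creation in the usual way; a hypothetical sketch, assuming the earlier feature query succeeded:

```cpp
#include <vulkan/vulkan.h>

// Hypothetical device creation enabling the new extension and its feature struct.
static VkResult createDeviceWithBarycentrics(VkPhysicalDevice physDev,
                                             const VkDeviceQueueCreateInfo* queueCI,
                                             VkDevice* outDevice) {
    const char* extNames[] = { VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME };

    VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR baryFeats{};
    baryFeats.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR;
    baryFeats.fragmentShaderBarycentric = VK_TRUE;

    VkDeviceCreateInfo devCI{};
    devCI.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    devCI.pNext = &baryFeats;
    devCI.queueCreateInfoCount = 1;
    devCI.pQueueCreateInfos = queueCI;
    devCI.enabledExtensionCount = 1;
    devCI.ppEnabledExtensionNames = extNames;
    return vkCreateDevice(physDev, &devCI, nullptr, outDevice);
}
```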