Add MVKSmallVector as a more memory-efficient substitute for MVKVector.

Replicate MVKVector.h and MVKVectorAllocator.h into MVKSmallVector.h and
MVKSmallVectorAllocator.h, and collapse class hierarchies.
Add MVKArrayRef struct to allow the contents of MVKSmallVector and MVKVector
to be passed between functions consistently.
Add contents() function to MVKVector to return MVKArrayRef.
diff --git a/MoltenVK/MoltenVK.xcodeproj/project.pbxproj b/MoltenVK/MoltenVK.xcodeproj/project.pbxproj
index cf3c94c..83265aa 100644
--- a/MoltenVK/MoltenVK.xcodeproj/project.pbxproj
+++ b/MoltenVK/MoltenVK.xcodeproj/project.pbxproj
@@ -233,6 +233,10 @@
 		A9F042A51FB4CF83009FCCB8 /* MVKCommonEnvironment.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F0429D1FB4CF82009FCCB8 /* MVKCommonEnvironment.h */; };
 		A9F042A61FB4CF83009FCCB8 /* MVKLogging.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F0429E1FB4CF82009FCCB8 /* MVKLogging.h */; };
 		A9F042A71FB4CF83009FCCB8 /* MVKLogging.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F0429E1FB4CF82009FCCB8 /* MVKLogging.h */; };
+		A9F3D9DC24732A4D00745190 /* MVKSmallVectorAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F3D9D924732A4C00745190 /* MVKSmallVectorAllocator.h */; };
+		A9F3D9DD24732A4D00745190 /* MVKSmallVectorAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F3D9D924732A4C00745190 /* MVKSmallVectorAllocator.h */; };
+		A9F3D9DE24732A4D00745190 /* MVKSmallVector.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F3D9DB24732A4D00745190 /* MVKSmallVector.h */; };
+		A9F3D9DF24732A4D00745190 /* MVKSmallVector.h in Headers */ = {isa = PBXBuildFile; fileRef = A9F3D9DB24732A4D00745190 /* MVKSmallVector.h */; };
 /* End PBXBuildFile section */
 
 /* Begin PBXContainerItemProxy section */
@@ -424,6 +428,8 @@
 		A9F0429D1FB4CF82009FCCB8 /* MVKCommonEnvironment.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKCommonEnvironment.h; sourceTree = "<group>"; };
 		A9F0429E1FB4CF82009FCCB8 /* MVKLogging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKLogging.h; sourceTree = "<group>"; };
 		A9F2559121F96814008C7785 /* vulkan-portability */ = {isa = PBXFileReference; lastKnownFileType = folder; path = "vulkan-portability"; sourceTree = "<group>"; };
+		A9F3D9D924732A4C00745190 /* MVKSmallVectorAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKSmallVectorAllocator.h; sourceTree = "<group>"; };
+		A9F3D9DB24732A4D00745190 /* MVKSmallVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MVKSmallVector.h; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXGroup section */
@@ -570,6 +576,8 @@
 				A98149451FB6A3F7005F00B4 /* MVKFoundation.cpp */,
 				A98149441FB6A3F7005F00B4 /* MVKFoundation.h */,
 				A98149461FB6A3F7005F00B4 /* MVKObjectPool.h */,
+				A9F3D9DB24732A4D00745190 /* MVKSmallVector.h */,
+				A9F3D9D924732A4C00745190 /* MVKSmallVectorAllocator.h */,
 				83A4AD2521BD75570006C935 /* MVKVector.h */,
 				83A4AD2921BD75570006C935 /* MVKVectorAllocator.h */,
 				A98149491FB6A3F7005F00B4 /* MVKWatermark.h */,
@@ -688,6 +696,7 @@
 				A9E53DE32100B197002781DD /* MTLSamplerDescriptor+MoltenVK.h in Headers */,
 				A94FB8181C7DFB4800632CA3 /* MVKSync.h in Headers */,
 				A94FB7E41C7DFB4800632CA3 /* MVKDevice.h in Headers */,
+				A9F3D9DE24732A4D00745190 /* MVKSmallVector.h in Headers */,
 				83A4AD2A21BD75570006C935 /* MVKVector.h in Headers */,
 				A94FB7D41C7DFB4800632CA3 /* MVKCommandPool.h in Headers */,
 				A94FB80C1C7DFB4800632CA3 /* MVKShaderModule.h in Headers */,
@@ -705,6 +714,7 @@
 				A9B51BD9225E986A00AC74D2 /* MVKOSExtensions.h in Headers */,
 				A94FB7C41C7DFB4800632CA3 /* MVKCmdRenderPass.h in Headers */,
 				A94FB7BC1C7DFB4800632CA3 /* MVKCmdPipeline.h in Headers */,
+				A9F3D9DC24732A4D00745190 /* MVKSmallVectorAllocator.h in Headers */,
 				A94FB7F81C7DFB4800632CA3 /* MVKPipeline.h in Headers */,
 				A94FB7F01C7DFB4800632CA3 /* MVKImage.h in Headers */,
 				4553AEFD2251617100E8EBCD /* MVKBlockObserver.h in Headers */,
@@ -758,6 +768,7 @@
 				A9E53DE42100B197002781DD /* MTLSamplerDescriptor+MoltenVK.h in Headers */,
 				A94FB8191C7DFB4800632CA3 /* MVKSync.h in Headers */,
 				A94FB7E51C7DFB4800632CA3 /* MVKDevice.h in Headers */,
+				A9F3D9DF24732A4D00745190 /* MVKSmallVector.h in Headers */,
 				83A4AD2B21BD75570006C935 /* MVKVector.h in Headers */,
 				A94FB7D51C7DFB4800632CA3 /* MVKCommandPool.h in Headers */,
 				A94FB80D1C7DFB4800632CA3 /* MVKShaderModule.h in Headers */,
@@ -775,6 +786,7 @@
 				A9B51BDA225E986A00AC74D2 /* MVKOSExtensions.h in Headers */,
 				A94FB7C51C7DFB4800632CA3 /* MVKCmdRenderPass.h in Headers */,
 				A94FB7BD1C7DFB4800632CA3 /* MVKCmdPipeline.h in Headers */,
+				A9F3D9DD24732A4D00745190 /* MVKSmallVectorAllocator.h in Headers */,
 				A94FB7F91C7DFB4800632CA3 /* MVKPipeline.h in Headers */,
 				A94FB7F11C7DFB4800632CA3 /* MVKImage.h in Headers */,
 				4553AEFE2251617100E8EBCD /* MVKBlockObserver.h in Headers */,
diff --git a/MoltenVK/MoltenVK/Utility/MVKFoundation.h b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
index d4ae601..a0f0eac 100644
--- a/MoltenVK/MoltenVK/Utility/MVKFoundation.h
+++ b/MoltenVK/MoltenVK/Utility/MVKFoundation.h
@@ -370,6 +370,23 @@
 
 #pragma mark Containers
 
+/**
+ * Structure to reference an array of typed elements in contiguous memory.
+ * Allocation and management of the memory is handled externally.
+ */
+template<typename Type>
+struct MVKArrayRef {
+	Type* data;
+	const size_t size;
+
+	const Type* begin() const { return data; }
+	const Type* end() const { return &data[size]; }
+	const Type& operator[]( const size_t i ) const { return data[i]; }
+	Type& operator[]( const size_t i ) { return data[i]; }
+	MVKArrayRef() : MVKArrayRef(nullptr, 0) {}
+	MVKArrayRef(Type* d, size_t s) : data(d), size(s) {}
+};
+
 /** Ensures the size of the specified container is at least the specified size. */
 template<typename C, typename S>
 void mvkEnsureSize(C& container, S size) {
diff --git a/MoltenVK/MoltenVK/Utility/MVKSmallVector.h b/MoltenVK/MoltenVK/Utility/MVKSmallVector.h
new file mode 100755
index 0000000..90da4cc
--- /dev/null
+++ b/MoltenVK/MoltenVK/Utility/MVKSmallVector.h
@@ -0,0 +1,858 @@
+/*

+ * MVKSmallVector.h

+ *

+ * Copyright (c) 2012-2020 Dr. Torsten Hans (hans@ipacs.de)

+ *

+ * Licensed under the Apache License, Version 2.0 (the "License");

+ * you may not use this file except in compliance with the License.

+ * You may obtain a copy of the License at

+ * 

+ *     http://www.apache.org/licenses/LICENSE-2.0

+ * 

+ * Unless required by applicable law or agreed to in writing, software

+ * distributed under the License is distributed on an "AS IS" BASIS,

+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * See the License for the specific language governing permissions and

+ * limitations under the License.

+ */

+

+#pragma once

+

+//

+// in case MVKSmallVector should use std::vector

+//

+#if 0

+

+template<typename T, size_t N = 0>

+using MVKSmallVector = std::vector<T>;

+

+#else

+

+//

+// MVKSmallVector is a sequence container that (optionally) implements a small

+// buffer optimization.

+// It behaves similarly to std::vector, except until a certain number of

+// elements are reserved, it does not use the heap.

+// Like std::vector, MVKSmallVector is guaranteed to use contiguous memory, so if the

+// preallocated number of elements are exceeded, all elements are then in heap.

+// MVKSmallVector supports just the necessary members to be compatible with MoltenVK.

+// If C++17 becomes the default in the future, the code can be simplified quite a bit.

+//

+// Example:

+//

+//  MVKSmallVector<int, 3> vector;

+//  vector.emplace_back( 1 );

+//  vector.emplace_back( 2 );

+//  vector.emplace_back( 3 );

+//  // adding another element now reserves memory from heap

+//  vector.emplace_back( 4 );

+//

+// If you don't need any inline storage use

+//  MVKSmallVector<int> vector;   // this is essentially the same as using std::vector

+//

+// The memory overhead per-instance of MVKSmallVector (16 bytes) is smaller than MVKVector (40 bytes),

+// but MVKSmallVector lacks the polymorphism of MVKVector, that allows MVKVector to be passed around

+// to functions without reference to its pre-allocation size. MVKSmallVector supports the contents()

+// function to derive an MVKArrayRef from its contents, to allow a reference to the contents to be

+// passed around without reference to its pre-allocation size.

+

+#include "MVKSmallVectorAllocator.h"

+#include "MVKFoundation.h"

+#include <type_traits>

+#include <initializer_list>

+#include <utility>

+

+

+template<typename Type, typename Allocator = mvk_smallvector_allocator<Type, 0>>

+class MVKSmallVectorImpl

+{

+  Allocator  alc;

+  

+public:

+  class iterator : public std::iterator<std::forward_iterator_tag, Type>

+  {

+    const MVKSmallVectorImpl *vector;

+    size_t               index;

+

+  public:

+    iterator() = delete;

+    iterator( const size_t _index, const MVKSmallVectorImpl &_vector ) : vector{ &_vector }, index{ _index } { }

+

+    iterator &operator=( const iterator &it )

+    {

+      vector = it.vector;

+      index  = it.index;

+      return *this;

+    }

+

+    Type *operator->() { return &vector->alc.ptr[index]; }

+    Type &operator*()  { return  vector->alc.ptr[index]; }

+    operator Type*()   { return &vector->alc.ptr[index]; }

+

+    bool operator==( const iterator &it ) const { return vector == it.vector && index == it.index; }

+    bool operator!=( const iterator &it ) const { return vector != it.vector || index != it.index; }

+

+    iterator& operator++()      {                 ++index; return *this; }

+    iterator  operator++( int ) { auto t = *this; ++index; return t; }

+

+    bool   is_valid()     const { return index < vector->alc.size(); }

+    size_t get_position() const { return index; }

+  };

+

+private:

+  // this is the growth strategy -> adjust to your needs

+  size_t vector_GetNextCapacity() const

+  {

+    constexpr auto ELEMENTS_FOR_64_BYTES = 64 / sizeof( Type );

+    constexpr auto MINIMUM_CAPACITY = ELEMENTS_FOR_64_BYTES > 4 ? ELEMENTS_FOR_64_BYTES : 4;

+    const auto current_capacity = capacity();

+    return MINIMUM_CAPACITY + ( 3 * current_capacity ) / 2;

+  }

+

+  void vector_Allocate( const size_t s )

+  {

+    const auto new_reserved_size = s > size() ? s : size();

+

+    alc.allocate( new_reserved_size );

+  }

+

+  void vector_ReAllocate( const size_t s )

+  {

+    alc.re_allocate( s );

+  }

+

+public:

+  MVKSmallVectorImpl()

+  {

+  }

+

+  MVKSmallVectorImpl( const size_t n, const Type t )

+  {

+    if( n > 0 )

+    {

+      alc.allocate( n );

+

+      for( size_t i = 0; i < n; ++i )

+      {

+        alc.construct( &alc.ptr[i], t );

+      }

+

+      alc.num_elements_used = n;

+    }

+  }

+

+  MVKSmallVectorImpl( const MVKSmallVectorImpl &a )

+  {

+    const size_t n = a.size();

+

+    if( n > 0 )

+    {

+      alc.allocate( n );

+

+      for( size_t i = 0; i < n; ++i )

+      {

+        alc.construct( &alc.ptr[i], a.alc.ptr[i] );

+      }

+

+      alc.num_elements_used = n;

+    }

+  }

+

+  template<typename U>

+  MVKSmallVectorImpl( const U &a )

+  {

+    const size_t n = a.size();

+

+    if( n > 0 )

+    {

+      alc.allocate( n );

+

+      for( size_t i = 0; i < n; ++i )

+      {

+        alc.construct( &alc.ptr[i], a[i] );

+      }

+

+      alc.num_elements_used = n;

+    }

+  }

+

+  MVKSmallVectorImpl( MVKSmallVectorImpl &&a ) : alc{ std::move( a.alc ) }

+  {

+  }

+

+  MVKSmallVectorImpl( std::initializer_list<Type> vector )

+  {

+    if( vector.size() > capacity() )

+    {

+      vector_Allocate( vector.size() );

+    }

+

+    // std::initializer_list does not yet support std::move; we use it anyway, but it has no effect

+    for( auto &&element : vector )

+    {

+      alc.construct( &alc.ptr[alc.num_elements_used], std::move( element ) );

+      ++alc.num_elements_used;

+    }

+  }

+

+  ~MVKSmallVectorImpl()

+  {

+  }

+

+  template<typename U>

+  MVKSmallVectorImpl& operator=( const U &a )

+  {

+    static_assert( std::is_base_of<MVKSmallVectorImpl<Type>, U>::value, "argument is not of type MVKSmallVectorImpl" );

+

+    if( this != reinterpret_cast<const MVKSmallVectorImpl<Type>*>( &a ) )

+    {

+      const auto n = a.size();

+

+      if( alc.num_elements_used == n )

+      {

+        for( size_t i = 0; i < n; ++i )

+        {

+          alc.ptr[i] = a.alc.ptr[i];

+        }

+      }

+      else

+      {

+        if( n > capacity() )

+        {

+          vector_ReAllocate( n );

+        }

+        else

+        {

+          alc.template destruct_all<Type>();

+        }

+

+        for( size_t i = 0; i < n; ++i )

+        {

+          alc.construct( &alc.ptr[i], a[i] );

+        }

+

+        alc.num_elements_used = n;

+      }

+    }

+

+    return *this;

+  }

+

+  MVKSmallVectorImpl& operator=( MVKSmallVectorImpl &&a )

+  {

+    alc.swap( a.alc );

+    return *this;

+  }

+

+  bool operator==( const MVKSmallVectorImpl &a ) const

+  {

+    if( alc.num_elements_used != a.alc.num_elements_used )

+      return false;

+    for( size_t i = 0; i < alc.num_elements_used; ++i )

+    {

+      if( alc[i] != a.alc[i] )

+        return false;

+    }

+    return true;

+  }

+

+  bool operator!=( const MVKSmallVectorImpl &a ) const

+  {

+    if( alc.num_elements_used != a.alc.num_elements_used )

+      return true;

+    for( size_t i = 0; i < alc.num_elements_used; ++i )

+    {

+      if( alc.ptr[i] != a.alc[i] )

+        return true;

+    }

+    return false;

+  }

+

+  void swap( MVKSmallVectorImpl &a )

+  {

+    alc.swap( a.alc );

+  }

+

+  iterator begin() const { return iterator( 0, *this ); }

+  iterator end()   const { return iterator( alc.num_elements_used, *this ); }

+

+  const MVKArrayRef<Type> contents() const { return MVKArrayRef<Type>(data(), size()); }

+        MVKArrayRef<Type> contents()       { return MVKArrayRef<Type>(data(), size()); }

+

+  const Type &operator[]( const size_t i ) const { return alc[i]; }

+        Type &operator[]( const size_t i )        { return alc[i]; }

+  const Type &at( const size_t i )         const { return alc[i]; }

+        Type &at( const size_t i )                { return alc[i]; }

+  const Type &front()                      const  { return alc[0]; }

+        Type &front()                             { return alc[0]; }

+  const Type &back()                       const  { return alc[alc.num_elements_used - 1]; }

+        Type &back()                              { return alc[alc.num_elements_used - 1]; }

+  const Type *data()                       const  { return alc.ptr; }

+        Type *data()                              { return alc.ptr; }

+

+  size_t      size()                       const { return alc.num_elements_used; }

+  bool        empty()                      const { return alc.num_elements_used == 0; }

+  size_t      capacity()                   const { return alc.get_capacity(); }

+

+  void pop_back()

+  {

+    if( alc.num_elements_used > 0 )

+    {

+      --alc.num_elements_used;

+      alc.destruct( &alc.ptr[alc.num_elements_used] );

+    }

+  }

+

+  void clear()

+  {

+    alc.template destruct_all<Type>();

+  }

+

+  void reset()

+  {

+    alc.deallocate();

+  }

+

+  void reserve( const size_t new_size )

+  {

+    if( new_size > capacity() )

+    {

+      vector_ReAllocate( new_size );

+    }

+  }

+

+  void assign( const size_t new_size, const Type &t )

+  {

+    if( new_size <= capacity() )

+    {

+      clear();

+    }

+    else

+    {

+      vector_Allocate( new_size );

+    }

+

+    for( size_t i = 0; i < new_size; ++i )

+    {

+      alc.construct( &alc.ptr[i], t );

+    }

+

+    alc.num_elements_used = new_size;

+  }

+

+  template <class InputIterator>

+  void assign( InputIterator first, InputIterator last )

+  {

+    clear();

+

+    while( first != last )

+    {

+      emplace_back( *first );

+      ++first;

+    }

+  }

+

+  void resize( const size_t new_size, const Type t = { } )

+  {

+    if( new_size == alc.num_elements_used )

+    {

+      return;

+    }

+

+    if( new_size == 0 )

+    {

+      clear();

+      return;

+    }

+

+    if( new_size > alc.num_elements_used )

+    {

+      if( new_size > capacity() )

+      {

+        vector_ReAllocate( new_size );

+      }

+

+      while( alc.num_elements_used < new_size )

+      {

+        alc.construct( &alc.ptr[alc.num_elements_used], t );

+        ++alc.num_elements_used;

+      }

+    }

+    else

+    {

+      //if constexpr( !std::is_trivially_destructible<Type>::value )

+      {

+        while( alc.num_elements_used > new_size )

+        {

+          --alc.num_elements_used;

+          alc.destruct( &alc.ptr[alc.num_elements_used] );

+        }

+      }

+      //else

+      //{

+      //  alc.num_elements_used = new_size;

+      //}

+    }

+  }

+

+  // trims the capacity of the MVKSmallVectorImpl to the number of used elements

+  void shrink_to_fit()

+  {

+    alc.shrink_to_fit();

+  }

+

+  void erase( const iterator it )

+  {

+    if( it.is_valid() )

+    {

+      --alc.num_elements_used;

+

+      for( size_t i = it.get_position(); i < alc.num_elements_used; ++i )

+      {

+        alc.ptr[i] = std::move( alc.ptr[i + 1] );

+      }

+

+      // this is required for types with a destructor

+      alc.destruct( &alc.ptr[alc.num_elements_used] );

+    }

+  }

+

+  void erase( const iterator first, const iterator last )

+  {

+    if( first.is_valid() )

+    {

+      size_t last_pos = last.is_valid() ? last.get_position() : size();

+      size_t n = last_pos - first.get_position();

+      alc.num_elements_used -= n;

+

+      for( size_t i = first.get_position(), e = last_pos; i < alc.num_elements_used && e < alc.num_elements_used + n; ++i, ++e )

+      {

+        alc.ptr[i] = std::move( alc.ptr[e] );

+      }

+

+      // this is required for types with a destructor

+      for( size_t i = alc.num_elements_used; i < alc.num_elements_used + n; ++i )

+      {

+        alc.destruct( &alc.ptr[i] );

+      }

+    }

+  }

+

+  // adds t before it and automatically resizes vector if necessary

+  void insert( const iterator it, Type t )

+  {

+    if( !it.is_valid() || alc.num_elements_used == 0 )

+    {

+      push_back( std::move( t ) );

+    }

+    else

+    {

+      if( alc.num_elements_used == capacity() )

+        vector_ReAllocate( vector_GetNextCapacity() );

+

+      // move construct last element

+      alc.construct( &alc.ptr[alc.num_elements_used], std::move( alc.ptr[alc.num_elements_used - 1] ) );

+

+      // move the remaining elements

+      const size_t it_position = it.get_position();

+      for( size_t i = alc.num_elements_used - 1; i > it_position; --i )

+      {

+        alc.ptr[i] = std::move( alc.ptr[i - 1] );

+      }

+

+      alc.ptr[it_position] = std::move( t );

+      ++alc.num_elements_used;

+    }

+  }

+

+  void push_back( const Type &t )

+  {

+    if( alc.num_elements_used == capacity() )

+      vector_ReAllocate( vector_GetNextCapacity() );

+

+    alc.construct( &alc.ptr[alc.num_elements_used], t );

+    ++alc.num_elements_used;

+  }

+

+  void push_back( Type &&t )

+  {

+    if( alc.num_elements_used == capacity() )

+      vector_ReAllocate( vector_GetNextCapacity() );

+

+    alc.construct( &alc.ptr[alc.num_elements_used], std::forward<Type>( t ) );

+    ++alc.num_elements_used;

+  }

+

+  template<class... Args>

+  Type &emplace_back( Args&&... args )

+  {

+    if( alc.num_elements_used == capacity() )

+      vector_ReAllocate( vector_GetNextCapacity() );

+

+    alc.construct( &alc.ptr[alc.num_elements_used], std::forward<Args>( args )... );

+    ++alc.num_elements_used;

+

+    return alc.ptr[alc.num_elements_used - 1];

+  }

+};

+

+// specialization for pointer types

+template<typename Type, typename Allocator>

+class MVKSmallVectorImpl<Type*, Allocator>

+{

+

+  Allocator  alc;

+

+public:

+  class iterator : public std::iterator<std::forward_iterator_tag, Type*>

+  {

+    MVKSmallVectorImpl *vector;

+    size_t         index;

+

+  public:

+    iterator() = delete;

+    iterator( const size_t _index, MVKSmallVectorImpl &_vector ) : vector{ &_vector }, index{ _index } { }

+

+    iterator &operator=( const iterator &it )

+    {

+      vector = it.vector;

+      index = it.index;

+      return *this;

+    }

+

+    Type *&operator*() { return vector->alc[index]; }

+

+    bool operator==( const iterator &it ) const { return vector == it.vector && index == it.index; }

+    bool operator!=( const iterator &it ) const { return vector != it.vector || index != it.index; }

+

+    iterator& operator++() { ++index; return *this; }

+    iterator  operator++( int ) { auto t = *this; ++index; return t; }

+

+    bool   is_valid()     const { return index < vector->alc.size(); }

+    size_t get_position() const { return index; }

+  };

+

+private:

+  // this is the growth strategy -> adjust to your needs

+  size_t vector_GetNextCapacity() const

+  {

+    constexpr auto ELEMENTS_FOR_64_BYTES = 64 / sizeof( Type* );

+    constexpr auto MINIMUM_CAPACITY = ELEMENTS_FOR_64_BYTES > 4 ? ELEMENTS_FOR_64_BYTES : 4;

+    const auto current_capacity = capacity();

+    return MINIMUM_CAPACITY + ( 3 * current_capacity ) / 2;

+  }

+

+  void vector_Allocate( const size_t s )

+  {

+    const auto new_reserved_size = s > size() ? s : size();

+

+    alc.allocate( new_reserved_size );

+  }

+

+  void vector_ReAllocate( const size_t s )

+  {

+    alc.re_allocate( s );

+  }

+

+public:

+  MVKSmallVectorImpl()

+  {

+  }

+

+  MVKSmallVectorImpl( const size_t n, const Type *t )

+  {

+    if ( n > 0 )

+    {

+      alc.allocate( n );

+

+      for ( size_t i = 0; i < n; ++i )

+      {

+        alc.ptr[i] = t;

+      }

+

+      alc.num_elements_used = n;

+    }

+  }

+

+  MVKSmallVectorImpl( const MVKSmallVectorImpl &a )

+  {

+    const size_t n = a.size();

+

+    if ( n > 0 )

+    {

+      alc.allocate( n );

+

+      for ( size_t i = 0; i < n; ++i )

+      {

+        alc.ptr[i] = a.alc.ptr[i];

+      }

+

+      alc.num_elements_used = n;

+    }

+  }

+

+  MVKSmallVectorImpl( MVKSmallVectorImpl &&a ) : alc{ std::move( a.alc ) }

+  {

+  }

+

+  MVKSmallVectorImpl( std::initializer_list<Type*> vector )

+  {

+    if ( vector.size() > capacity() )

+    {

+      vector_Allocate( vector.size() );

+    }

+

+    // std::initializer_list does not yet support std::move; we use it anyway, but it has no effect

+    for ( auto element : vector )

+    {

+      alc.ptr[alc.num_elements_used] = element;

+      ++alc.num_elements_used;

+    }

+  }

+

+  ~MVKSmallVectorImpl()

+  {

+  }

+

+  template<typename U>

+  MVKSmallVectorImpl& operator=( const U &a )

+  {

+    static_assert( std::is_base_of<MVKSmallVectorImpl<U>, U>::value, "argument is not of type MVKSmallVectorImpl" );

+

+    if ( this != reinterpret_cast< const MVKSmallVectorImpl<Type>* >( &a ) )

+    {

+      const auto n = a.size();

+

+      if ( alc.num_elements_used == n )

+      {

+        for ( size_t i = 0; i < n; ++i )

+        {

+          alc.ptr[i] = a.alc.ptr[i];

+        }

+      }

+      else

+      {

+        if ( n > capacity() )

+        {

+          vector_ReAllocate( n );

+        }

+

+        for ( size_t i = 0; i < n; ++i )

+        {

+          alc.ptr[i] = a[i];

+        }

+

+        alc.num_elements_used = n;

+      }

+    }

+

+    return *this;

+  }

+

+  MVKSmallVectorImpl& operator=( MVKSmallVectorImpl &&a )

+  {

+    alc.swap( a.alc );

+    return *this;

+  }

+

+  bool operator==( const MVKSmallVectorImpl &a ) const

+  {

+    if ( alc.num_elements_used != a.alc.num_elements_used )

+      return false;

+    for ( size_t i = 0; i < alc.num_elements_used; ++i )

+    {

+      if ( alc[i] != a.alc[i] )

+        return false;

+    }

+    return true;

+  }

+

+  bool operator!=( const MVKSmallVectorImpl &a ) const

+  {

+    if ( alc.num_elements_used != a.alc.num_elements_used )

+      return true;

+    for ( size_t i = 0; i < alc.num_elements_used; ++i )

+    {

+      if ( alc.ptr[i] != a.alc[i] )

+        return true;

+    }

+    return false;

+  }

+

+  void swap( MVKSmallVectorImpl &a )

+  {

+    alc.swap( a.alc );

+  }

+

+  iterator begin()        { return iterator( 0, *this ); }

+  iterator end()          { return iterator( alc.num_elements_used, *this ); }

+

+  const MVKArrayRef<Type*> contents() const { return MVKArrayRef<Type*>(data(), size()); }

+        MVKArrayRef<Type*> contents()       { return MVKArrayRef<Type*>(data(), size()); }

+

+  const Type * const  at( const size_t i )         const { return alc[i]; }

+        Type *       &at( const size_t i )               { return alc[i]; }

+  const Type * const  operator[]( const size_t i ) const { return alc[i]; }

+        Type *       &operator[]( const size_t i )       { return alc[i]; }

+  const Type * const  front()                      const { return alc[0]; }

+        Type *       &front()                            { return alc[0]; }

+  const Type * const  back()                       const { return alc[alc.num_elements_used - 1]; }

+        Type *       &back()                             { return alc[alc.num_elements_used - 1]; }

+  const Type * const *data()                       const { return alc.ptr; }

+        Type *       *data()                             { return alc.ptr; }

+

+  size_t   size()                                  const { return alc.num_elements_used; }

+  bool     empty()                                 const { return alc.num_elements_used == 0; }

+  size_t   capacity()                              const { return alc.get_capacity(); }

+

+  void pop_back()

+  {

+    if ( alc.num_elements_used > 0 )

+    {

+      --alc.num_elements_used;

+    }

+  }

+

+  void clear()

+  {

+    alc.num_elements_used = 0;

+  }

+

+  void reset()

+  {

+    alc.deallocate();

+  }

+

+  void reserve( const size_t new_size )

+  {

+    if ( new_size > capacity() )

+    {

+      vector_ReAllocate( new_size );

+    }

+  }

+

+  void assign( const size_t new_size, const Type *t )

+  {

+    if ( new_size <= capacity() )

+    {

+      clear();

+    }

+    else

+    {

+      vector_Allocate( new_size );

+    }

+

+    for ( size_t i = 0; i < new_size; ++i )

+    {

+      alc.ptr[i] = const_cast< Type* >( t );

+    }

+

+    alc.num_elements_used = new_size;

+  }

+

+  void resize( const size_t new_size, const Type *t = nullptr )

+  {

+    if ( new_size == alc.num_elements_used )

+    {

+      return;

+    }

+

+    if ( new_size == 0 )

+    {

+      clear();

+      return;

+    }

+

+    if ( new_size > alc.num_elements_used )

+    {

+      if ( new_size > capacity() )

+      {

+        vector_ReAllocate( new_size );

+      }

+

+      while ( alc.num_elements_used < new_size )

+      {

+        alc.ptr[alc.num_elements_used] = const_cast< Type* >( t );

+        ++alc.num_elements_used;

+      }

+    }

+    else

+    {

+      alc.num_elements_used = new_size;

+    }

+  }

+

+  // trims the capacity of the MVKSmallVectorImpl to the number of used elements

+  void shrink_to_fit()

+  {

+    alc.shrink_to_fit();

+  }

+

+  void erase( const iterator it )

+  {

+    if ( it.is_valid() )

+    {

+      --alc.num_elements_used;

+

+      for ( size_t i = it.get_position(); i < alc.num_elements_used; ++i )

+      {

+        alc.ptr[i] = alc.ptr[i + 1];

+      }

+    }

+  }

+

+  void erase( const iterator first, const iterator last )

+  {

+    if( first.is_valid() )

+    {

+      size_t last_pos = last.is_valid() ? last.get_position() : size();

+      size_t n = last_pos - first.get_position();

+      alc.num_elements_used -= n;

+

+      for( size_t i = first.get_position(), e = last_pos; i < alc.num_elements_used && e < alc.num_elements_used + n; ++i, ++e )

+      {

+        alc.ptr[i] = alc.ptr[e];

+      }

+    }

+  }

+

+  // adds t before position it and automatically resizes vector if necessary

+  void insert( const iterator it, const Type *t )

+  {

+    if ( !it.is_valid() || alc.num_elements_used == 0 )

+    {

+      push_back( t );

+    }

+    else

+    {

+      if ( alc.num_elements_used == capacity() )

+        vector_ReAllocate( vector_GetNextCapacity() );

+

+      // move the remaining elements

+      const size_t it_position = it.get_position();

+      for ( size_t i = alc.num_elements_used; i > it_position; --i )

+      {

+        alc.ptr[i] = alc.ptr[i - 1];

+      }

+

+      alc.ptr[it_position] = const_cast< Type* >( t );

+      ++alc.num_elements_used;

+    }

+  }

+

+  void push_back( const Type *t )

+  {

+    if ( alc.num_elements_used == capacity() )

+      vector_ReAllocate( vector_GetNextCapacity() );

+

+    alc.ptr[alc.num_elements_used] = const_cast< Type* >( t );

+    ++alc.num_elements_used;

+  }

+};

+

+template<typename Type, size_t N = 0>

+using MVKSmallVector = MVKSmallVectorImpl<Type, mvk_smallvector_allocator<Type, N>>;

+

+#endif

+

+

diff --git a/MoltenVK/MoltenVK/Utility/MVKSmallVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKSmallVectorAllocator.h
new file mode 100755
index 0000000..9a88d56
--- /dev/null
+++ b/MoltenVK/MoltenVK/Utility/MVKSmallVectorAllocator.h
@@ -0,0 +1,365 @@
+/*

+ * MVKSmallVectorAllocator.h

+ *

+ * Copyright (c) 2012-2020 Dr. Torsten Hans (hans@ipacs.de)

+ *

+ * Licensed under the Apache License, Version 2.0 (the "License");

+ * you may not use this file except in compliance with the License.

+ * You may obtain a copy of the License at

+ * 

+ *     http://www.apache.org/licenses/LICENSE-2.0

+ * 

+ * Unless required by applicable law or agreed to in writing, software

+ * distributed under the License is distributed on an "AS IS" BASIS,

+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * See the License for the specific language governing permissions and

+ * limitations under the License.

+ */

+

+#pragma once

+

+#include <new>

+#include <type_traits>

+

+

+// Raw-byte heap allocation used by mvk_smallvector_allocator; callers
+// placement-construct elements into the returned storage.
+namespace mvk_smallvector_memory_allocator

+{

+  // Allocates num_bytes of uninitialized storage. Note: new[] throws
+  // std::bad_alloc on failure rather than returning nullptr.
+  inline char *alloc( const size_t num_bytes )

+  {

+    return new char[num_bytes];

+  }

+

+  // Releases storage previously obtained from alloc().
+  inline void free( void *ptr )

+  {

+    delete[] (char*)ptr;

+  }

+};

+

+

+//////////////////////////////////////////////////////////////////////////////////////////

+//

+// mvk_smallvector_allocator -> malloc based MVKSmallVector allocator with preallocated storage

+//

+//////////////////////////////////////////////////////////////////////////////////////////

+template <typename T, int N>

+class mvk_smallvector_allocator final

+{

+public:

+	typedef T value_type;

+	T      *ptr;               // points at elements_stack while on stack, else at heap storage

+	size_t  num_elements_used;

+

+private:

+	static constexpr size_t STACK_SIZE = N * sizeof( T );

+	// In-place storage for up to N elements. While data is heap-resident the
+	// first sizeof(size_t) bytes of this buffer hold the reserved capacity
+	// instead, hence the MAX(...) lower bound on its size.
+	// NOTE(review): MAX is a macro not defined in this header — presumably
+	// from MVKFoundation/MVKCommonEnvironment; verify the include chain.
+	alignas( alignof( T ) ) unsigned char   elements_stack[ MAX( STACK_SIZE, sizeof(void*) ) ];

+

+  // Records the heap capacity by overlaying it onto the (unused) stack
+  // buffer; only meaningful while the data is heap-resident.
+  void set_num_elements_reserved( const size_t num_elements_reserved )

+  {

+    *reinterpret_cast<size_t*>( &elements_stack[0] ) = num_elements_reserved;

+  }

+

+public:

+  const T &operator[]( const size_t i ) const { return ptr[i]; }

+  T       &operator[]( const size_t i )       { return ptr[i]; }

+

+  size_t size() const { return num_elements_used; }

+

+  //

+  // faster element construction and destruction using type traits

+  //

+  // Element construction/destruction dispatched on type traits:
+  // non-trivial types get placement-new / explicit destructor calls,
+  // trivial types collapse to plain assignment / no-ops.
+  template<class S, class... Args> typename std::enable_if< !std::is_trivially_constructible<S, Args...>::value >::type
+    construct( S *_ptr, Args&&... _args )

+  {

+    new ( _ptr ) S( std::forward<Args>( _args )... );

+  }

+

+  template<class S, class... Args> typename std::enable_if< std::is_trivially_constructible<S, Args...>::value >::type
+    construct( S *_ptr, Args&&... _args )

+  {

+    *_ptr = S( std::forward<Args>( _args )... );

+  }

+

+  template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
+    destruct( S *_ptr )

+  {

+    _ptr->~S();

+  }

+

+  // Trivially destructible types need no destructor call.
+  template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
+    destruct( S *_ptr )

+  {

+  }

+

+  // Destroys all live elements and resets the element count. For trivially
+  // destructible types this is just a counter reset.
+  template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
+    destruct_all()

+  {

+    for( size_t i = 0; i < num_elements_used; ++i )

+    {

+      ptr[i].~S();

+    }

+

+    num_elements_used = 0;

+  }

+

+  template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
+    destruct_all()

+  {

+    num_elements_used = 0;

+  }

+

+  // Swaps the stack-resident elements of this allocator with those of a.
+  // Used for element types that need real construction/destruction; only
+  // reached when both allocators keep their data in the in-place storage.
+  // NOTE(review): T stack_copy[N] default-constructs N elements that are
+  // then overwritten by placement-new without first being destroyed —
+  // requires default-constructible T; consider raw aligned storage instead.
+  template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
+    swap_stack( mvk_smallvector_allocator &a )

+  {

+    T stack_copy[N];

+

+    // set our elements aside, destroying the moved-from originals
+    for( size_t i = 0; i < num_elements_used; ++i )

+    {

+      construct( &stack_copy[i], std::move( ptr[i] ) );

+      destruct( &ptr[i] );

+    }

+

+    // move a's elements into our storage, destroying a's moved-from sources
+    for( size_t i = 0; i < a.num_elements_used; ++i )

+    {

+      construct( &ptr[i], std::move( a.ptr[i] ) );

+      destruct( &a.ptr[i] );

+    }

+

+    // hand our saved elements over to a
+    for( size_t i = 0; i < num_elements_used; ++i )

+    {

+      construct( &a.ptr[i], std::move( stack_copy[i] ) );

+      destruct( &stack_copy[i] );

+    }

+  }

+

+  // Trivially destructible elements: swap the raw stack bytes directly.
+  // NOTE(review): loop index is int while STACK_SIZE is size_t — harmless
+  // here but triggers signed/unsigned comparison warnings; consider size_t.
+  template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
+    swap_stack( mvk_smallvector_allocator &a )

+  {

+//    constexpr int STACK_SIZE = N * sizeof( T );

+    for( int i = 0; i < STACK_SIZE; ++i )

+    {

+      const auto v = elements_stack[i];

+      elements_stack[i] = a.elements_stack[i];

+      a.elements_stack[i] = v;

+    }

+  }

+

+public:

+  // Default: empty vector with data pointer aimed at the in-place storage.
+  mvk_smallvector_allocator() : ptr(reinterpret_cast<T*>( &elements_stack[0] )), num_elements_used(0)

+  {

+  }

+

+  // Move constructor: steals a's heap allocation when it has one, otherwise
+  // move-constructs a's stack-resident elements into our own stack storage.
+  // Leaves a empty but valid. (The previous delegation to a two-argument
+  // constructor could not resolve — no such overload exists in this class.)
+  mvk_smallvector_allocator( mvk_smallvector_allocator &&a ) : num_elements_used( a.num_elements_used )

+  {

+    // a is heap based -> steal ptr from a
+    if( !a.get_data_on_stack() )

+    {

+      ptr = a.ptr;

+      set_num_elements_reserved( a.get_capacity() );

+

+      a.ptr = a.get_default_ptr();

+    }

+    else

+    {

+      ptr = get_default_ptr();

+      for( size_t i = 0; i < a.num_elements_used; ++i )

+      {

+        construct( &ptr[i], std::move( a.ptr[i] ) );

+        destruct( &a.ptr[i] );

+      }

+    }

+

+    a.num_elements_used = 0;

+  }

+

+  // Destroys all elements and releases any heap storage.
+  ~mvk_smallvector_allocator()

+  {

+    deallocate();

+  }

+

+  // Capacity is N while on the stack; otherwise it is read from the
+  // capacity word overlaid on the (then unused) stack buffer.
+  size_t get_capacity() const

+  {

+    return get_data_on_stack() ? N : *reinterpret_cast<const size_t*>( &elements_stack[0] );

+  }

+

+  // Address of the in-place storage, viewed as element storage.
+  constexpr T *get_default_ptr() const

+  {

+    return reinterpret_cast< T* >( const_cast< unsigned char * >( &elements_stack[0] ) );

+  }

+

+  // True while elements live in the in-place buffer rather than the heap.
+  bool get_data_on_stack() const

+  {

+    return ptr == get_default_ptr();

+  }

+

+  // Exchanges contents with a, handling all four stack/heap ownership
+  // combinations, then swaps the element counts.
+  void swap( mvk_smallvector_allocator &a )

+  {

+    // both allocators on heap -> easy case: just exchange pointer and capacity
+    if( !get_data_on_stack() && !a.get_data_on_stack() )

+    {

+      auto copy_ptr = ptr;

+      auto copy_num_elements_reserved = get_capacity();

+      ptr = a.ptr;

+      set_num_elements_reserved( a.get_capacity() );

+      a.ptr = copy_ptr;

+      a.set_num_elements_reserved( copy_num_elements_reserved );

+    }

+    // both allocators on stack -> just switch the stack contents

+    else if( get_data_on_stack() && a.get_data_on_stack() )

+    {

+      swap_stack<T>( a );

+    }

+    // we are on stack, a is on heap -> move our elements into a's stack
+    // buffer, then adopt a's heap block
+    else if( get_data_on_stack() && !a.get_data_on_stack() )

+    {

+      auto copy_ptr = a.ptr;

+      auto copy_num_elements_reserved = a.get_capacity();

+

+      a.ptr = a.get_default_ptr();

+      for( size_t i = 0; i < num_elements_used; ++i )

+      {

+        construct( &a.ptr[i], std::move( ptr[i] ) );

+        destruct( &ptr[i] );

+      }

+

+      ptr = copy_ptr;

+      set_num_elements_reserved( copy_num_elements_reserved );

+    }

+    // we are on heap, a is on stack -> mirror image of the previous case
+    else if( !get_data_on_stack() && a.get_data_on_stack() )

+    {

+      auto copy_ptr = ptr;

+      auto copy_num_elements_reserved = get_capacity();

+

+      ptr = get_default_ptr();

+      for( size_t i = 0; i < a.num_elements_used; ++i )

+      {

+        construct( &ptr[i], std::move( a.ptr[i] ) );

+        destruct( &a.ptr[i] );

+      }

+

+      a.ptr = copy_ptr;

+      a.set_num_elements_reserved( copy_num_elements_reserved );

+    }

+

+    auto copy_num_elements_used = num_elements_used;

+    num_elements_used = a.num_elements_used;

+    a.num_elements_used = copy_num_elements_used;

+  }

+

+  //

+  // Destroys any current contents, then reserves storage for at least
+  // num_elements_to_reserve elements. Small requests stay in the in-place
+  // buffer; larger ones go to the heap (alloc() throws on failure — it
+  // does not return a null block).
+  //

+  void allocate( const size_t num_elements_to_reserve )

+  {

+    deallocate();

+

+    // check if enough memory on stack space is left

+    if( num_elements_to_reserve <= N )

+    {

+      return;

+    }

+

+    ptr = reinterpret_cast< T* >( mvk_smallvector_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );

+    num_elements_used = 0;

+    set_num_elements_reserved( num_elements_to_reserve );

+  }

+

+  //template<class S> typename std::enable_if< !std::is_trivially_copyable<S>::value >::type

+  // Grows to a fresh heap block, move-constructing the existing elements
+  // into it and freeing the previous heap block (if any). Element count is
+  // preserved.
+  void _re_allocate( const size_t num_elements_to_reserve )

+  {

+    auto *new_ptr = reinterpret_cast< T* >( mvk_smallvector_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );

+

+    for( size_t i = 0; i < num_elements_used; ++i )

+    {

+      construct( &new_ptr[i], std::move( ptr[i] ) );

+      destruct( &ptr[i] );

+    }

+

+    // the in-place buffer is never freed; only a previous heap block is
+    if( ptr != get_default_ptr() )

+    {

+      mvk_smallvector_memory_allocator::free( ptr );

+    }

+

+    ptr = new_ptr;

+    set_num_elements_reserved( num_elements_to_reserve );

+  }

+

+  //template<class S> typename std::enable_if< std::is_trivially_copyable<S>::value >::type

+  //  _re_allocate( const size_t num_elements_to_reserve )

+  //{

+  //  const bool data_is_on_stack = get_data_on_stack();

+  //

+  //  auto *new_ptr = reinterpret_cast< S* >( mvk_smallvector_memory_allocator::tm_memrealloc( data_is_on_stack ? nullptr : ptr, num_elements_to_reserve * sizeof( S ) ) );

+  //  if( data_is_on_stack )

+  //  {

+  //    for( int i = 0; i < N; ++i )

+  //    {

+  //      new_ptr[i] = ptr[i];

+  //    }

+  //  }

+  //

+  //  ptr = new_ptr;

+  //  set_num_elements_reserved( num_elements_to_reserve );

+  //}

+

+  // Grows capacity to num_elements_to_reserve, keeping existing elements.
+  // Requests that fit the in-place buffer are a no-op — callers are assumed
+  // to only request growth beyond the current capacity (see disabled assert).
+  void re_allocate( const size_t num_elements_to_reserve )

+  {

+    //TM_ASSERT( num_elements_to_reserve > get_capacity() );

+

+    if( num_elements_to_reserve > N )

+    {

+      _re_allocate( num_elements_to_reserve );

+    }

+  }

+

+  // Shrinks storage to exactly fit the current element count: moves data
+  // back into the in-place buffer when it fits, otherwise reallocates the
+  // heap block to the minimal size.
+  void shrink_to_fit()

+  {

+    // nothing to do if data is on stack already

+    if( get_data_on_stack() )

+      return;

+

+    // move elements to stack space

+    if( num_elements_used <= N )

+    {

+      //const auto num_elements_reserved = get_capacity();

+

+      auto *stack_ptr = get_default_ptr();

+      for( size_t i = 0; i < num_elements_used; ++i )

+      {

+        // this overwrites the capacity word overlaid on the stack buffer,
+        // which is fine since the heap block is abandoned below
+        construct( &stack_ptr[i], std::move( ptr[i] ) );

+        destruct( &ptr[i] );

+      }

+

+      mvk_smallvector_memory_allocator::free( ptr );

+

+      ptr = stack_ptr;

+    }

+    else

+    {

+      // still too large for the stack: reallocate heap storage to fit exactly
+      auto *new_ptr = reinterpret_cast< T* >( mvk_smallvector_memory_allocator::alloc( num_elements_used * sizeof( T ) ) );

+

+      for( size_t i = 0; i < num_elements_used; ++i )

+      {

+        construct( &new_ptr[i], std::move( ptr[i] ) );

+        destruct( &ptr[i] );

+      }

+

+      mvk_smallvector_memory_allocator::free( ptr );

+

+      ptr = new_ptr;

+      set_num_elements_reserved( num_elements_used );

+    }

+  }

+

+  // Destroys all elements, frees any heap storage, and resets the allocator
+  // to its empty, stack-resident state.
+  void deallocate()

+  {

+    destruct_all<T>();

+

+    if( !get_data_on_stack() )

+    {

+      mvk_smallvector_memory_allocator::free( ptr );

+    }

+

+    ptr = get_default_ptr();

+    num_elements_used = 0;

+  }

+};

+

diff --git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h
index c11137a..60a4871 100755
--- a/MoltenVK/MoltenVK/Utility/MVKVector.h
+++ b/MoltenVK/MoltenVK/Utility/MVKVector.h
@@ -62,6 +62,7 @@
 // use MVKVector.

 //

 #include "MVKVectorAllocator.h"

+#include "MVKFoundation.h"

 #include <type_traits>

 #include <initializer_list>

 #include <utility>

@@ -106,6 +107,9 @@
   iterator begin() const { return iterator( 0,               *this ); }

   iterator end()   const { return iterator( alc_ptr->size(), *this ); }

 

+  const MVKArrayRef<Type> contents() const { return MVKArrayRef<Type>(data(), size()); }

+        MVKArrayRef<Type> contents()       { return MVKArrayRef<Type>(data(), size()); }

+

   virtual const Type &operator[]( const size_t i ) const                  = 0;

   virtual       Type &operator[]( const size_t i )                        = 0;

   virtual const Type &at( const size_t i ) const                          = 0;

@@ -171,6 +175,9 @@
   iterator begin() const { return iterator( 0,               *this ); }

   iterator end()   const { return iterator( alc_ptr->size(), *this ); }

 

+  const MVKArrayRef<Type*> contents() const { return MVKArrayRef<Type*>(data(), size()); }

+        MVKArrayRef<Type*> contents()       { return MVKArrayRef<Type*>(data(), size()); }

+

   virtual const Type * const  operator[]( const size_t i ) const             = 0;

   virtual       Type *       &operator[]( const size_t i )                   = 0;

   virtual const Type * const  at( const size_t i ) const                     = 0;

diff --git a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h
index edad56c..38623c0 100755
--- a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h
+++ b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h
@@ -242,7 +242,7 @@
   //size_t  num_elements_reserved; // uhh, num_elements_reserved is mapped onto the stack elements, let the fun begin

   alignas( alignof( T ) ) unsigned char   elements_stack[N * sizeof( T )];

 

-  static_assert( N * sizeof( T ) >= sizeof( size_t ), "Bummer, nasty optimization doesn't work" );

+  static_assert( N * sizeof( T ) >= sizeof( size_t ), "Initial static allocation must be at least 8 bytes. Increase the count of pre-allocated elements." );

 

   void set_num_elements_reserved( const size_t num_elements_reserved )

   {