Merge branch 'development' into linear_allocator
diff --git a/src/Tests.cpp b/src/Tests.cpp
index c408b0f..3942f33 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -1388,6 +1388,191 @@
 }

 #endif

 

+static void TestLinearAllocator()

+{

+    wprintf(L"Test linear allocator\n");

+

+    RandomNumberGenerator rand{645332};

+

+    VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };

+    sampleBufCreateInfo.size = 1024; // Whatever.

+    sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

+

+    VmaAllocationCreateInfo sampleAllocCreateInfo = {};

+    sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

+

+    VmaPoolCreateInfo poolCreateInfo = {};

+    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);

+    assert(res == VK_SUCCESS);

+

+    poolCreateInfo.blockSize = 1024 * 300;

+    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;

+    poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;

+

+    VmaPool pool = nullptr;

+    res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);

+    assert(res == VK_SUCCESS);

+

+    VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;

+

+    VmaAllocationCreateInfo allocCreateInfo = {};

+    allocCreateInfo.pool = pool;

+

+    constexpr size_t maxBufCount = 100;

+    std::vector<BufferInfo> bufInfo;

+

+    constexpr VkDeviceSize bufSizeMin = 16;

+    constexpr VkDeviceSize bufSizeMax = 1024;

+    VmaAllocationInfo allocInfo;

+    VkDeviceSize prevOffset = 0;

+

+    // Test one-time free.

+    for(size_t i = 0; i < 2; ++i)

+    {

+        // Allocate number of buffers of varying size that surely fit into this block.

+        VkDeviceSize bufSumSize = 0;

+        for(size_t i = 0; i < maxBufCount; ++i)

+        {

+            bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);

+            BufferInfo newBufInfo;

+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,

+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);

+            assert(res == VK_SUCCESS);

+            assert(i == 0 || allocInfo.offset > prevOffset);

+            bufInfo.push_back(newBufInfo);

+            prevOffset = allocInfo.offset;

+            bufSumSize += bufCreateInfo.size;

+        }

+

+        // Validate pool stats.

+        VmaPoolStats stats;

+        vmaGetPoolStats(g_hAllocator, pool, &stats);

+        assert(stats.size == poolCreateInfo.blockSize);

+        assert(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize);

+        assert(stats.allocationCount == bufInfo.size());

+

+        // Destroy the buffers in random order.

+        while(!bufInfo.empty())

+        {

+            const size_t indexToDestroy = rand.Generate() % bufInfo.size();

+            const BufferInfo& currBufInfo = bufInfo[indexToDestroy];

+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);

+            bufInfo.erase(bufInfo.begin() + indexToDestroy);

+        }

+    }

+

+    // Test stack.

+    {

+        // Allocate number of buffers of varying size that surely fit into this block.

+        for(size_t i = 0; i < maxBufCount; ++i)

+        {

+            bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);

+            BufferInfo newBufInfo;

+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,

+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);

+            assert(res == VK_SUCCESS);

+            assert(i == 0 || allocInfo.offset > prevOffset);

+            bufInfo.push_back(newBufInfo);

+            prevOffset = allocInfo.offset;

+        }

+

+        // Destroy few buffers from top of the stack.

+        for(size_t i = 0; i < maxBufCount / 5; ++i)

+        {

+            const BufferInfo& currBufInfo = bufInfo.back();

+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);

+            bufInfo.pop_back();

+        }

+

+        // Create some more

+        for(size_t i = 0; i < maxBufCount / 5; ++i)

+        {

+            bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);

+            BufferInfo newBufInfo;

+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,

+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);

+            assert(res == VK_SUCCESS);

+            assert(i == 0 || allocInfo.offset > prevOffset);

+            bufInfo.push_back(newBufInfo);

+            prevOffset = allocInfo.offset;

+        }

+

+        // Destroy the buffers in reverse order.

+        while(!bufInfo.empty())

+        {

+            const BufferInfo& currBufInfo = bufInfo.back();

+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);

+            bufInfo.pop_back();

+        }

+    }

+

+    // Test ring buffer.

+    {

+        // Allocate number of buffers that surely fit into this block.

+        bufCreateInfo.size = bufSizeMax;

+        for(size_t i = 0; i < maxBufCount; ++i)

+        {

+            BufferInfo newBufInfo;

+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,

+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);

+            assert(res == VK_SUCCESS);

+            assert(i == 0 || allocInfo.offset > prevOffset);

+            bufInfo.push_back(newBufInfo);

+            prevOffset = allocInfo.offset;

+        }

+

+        // Free and allocate new buffers so many times that we make sure we wrap-around at least once.

+        const size_t buffersPerIter = maxBufCount / 10 - 1;

+        const size_t iterCount = poolCreateInfo.blockSize / bufCreateInfo.size / buffersPerIter * 2;

+        for(size_t iter = 0; iter < iterCount; ++iter)

+        {

+            for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)

+            {

+                const BufferInfo& currBufInfo = bufInfo.front();

+                vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);

+                bufInfo.erase(bufInfo.begin());

+            }

+            for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)

+            {

+                BufferInfo newBufInfo;

+                res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,

+                    &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);

+                assert(res == VK_SUCCESS);

+                bufInfo.push_back(newBufInfo);

+            }

+        }

+        

+        // Allocate buffers until we reach out-of-memory.

+        uint32_t debugIndex = 0;

+        while(res == VK_SUCCESS)

+        {

+            BufferInfo newBufInfo;

+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,

+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);

+            if(res == VK_SUCCESS)

+            {

+                bufInfo.push_back(newBufInfo);

+            }

+            else

+            {

+                assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);

+            }

+            ++debugIndex;

+        }

+

+        // Destroy the buffers in random order.

+        while(!bufInfo.empty())

+        {

+            const size_t indexToDestroy = rand.Generate() % bufInfo.size();

+            const BufferInfo& currBufInfo = bufInfo[indexToDestroy];

+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);

+            bufInfo.erase(bufInfo.begin() + indexToDestroy);

+        }

+    }

+

+    vmaDestroyPool(g_hAllocator, pool);

+}

+

 static void TestPool_SameSize()

 {

     const VkDeviceSize BUF_SIZE = 1024 * 1024;

@@ -3112,6 +3297,8 @@
     wprintf(L"TESTING:\n");

 

     // TEMP tests

+TestLinearAllocator();

+return;

 

     // # Simple tests

 

@@ -3127,6 +3314,7 @@
 #endif

     TestMapping();

     TestMappingMultithreaded();

+    TestLinearAllocator();

     TestDefragmentationSimple();

     TestDefragmentationFull();

 

diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 5b8fcc0..ce04a15 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -496,7 +496,7 @@
 -# Fill VmaPoolCreateInfo structure.

 -# Call vmaCreatePool() to obtain #VmaPool handle.

 -# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.

-   You don't need to specify any other parameters of this structure, like usage.

+   You don't need to specify any other parameters of this structure, like `usage`.

 

 Example:

 

@@ -833,7 +833,7 @@
 

 \section debugging_memory_usage_initialization Memory initialization

 

-If you experience a bug with incorrect data in your program and you suspect uninitialized memory to be used,

+If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,

 you can enable automatic memory initialization to verify this.

 To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.

 

@@ -852,7 +852,7 @@
 Memory initialization works only with memory types that are `HOST_VISIBLE`.

 It works also with dedicated allocations.

 It doesn't work with allocations created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,

-as these they cannot be mapped.

+as they cannot be mapped.

 

 \section debugging_memory_usage_margins Margins

 

@@ -1027,7 +1027,7 @@
 This is a more complex situation. Different solutions are possible,

 and the best one depends on specific GPU type, but you can use this simple approach for the start.

 Prefer to write to such resource sequentially (e.g. using `memcpy`).

-Don't perform random access or any reads from it, as it may be very slow.

+Don't perform random access or any reads from it on CPU, as it may be very slow.

 

 \subsection usage_patterns_readback Readback

 

@@ -1047,7 +1047,7 @@
 by detecting it in Vulkan.

 To do it, call `vkGetPhysicalDeviceProperties()`, inspect

 `VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.

-When you find it, you can assume that memory is unified and all memory types are equally fast

+When you find it, you can assume that memory is unified and all memory types are comparably fast

 to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.

 

 You can then sum up sizes of all available memory heaps and treat them as useful for

@@ -1071,7 +1071,7 @@
 Some general recommendations:

 

 - On integrated graphics use (2) or (3) to avoid unnecesary time and memory overhead

-  related to using a second copy.

+  related to using a second copy and making transfer.

 - For small resources (e.g. constant buffers) use (2).

   Discrete AMD cards have special 256 MiB pool of video memory that is directly mappable.

   Even if the resource ends up in system memory, its data may be cached on GPU after first

@@ -1706,6 +1706,9 @@
     freed together with the allocation. It is also used in vmaBuildStatsString().

     */

     VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,

+    /** \brief Allocation will be created from the upper end of the block's address space instead of the lower end.
+
+    NOTE(review): placeholder doc filled in - intended for custom pools created with
+    #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, making such a pool work as a double-ended
+    stack. Confirm final wording once the feature implementation is complete.
+    */

+    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,

 

     VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF

 } VmaAllocationCreateFlagBits;

@@ -1816,7 +1819,7 @@
 typedef enum VmaPoolCreateFlagBits {

     /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.

 

-    This is na optional optimization flag.

+    This is an optional optimization flag.

 

     If you always allocate using vmaCreateBuffer(), vmaCreateImage(),

     vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator

@@ -1833,6 +1836,12 @@
     */

     VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,

 

+    /** \brief Enables alternative, linear allocation algorithm in this pool.
+
+    NOTE(review): placeholder doc filled in - with this flag the pool always creates
+    new allocations after the last one and doesn't reuse space from allocations freed
+    in between, which enables free-at-once, stack, and ring-buffer usage patterns.
+    Confirm final wording once the feature implementation is complete.
+    */

+    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,

+

     VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF

 } VmaPoolCreateFlagBits;

 typedef VkFlags VmaPoolCreateFlags;

@@ -4366,32 +4375,101 @@
 */

 class VmaBlockMetadata

 {

-    VMA_CLASS_NO_COPY(VmaBlockMetadata)

 public:

-    VmaBlockMetadata(VmaAllocator hAllocator);

-    ~VmaBlockMetadata();

-    void Init(VkDeviceSize size);

+    VmaBlockMetadata() : m_Size(0) { }

+    virtual ~VmaBlockMetadata() { }

+    virtual void Init(VkDeviceSize size) { m_Size = size; }

 

     // Validates all data structures inside this object. If not valid, returns false.

-    bool Validate() const;

+    virtual bool Validate() const = 0;

     VkDeviceSize GetSize() const { return m_Size; }

-    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }

-    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }

-    VkDeviceSize GetUnusedRangeSizeMax() const;

+    virtual size_t GetAllocationCount() const = 0;

+    virtual VkDeviceSize GetSumFreeSize() const = 0;

+    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;

     // Returns true if this block is empty - contains only single free suballocation.

-    bool IsEmpty() const;

+    virtual bool IsEmpty() const = 0;

 

-    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;

-    void AddPoolStats(VmaPoolStats& inoutStats) const;

+    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;

+    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

 

 #if VMA_STATS_STRING_ENABLED

-    void PrintDetailedMap(class VmaJsonWriter& json) const;

+    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;

 #endif

 

     // Tries to find a place for suballocation with given parameters inside this block.

     // If succeeded, fills pAllocationRequest and returns true.

     // If failed, returns false.

-    bool CreateAllocationRequest(

+    virtual bool CreateAllocationRequest(

+        uint32_t currentFrameIndex,

+        uint32_t frameInUseCount,

+        VkDeviceSize bufferImageGranularity,

+        VkDeviceSize allocSize,

+        VkDeviceSize allocAlignment,

+        VmaSuballocationType allocType,

+        bool canMakeOtherLost,

+        VmaAllocationRequest* pAllocationRequest) = 0;

+

+    virtual bool MakeRequestedAllocationsLost(

+        uint32_t currentFrameIndex,

+        uint32_t frameInUseCount,

+        VmaAllocationRequest* pAllocationRequest) = 0;

+

+    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

+

+    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

+

+    // Makes actual allocation based on request. Request must already be checked and valid.

+    virtual void Alloc(

+        const VmaAllocationRequest& request,

+        VmaSuballocationType type,

+        VkDeviceSize allocSize,

+        VmaAllocation hAllocation) = 0;

+

+    // Frees suballocation assigned to given memory region.

+    virtual void Free(const VmaAllocation allocation) = 0;

+    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

+

+protected:

+#if VMA_STATS_STRING_ENABLED

+    void PrintDetailedMap_Begin(class VmaJsonWriter& json,

+        VkDeviceSize unusedBytes,

+        size_t allocationCount,

+        size_t unusedRangeCount) const;

+    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,

+        VkDeviceSize offset,

+        VmaAllocation hAllocation) const;

+    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,

+        VkDeviceSize offset,

+        VkDeviceSize size) const;

+    void PrintDetailedMap_End(class VmaJsonWriter& json) const;

+#endif

+

+private:

+    VkDeviceSize m_Size;

+};

+

+class VmaBlockMetadata_Generic : public VmaBlockMetadata

+{

+    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)

+public:

+    VmaBlockMetadata_Generic(VmaAllocator hAllocator);

+    virtual ~VmaBlockMetadata_Generic();

+    virtual void Init(VkDeviceSize size);

+

+    virtual bool Validate() const;

+    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }

+    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }

+    virtual VkDeviceSize GetUnusedRangeSizeMax() const;

+    virtual bool IsEmpty() const;

+

+    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;

+    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

+

+#if VMA_STATS_STRING_ENABLED

+    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;

+#endif

+

+    virtual bool CreateAllocationRequest(

         uint32_t currentFrameIndex,

         uint32_t frameInUseCount,

         VkDeviceSize bufferImageGranularity,

@@ -4401,28 +4479,25 @@
         bool canMakeOtherLost,

         VmaAllocationRequest* pAllocationRequest);

 

-    bool MakeRequestedAllocationsLost(

+    virtual bool MakeRequestedAllocationsLost(

         uint32_t currentFrameIndex,

         uint32_t frameInUseCount,

         VmaAllocationRequest* pAllocationRequest);

 

-    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

+    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

 

-    VkResult CheckCorruption(const void* pBlockData);

+    virtual VkResult CheckCorruption(const void* pBlockData);

 

-    // Makes actual allocation based on request. Request must already be checked and valid.

-    void Alloc(

+    virtual void Alloc(

         const VmaAllocationRequest& request,

         VmaSuballocationType type,

         VkDeviceSize allocSize,

         VmaAllocation hAllocation);

 

-    // Frees suballocation assigned to given memory region.

-    void Free(const VmaAllocation allocation);

-    void FreeAtOffset(VkDeviceSize offset);

+    virtual void Free(const VmaAllocation allocation);

+    virtual void FreeAtOffset(VkDeviceSize offset);

 

 private:

-    VkDeviceSize m_Size;

     uint32_t m_FreeCount;

     VkDeviceSize m_SumFreeSize;

     VmaSuballocationList m_Suballocations;

@@ -4461,6 +4536,95 @@
     void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);

 };

 

+class VmaBlockMetadata_Linear : public VmaBlockMetadata

+{

+public:

+    VmaBlockMetadata_Linear(VmaAllocator hAllocator);

+    virtual ~VmaBlockMetadata_Linear();

+    virtual void Init(VkDeviceSize size);

+

+    virtual bool Validate() const;

+    virtual size_t GetAllocationCount() const;

+    virtual VkDeviceSize GetSumFreeSize() const;

+    virtual VkDeviceSize GetUnusedRangeSizeMax() const;

+    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

+

+    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;

+    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

+

+#if VMA_STATS_STRING_ENABLED

+    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;

+#endif

+

+    virtual bool CreateAllocationRequest(

+        uint32_t currentFrameIndex,

+        uint32_t frameInUseCount,

+        VkDeviceSize bufferImageGranularity,

+        VkDeviceSize allocSize,

+        VkDeviceSize allocAlignment,

+        VmaSuballocationType allocType,

+        bool canMakeOtherLost,

+        VmaAllocationRequest* pAllocationRequest);

+

+    virtual bool MakeRequestedAllocationsLost(

+        uint32_t currentFrameIndex,

+        uint32_t frameInUseCount,

+        VmaAllocationRequest* pAllocationRequest);

+

+    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

+

+    virtual VkResult CheckCorruption(const void* pBlockData);

+

+    virtual void Alloc(

+        const VmaAllocationRequest& request,

+        VmaSuballocationType type,

+        VkDeviceSize allocSize,

+        VmaAllocation hAllocation);

+

+    virtual void Free(const VmaAllocation allocation);

+    virtual void FreeAtOffset(VkDeviceSize offset);

+

+private:

+    /*

+    There are two suballocation vectors, used in ping-pong way.

+    The one with index m_1stVectorIndex is called 1st.

+    The one with index (m_1stVectorIndex ^ 1) is called 2nd.

+    2nd can be non-empty only when 1st is not empty.

+    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.

+    */

+    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

+

+    enum SECOND_VECTOR_MODE

+    {

+        SECOND_VECTOR_EMPTY,

+        /*

+        Suballocations in 2nd vector are created later than the ones in 1st, but they

+        all have smaller offset.

+        */

+        SECOND_VECTOR_RING_BUFFER,

+        SECOND_VECTOR_DOUBLE_STACK,

+    };

+

+    SuballocationVectorType m_Suballocations0, m_Suballocations1;

+    uint32_t m_1stVectorIndex;

+    SECOND_VECTOR_MODE m_2ndVectorMode;

+

+    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }

+    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

+    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }

+    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

+    

+    // Number of items in 1st vector with hAllocation = null at the beginning.

+    size_t m_1stNullItemsBeginCount;

+    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.

+    size_t m_1stNullItemsMiddleCount;

+    // Number of items in 2nd vector with hAllocation = null.

+    size_t m_2ndNullItemsCount;

+

+    bool ShouldCompact1st() const;

+    void CleanupAfterFree();

+};

+

 /*

 Represents a single block of device memory (`VkDeviceMemory`) with all the

 data about its regions (aka suballocations, #VmaAllocation), assigned and free.

@@ -4471,7 +4635,7 @@
 {

     VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)

 public:

-    VmaBlockMetadata m_Metadata;

+    VmaBlockMetadata* m_pMetadata;

 

     VmaDeviceMemoryBlock(VmaAllocator hAllocator);

 

@@ -4483,10 +4647,12 @@
 

     // Always call after construction.

     void Init(

+        VmaAllocator hAllocator,

         uint32_t newMemoryTypeIndex,

         VkDeviceMemory newMemory,

         VkDeviceSize newSize,

-        uint32_t id);

+        uint32_t id,

+        bool linearAlgorithm);

     // Always call before destruction.

     void Destroy(VmaAllocator allocator);

     

@@ -4556,7 +4722,8 @@
         size_t maxBlockCount,

         VkDeviceSize bufferImageGranularity,

         uint32_t frameInUseCount,

-        bool isCustomPool);

+        bool isCustomPool,

+        bool linearAlgorithm);

     ~VmaBlockVector();

 

     VkResult CreateMinBlocks();

@@ -4565,6 +4732,7 @@
     VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }

     VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }

     uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }

+    bool UsesLinearAlgorithm() const { return m_LinearAlgorithm; }

 

     void GetPoolStats(VmaPoolStats* pStats);

 

@@ -4617,13 +4785,14 @@
     const VkDeviceSize m_BufferImageGranularity;

     const uint32_t m_FrameInUseCount;

     const bool m_IsCustomPool;

+    const bool m_LinearAlgorithm;

+    bool m_HasEmptyBlock;

     VMA_MUTEX m_Mutex;

     // Incrementally sorted by sumFreeSize, ascending.

     VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;

     /* There can be at most one allocation that is completely empty - a

     hysteresis to avoid pessimistic case of alternating creation and destruction

     of a VkDeviceMemory. */

-    bool m_HasEmptyBlock;

     VmaDefragmentator* m_pDefragmentator;

     uint32_t m_NextBlockId;

 

@@ -4711,7 +4880,7 @@
 

         void CalcHasNonMovableAllocations()

         {

-            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();

+            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();

             const size_t defragmentAllocCount = m_Allocations.size();

             m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;

         }

@@ -4755,7 +4924,7 @@
             {

                 return false;

             }

-            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())

+            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())

             {

                 return true;

             }

@@ -5863,11 +6032,79 @@
     }

 };

 

+

 ////////////////////////////////////////////////////////////////////////////////

 // class VmaBlockMetadata

 

-VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :

-    m_Size(0),

+#if VMA_STATS_STRING_ENABLED

+

+void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,

+    VkDeviceSize unusedBytes,

+    size_t allocationCount,

+    size_t unusedRangeCount) const

+{

+    json.BeginObject();

+

+    json.WriteString("TotalBytes");

+    json.WriteNumber(GetSize());

+

+    json.WriteString("UnusedBytes");

+    json.WriteNumber(unusedBytes);

+

+    json.WriteString("Allocations");

+    json.WriteNumber((uint64_t)allocationCount);

+

+    json.WriteString("UnusedRanges");

+    json.WriteNumber((uint64_t)unusedRangeCount);

+

+    json.WriteString("Suballocations");

+    json.BeginArray();

+}

+

+void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,

+    VkDeviceSize offset,

+    VmaAllocation hAllocation) const

+{

+    json.BeginObject(true);

+        

+    json.WriteString("Offset");

+    json.WriteNumber(offset);

+

+    hAllocation->PrintParameters(json);

+

+    json.EndObject();

+}

+

+void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,

+    VkDeviceSize offset,

+    VkDeviceSize size) const

+{

+    json.BeginObject(true);

+        

+    json.WriteString("Offset");

+    json.WriteNumber(offset);

+

+    json.WriteString("Type");

+    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

+

+    json.WriteString("Size");

+    json.WriteNumber(size);

+

+    json.EndObject();

+}

+

+void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const

+{

+    json.EndArray();

+    json.EndObject();

+}

+

+#endif // #if VMA_STATS_STRING_ENABLED

+

+////////////////////////////////////////////////////////////////////////////////

+// class VmaBlockMetadata_Generic

+

+VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :

     m_FreeCount(0),

     m_SumFreeSize(0),

     m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),

@@ -5875,13 +6112,13 @@
 {

 }

 

-VmaBlockMetadata::~VmaBlockMetadata()

+VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()

 {

 }

 

-void VmaBlockMetadata::Init(VkDeviceSize size)

+void VmaBlockMetadata_Generic::Init(VkDeviceSize size)

 {

-    m_Size = size;

+    VmaBlockMetadata::Init(size);

     m_FreeCount = 1;

     m_SumFreeSize = size;

 

@@ -5897,14 +6134,14 @@
     m_FreeSuballocationsBySize.push_back(suballocItem);

 }

 

-bool VmaBlockMetadata::Validate() const

+bool VmaBlockMetadata_Generic::Validate() const

 {

     if(m_Suballocations.empty())

     {

         return false;

     }

     

-    // Expected offset of new suballocation as calculates from previous ones.

+    // Expected offset of new suballocation as calculated from previous ones.

     VkDeviceSize calculatedOffset = 0;

     // Expected number of free suballocations as calculated from traversing their list.

     uint32_t calculatedFreeCount = 0;

@@ -6005,7 +6242,7 @@
 

     // Check if totals match calculacted values.

     if(!ValidateFreeSuballocationList() ||

-        (calculatedOffset != m_Size) ||

+        (calculatedOffset != GetSize()) ||

         (calculatedSumFreeSize != m_SumFreeSize) ||

         (calculatedFreeCount != m_FreeCount))

     {

@@ -6015,7 +6252,7 @@
     return true;

 }

 

-VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const

+VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const

 {

     if(!m_FreeSuballocationsBySize.empty())

     {

@@ -6027,12 +6264,12 @@
     }

 }

 

-bool VmaBlockMetadata::IsEmpty() const

+bool VmaBlockMetadata_Generic::IsEmpty() const

 {

     return (m_Suballocations.size() == 1) && (m_FreeCount == 1);

 }

 

-void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const

+void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const

 {

     outInfo.blockCount = 1;

 

@@ -6041,7 +6278,7 @@
     outInfo.unusedRangeCount = m_FreeCount;

     

     outInfo.unusedBytes = m_SumFreeSize;

-    outInfo.usedBytes = m_Size - outInfo.unusedBytes;

+    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

 

     outInfo.allocationSizeMin = UINT64_MAX;

     outInfo.allocationSizeMax = 0;

@@ -6066,11 +6303,11 @@
     }

 }

 

-void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const

+void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const

 {

     const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

 

-    inoutStats.size += m_Size;

+    inoutStats.size += GetSize();

     inoutStats.unusedSize += m_SumFreeSize;

     inoutStats.allocationCount += rangeCount - m_FreeCount;

     inoutStats.unusedRangeCount += m_FreeCount;

@@ -6079,52 +6316,29 @@
 

 #if VMA_STATS_STRING_ENABLED

 

-void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const

+void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const

 {

-    json.BeginObject();

+    PrintDetailedMap_Begin(json,

+        m_SumFreeSize, // unusedBytes

+        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount

+        m_FreeCount); // unusedRangeCount

 

-    json.WriteString("TotalBytes");

-    json.WriteNumber(m_Size);

-

-    json.WriteString("UnusedBytes");

-    json.WriteNumber(m_SumFreeSize);

-

-    json.WriteString("Allocations");

-    json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);

-

-    json.WriteString("UnusedRanges");

-    json.WriteNumber(m_FreeCount);

-

-    json.WriteString("Suballocations");

-    json.BeginArray();

     size_t i = 0;

     for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();

         suballocItem != m_Suballocations.cend();

         ++suballocItem, ++i)

     {

-        json.BeginObject(true);

-        

-        json.WriteString("Offset");

-        json.WriteNumber(suballocItem->offset);

-

         if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)

         {

-            json.WriteString("Type");

-            json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

-

-            json.WriteString("Size");

-            json.WriteNumber(suballocItem->size);

+            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);

         }

         else

         {

-            suballocItem->hAllocation->PrintParameters(json);

+            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);

         }

-

-        json.EndObject();

     }

-    json.EndArray();

 

-    json.EndObject();

+    PrintDetailedMap_End(json);

 }

 

 #endif // #if VMA_STATS_STRING_ENABLED

@@ -6139,7 +6353,7 @@
 */

 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;

 

-bool VmaBlockMetadata::CreateAllocationRequest(

+bool VmaBlockMetadata_Generic::CreateAllocationRequest(

     uint32_t currentFrameIndex,

     uint32_t frameInUseCount,

     VkDeviceSize bufferImageGranularity,

@@ -6268,7 +6482,7 @@
     return false;

 }

 

-bool VmaBlockMetadata::MakeRequestedAllocationsLost(

+bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(

     uint32_t currentFrameIndex,

     uint32_t frameInUseCount,

     VmaAllocationRequest* pAllocationRequest)

@@ -6300,7 +6514,7 @@
     return true;

 }

 

-uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)

+uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)

 {

     uint32_t lostAllocationCount = 0;

     for(VmaSuballocationList::iterator it = m_Suballocations.begin();

@@ -6318,7 +6532,7 @@
     return lostAllocationCount;

 }

 

-VkResult VmaBlockMetadata::CheckCorruption(const void* pBlockData)

+VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)

 {

     for(VmaSuballocationList::iterator it = m_Suballocations.begin();

         it != m_Suballocations.end();

@@ -6342,7 +6556,7 @@
     return VK_SUCCESS;

 }

 

-void VmaBlockMetadata::Alloc(

+void VmaBlockMetadata_Generic::Alloc(

     const VmaAllocationRequest& request,

     VmaSuballocationType type,

     VkDeviceSize allocSize,

@@ -6406,7 +6620,7 @@
     m_SumFreeSize -= allocSize;

 }

 

-void VmaBlockMetadata::Free(const VmaAllocation allocation)

+void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)

 {

     for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();

         suballocItem != m_Suballocations.end();

@@ -6423,7 +6637,7 @@
     VMA_ASSERT(0 && "Not found!");

 }

 

-void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)

+void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)

 {

     for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();

         suballocItem != m_Suballocations.end();

@@ -6439,7 +6653,7 @@
     VMA_ASSERT(0 && "Not found!");

 }

 

-bool VmaBlockMetadata::ValidateFreeSuballocationList() const

+bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const

 {

     VkDeviceSize lastSize = 0;

     for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)

@@ -6467,7 +6681,7 @@
     return true;

 }

 

-bool VmaBlockMetadata::CheckAllocation(

+bool VmaBlockMetadata_Generic::CheckAllocation(

     uint32_t currentFrameIndex,

     uint32_t frameInUseCount,

     VkDeviceSize bufferImageGranularity,

@@ -6511,7 +6725,7 @@
         }

 

         // Remaining size is too small for this request: Early return.

-        if(m_Size - suballocItem->offset < allocSize)

+        if(GetSize() - suballocItem->offset < allocSize)

         {

             return false;

         }

@@ -6571,7 +6785,7 @@
 

         const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;

         // Another early return check.

-        if(suballocItem->offset + totalSize > m_Size)

+        if(suballocItem->offset + totalSize > GetSize())

         {

             return false;

         }

@@ -6741,7 +6955,7 @@
     return true;

 }

 

-void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)

+void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)

 {

     VMA_ASSERT(item != m_Suballocations.end());

     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);

@@ -6756,7 +6970,7 @@
     m_Suballocations.erase(nextItem);

 }

 

-VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)

+VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)

 {

     // Change this suballocation to be marked as free.

     VmaSuballocation& suballoc = *suballocItem;

@@ -6808,7 +7022,7 @@
     }

 }

 

-void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)

+void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)

 {

     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);

     VMA_ASSERT(item->size > 0);

@@ -6833,7 +7047,7 @@
 }

 

 

-void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)

+void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)

 {

     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);

     VMA_ASSERT(item->size > 0);

@@ -6867,10 +7081,1098 @@
 }

 

 ////////////////////////////////////////////////////////////////////////////////

+// class VmaBlockMetadata_Linear

+

+// Constructs empty linear metadata. The two suballocation vectors back a
+// double-stack / ring-buffer scheme; m_1stVectorIndex presumably selects which
+// of m_Suballocations0/1 currently acts as the "1st" vector (see
+// AccessSuballocations1st/2nd — TODO confirm, accessors not visible here).
+VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :

+    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),

+    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),

+    m_1stVectorIndex(0),

+    m_2ndVectorMode(SECOND_VECTOR_EMPTY),

+    m_1stNullItemsBeginCount(0),

+    m_1stNullItemsMiddleCount(0),

+    m_2ndNullItemsCount(0)

+{

+}

+

+// Nothing to release explicitly; the suballocation vectors own their storage.
+VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()

+{

+}

+

+// Initializes metadata for a block of the given size. Only forwards to the
+// base class (stores the size); both vectors stay empty until first use.
+void VmaBlockMetadata_Linear::Init(VkDeviceSize size)

+{

+    VmaBlockMetadata::Init(size);

+}

+

+// Checks internal consistency of the linear metadata: vector emptiness vs.
+// m_2ndVectorMode, null-item bookkeeping counters, per-item agreement between
+// the cached offset/size and the owning VmaAllocation, and strictly increasing
+// offsets separated by at least VMA_DEBUG_MARGIN.
+// Returns false on the first violation found.
+bool VmaBlockMetadata_Linear::Validate() const

+{

+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+

+    if(suballocations2nd.empty() != (m_2ndVectorMode == SECOND_VECTOR_EMPTY))

+    {

+        return false;

+    }

+    if(suballocations1st.empty() && !suballocations2nd.empty())

+    {

+        return false;

+    }

+    if(!suballocations1st.empty())

+    {

+        // Guard against out-of-bounds indexing below: a non-empty 1st vector

+        // must still contain an item at index m_1stNullItemsBeginCount.

+        if(m_1stNullItemsBeginCount >= suballocations1st.size())

+        {

+            return false;

+        }

+        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.

+        if(suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)

+        {

+            return false;

+        }

+        // Null item at the end should be just pop_back().

+        if(suballocations1st.back().hAllocation == VK_NULL_HANDLE)

+        {

+            return false;

+        }

+    }

+    if(!suballocations2nd.empty())

+    {

+        // Null item at the end should be just pop_back().

+        if(suballocations2nd.back().hAllocation == VK_NULL_HANDLE)

+        {

+            return false;

+        }

+    }

+

+    const size_t suballoc1stCount = suballocations1st.size();

+    VkDeviceSize offset = VMA_DEBUG_MARGIN;

+

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        const size_t suballoc2ndCount = suballocations2nd.size();

+        size_t nullItem2ndCount = 0;

+        for(size_t i = 0; i < suballoc2ndCount; ++i)

+        {

+            const VmaSuballocation& suballoc = suballocations2nd[i];

+            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

+

+            // A free item must have a null handle and vice versa.

+            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))

+            {

+                return false;

+            }

+            if(suballoc.offset < offset)

+            {

+                return false;

+            }

+

+            if(!currFree)

+            {

+                if(suballoc.hAllocation->GetOffset() != suballoc.offset)

+                {

+                    return false;

+                }

+                if(suballoc.hAllocation->GetSize() != suballoc.size)

+                {

+                    return false;

+                }

+            }

+

+            if(currFree)

+            {

+                ++nullItem2ndCount;

+            }

+

+            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;

+        }

+

+        if(nullItem2ndCount != m_2ndNullItemsCount)

+        {

+            return false;

+        }

+    }

+

+    // Leading null items of the 1st vector must all be free with null handles.

+    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)

+    {

+        const VmaSuballocation& suballoc = suballocations1st[i];

+        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE ||

+            suballoc.hAllocation != VK_NULL_HANDLE)

+        {

+            return false;

+        }

+    }

+

+    size_t nullItem1stCount = m_1stNullItemsBeginCount;

+

+    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)

+    {

+        const VmaSuballocation& suballoc = suballocations1st[i];

+        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

+

+        if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))

+        {

+            return false;

+        }

+        if(suballoc.offset < offset)

+        {

+            return false;

+        }

+        // Note: a previous version also tested (i < m_1stNullItemsBeginCount)

+        // here, which is always false since i starts at that value; the dead

+        // check has been removed.

+

+        if(!currFree)

+        {

+            if(suballoc.hAllocation->GetOffset() != suballoc.offset)

+            {

+                return false;

+            }

+            if(suballoc.hAllocation->GetSize() != suballoc.size)

+            {

+                return false;

+            }

+        }

+

+        if(currFree)

+        {

+            ++nullItem1stCount;

+        }

+

+        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;

+    }

+

+    if(offset > GetSize())

+    {

+        return false;

+    }

+    if(nullItem1stCount != m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount)

+    {

+        return false;

+    }

+

+    return true;

+}

+

+// Number of live (non-null) allocations across both suballocation vectors.
+size_t VmaBlockMetadata_Linear::GetAllocationCount() const

+{

+    const size_t nullCount1st = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;

+    const size_t liveCount1st = AccessSuballocations1st().size() - nullCount1st;

+    const size_t liveCount2nd = AccessSuballocations2nd().size() - m_2ndNullItemsCount;

+    return liveCount1st + liveCount2nd;

+}

+

+// Placeholder: reports the whole block size as free until real free-size
+// tracking is implemented for the linear algorithm (see TODO below). Callers
+// therefore see an overestimate.
+VkDeviceSize VmaBlockMetadata_Linear::GetSumFreeSize() const

+{

+    // TODO

+    return GetSize();

+}

+

+// Placeholder: reports the whole block size as the largest free range until
+// real tracking is implemented (see TODO below) — an overestimate, same as
+// GetSumFreeSize.
+VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const

+{

+    // TODO

+    return GetSize();

+}

+

+// Fills outInfo with statistics for this block: allocation count, used/unused
+// bytes and min/max allocation / free-range sizes. Walks the 2nd vector first
+// (only in ring-buffer mode, where it occupies the region before the 1st
+// vector's first live item), then the 1st vector, skipping null (freed) items.
+// Fixed: the ...SizeMax accumulators were updated with VMA_MIN, which pins
+// them at their initial 0 forever; they must use VMA_MAX.
+void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const

+{

+    const VkDeviceSize size = GetSize();

+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+    const size_t suballoc1stCount = suballocations1st.size();

+    const size_t suballoc2ndCount = suballocations2nd.size();

+

+    outInfo.blockCount = 1;

+    outInfo.allocationCount = (uint32_t)GetAllocationCount();

+    outInfo.unusedRangeCount = 0;

+    outInfo.usedBytes = 0;

+    outInfo.allocationSizeMin = UINT64_MAX;

+    outInfo.allocationSizeMax = 0;

+    outInfo.unusedRangeSizeMin = UINT64_MAX;

+    outInfo.unusedRangeSizeMax = 0;

+

+    VkDeviceSize lastOffset = 0;

+

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        // In ring-buffer mode the 2nd vector occupies [0, offset of the first

+        // live 1st-vector item).

+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;

+        size_t nextAlloc2ndIndex = 0;

+        while(lastOffset < freeSpace2ndTo1stEnd)

+        {

+            // Find next non-null allocation or move nextAllocIndex to the end.

+            while(nextAlloc2ndIndex < suballoc2ndCount &&

+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)

+            {

+                ++nextAlloc2ndIndex;

+            }

+

+            // Found non-null allocation.

+            if(nextAlloc2ndIndex < suballoc2ndCount)

+            {

+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

+

+                // 1. Process free space before this allocation.

+                if(lastOffset < suballoc.offset)

+                {

+                    // There is free space from lastOffset to suballoc.offset.

+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;

+                    ++outInfo.unusedRangeCount;

+                    outInfo.unusedBytes += unusedRangeSize;

+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);

+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);

+                }

+

+                // 2. Process this allocation.

+                // There is allocation with suballoc.offset, suballoc.size.

+                outInfo.usedBytes += suballoc.size;

+                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);

+                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

+

+                // 3. Prepare for next iteration.

+                lastOffset = suballoc.offset + suballoc.size;

+                ++nextAlloc2ndIndex;

+            }

+            // We are at the end.

+            else

+            {

+                // There is free space from lastOffset to freeSpace2ndTo1stEnd.

+                if(lastOffset < freeSpace2ndTo1stEnd)

+                {

+                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;

+                    ++outInfo.unusedRangeCount;

+                    outInfo.unusedBytes += unusedRangeSize;

+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);

+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);

+                }

+

+                // End of loop.

+                lastOffset = freeSpace2ndTo1stEnd;

+            }

+        }

+    }

+

+    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;

+    while(lastOffset < size)

+    {

+        // Find next non-null allocation or move nextAllocIndex to the end.

+        while(nextAlloc1stIndex < suballoc1stCount &&

+            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)

+        {

+            ++nextAlloc1stIndex;

+        }

+

+        // Found non-null allocation.

+        if(nextAlloc1stIndex < suballoc1stCount)

+        {

+            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

+

+            // 1. Process free space before this allocation.

+            if(lastOffset < suballoc.offset)

+            {

+                // There is free space from lastOffset to suballoc.offset.

+                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;

+                ++outInfo.unusedRangeCount;

+                outInfo.unusedBytes += unusedRangeSize;

+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);

+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);

+            }

+

+            // 2. Process this allocation.

+            // There is allocation with suballoc.offset, suballoc.size.

+            outInfo.usedBytes += suballoc.size;

+            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);

+            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

+

+            // 3. Prepare for next iteration.

+            lastOffset = suballoc.offset + suballoc.size;

+            ++nextAlloc1stIndex;

+        }

+        // We are at the end.

+        else

+        {

+            // There is free space from lastOffset to size.

+            if(lastOffset < size)

+            {

+                const VkDeviceSize unusedRangeSize = size - lastOffset;

+                ++outInfo.unusedRangeCount;

+                outInfo.unusedBytes += unusedRangeSize;

+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);

+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);

+            }

+

+            // End of loop.

+            lastOffset = size;

+        }

+    }

+

+    // Authoritative value; intentionally overwrites the incremental sums above.

+    outInfo.unusedBytes = size - outInfo.usedBytes;

+}

+

+// Accumulates this block's contribution into inoutStats: total size,
+// allocation count, unused byte total, unused-range count and max unused-range
+// size. Traversal order matches CalcAllocationStatInfo: 2nd (ring-buffer)
+// vector first, then the 1st vector, skipping null items.
+// Fixed: nextAlloc2ndIndex (an index into the 2nd vector) was initialized to
+// m_1stNullItemsBeginCount (a 1st-vector counter), which could skip leading
+// 2nd-vector allocations; sibling functions correctly start it at 0.
+void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const

+{

+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+    const VkDeviceSize size = GetSize();

+    const size_t suballoc1stCount = suballocations1st.size();

+    const size_t suballoc2ndCount = suballocations2nd.size();

+

+    inoutStats.size += size;

+

+    VkDeviceSize lastOffset = 0;

+

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;

+        size_t nextAlloc2ndIndex = 0;

+        while(lastOffset < freeSpace2ndTo1stEnd)

+        {

+            // Find next non-null allocation or move nextAlloc2ndIndex to the end.

+            while(nextAlloc2ndIndex < suballoc2ndCount &&

+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)

+            {

+                ++nextAlloc2ndIndex;

+            }

+

+            // Found non-null allocation.

+            if(nextAlloc2ndIndex < suballoc2ndCount)

+            {

+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

+

+                // 1. Process free space before this allocation.

+                if(lastOffset < suballoc.offset)

+                {

+                    // There is free space from lastOffset to suballoc.offset.

+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;

+                    inoutStats.unusedSize += unusedRangeSize;

+                    ++inoutStats.unusedRangeCount;

+                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);

+                }

+

+                // 2. Process this allocation.

+                // There is allocation with suballoc.offset, suballoc.size.

+                ++inoutStats.allocationCount;

+

+                // 3. Prepare for next iteration.

+                lastOffset = suballoc.offset + suballoc.size;

+                ++nextAlloc2ndIndex;

+            }

+            // We are at the end.

+            else

+            {

+                if(lastOffset < freeSpace2ndTo1stEnd)

+                {

+                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.

+                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;

+                    inoutStats.unusedSize += unusedRangeSize;

+                    ++inoutStats.unusedRangeCount;

+                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);

+                }

+

+                // End of loop.

+                lastOffset = freeSpace2ndTo1stEnd;

+            }

+        }

+    }

+

+    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;

+    while(lastOffset < size)

+    {

+        // Find next non-null allocation or move nextAllocIndex to the end.

+        while(nextAlloc1stIndex < suballoc1stCount &&

+            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)

+        {

+            ++nextAlloc1stIndex;

+        }

+

+        // Found non-null allocation.

+        if(nextAlloc1stIndex < suballoc1stCount)

+        {

+            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

+

+            // 1. Process free space before this allocation.

+            if(lastOffset < suballoc.offset)

+            {

+                // There is free space from lastOffset to suballoc.offset.

+                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;

+                inoutStats.unusedSize += unusedRangeSize;

+                ++inoutStats.unusedRangeCount;

+                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);

+            }

+

+            // 2. Process this allocation.

+            // There is allocation with suballoc.offset, suballoc.size.

+            ++inoutStats.allocationCount;

+

+            // 3. Prepare for next iteration.

+            lastOffset = suballoc.offset + suballoc.size;

+            ++nextAlloc1stIndex;

+        }

+        // We are at the end.

+        else

+        {

+            if(lastOffset < size)

+            {

+                // There is free space from lastOffset to size.

+                const VkDeviceSize unusedRangeSize = size - lastOffset;

+                inoutStats.unusedSize += unusedRangeSize;

+                ++inoutStats.unusedRangeCount;

+                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);

+            }

+

+            // End of loop.

+            lastOffset = size;

+        }

+    }

+}

+

+#if VMA_STATS_STRING_ENABLED

+// Writes a detailed JSON map of this block. Two passes over the same layout:
+// the first only counts allocations / unused ranges and sums used bytes (the
+// totals PrintDetailedMap_Begin needs up front); the second emits each range.
+void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const

+{

+    // TODO include 2nd vector: both passes below walk suballocations2nd only

+    // when m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER; other non-empty

+    // 2nd-vector modes are not printed.

+

+    const VkDeviceSize size = GetSize();

+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+    const size_t suballoc1stCount = suballocations1st.size();

+    const size_t suballoc2ndCount = suballocations2nd.size();

+

+    // FIRST PASS

+

+    size_t unusedRangeCount = 0;

+    VkDeviceSize usedBytes = 0;

+

+    VkDeviceSize lastOffset = 0;

+

+    size_t alloc2ndCount = 0;

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;

+        size_t nextAlloc2ndIndex = 0;

+        while(lastOffset < freeSpace2ndTo1stEnd)

+        {

+            // Find next non-null allocation or move nextAlloc2ndIndex to the end.

+            while(nextAlloc2ndIndex < suballoc2ndCount &&

+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)

+            {

+                ++nextAlloc2ndIndex;

+            }

+

+            // Found non-null allocation.

+            if(nextAlloc2ndIndex < suballoc2ndCount)

+            {

+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

+            

+                // 1. Process free space before this allocation.

+                if(lastOffset < suballoc.offset)

+                {

+                    // There is free space from lastOffset to suballoc.offset.

+                    ++unusedRangeCount;

+                }

+            

+                // 2. Process this allocation.

+                // There is allocation with suballoc.offset, suballoc.size.

+                ++alloc2ndCount;

+                usedBytes += suballoc.size;

+            

+                // 3. Prepare for next iteration.

+                lastOffset = suballoc.offset + suballoc.size;

+                ++nextAlloc2ndIndex;

+            }

+            // We are at the end.

+            else

+            {

+                if(lastOffset < freeSpace2ndTo1stEnd)

+                {

+                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.

+                    ++unusedRangeCount;

+                }

+

+                // End of loop.

+                lastOffset = freeSpace2ndTo1stEnd;

+            }

+        }

+    }

+

+    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;

+    size_t alloc1stCount = 0;

+    while(lastOffset < size)

+    {

+        // Find next non-null allocation or move nextAllocIndex to the end.

+        while(nextAlloc1stIndex < suballoc1stCount &&

+            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)

+        {

+            ++nextAlloc1stIndex;

+        }

+

+        // Found non-null allocation.

+        if(nextAlloc1stIndex < suballoc1stCount)

+        {

+            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

+            

+            // 1. Process free space before this allocation.

+            if(lastOffset < suballoc.offset)

+            {

+                // There is free space from lastOffset to suballoc.offset.

+                ++unusedRangeCount;

+            }

+            

+            // 2. Process this allocation.

+            // There is allocation with suballoc.offset, suballoc.size.

+            ++alloc1stCount;

+            usedBytes += suballoc.size;

+            

+            // 3. Prepare for next iteration.

+            lastOffset = suballoc.offset + suballoc.size;

+            ++nextAlloc1stIndex;

+        }

+        // We are at the end.

+        else

+        {

+            if(lastOffset < size)

+            {

+                // There is free space from lastOffset to size.

+                ++unusedRangeCount;

+            }

+

+            // End of loop.

+            lastOffset = size;

+        }

+    }

+

+    const VkDeviceSize unusedBytes = size - usedBytes;

+    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

+

+    // SECOND PASS

+    lastOffset = 0;

+

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;

+        size_t nextAlloc2ndIndex = 0;

+        while(lastOffset < freeSpace2ndTo1stEnd)

+        {

+            // Find next non-null allocation or move nextAlloc2ndIndex to the end.

+            while(nextAlloc2ndIndex < suballoc2ndCount &&

+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)

+            {

+                ++nextAlloc2ndIndex;

+            }

+

+            // Found non-null allocation.

+            if(nextAlloc2ndIndex < suballoc2ndCount)

+            {

+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

+            

+                // 1. Process free space before this allocation.

+                if(lastOffset < suballoc.offset)

+                {

+                    // There is free space from lastOffset to suballoc.offset.

+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;

+                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);

+                }

+            

+                // 2. Process this allocation.

+                // There is allocation with suballoc.offset, suballoc.size.

+                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

+            

+                // 3. Prepare for next iteration.

+                lastOffset = suballoc.offset + suballoc.size;

+                ++nextAlloc2ndIndex;

+            }

+            // We are at the end.

+            else

+            {

+                if(lastOffset < freeSpace2ndTo1stEnd)

+                {

+                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.

+                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;

+                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);

+                }

+

+                // End of loop.

+                lastOffset = freeSpace2ndTo1stEnd;

+            }

+        }

+    }

+

+    nextAlloc1stIndex = m_1stNullItemsBeginCount;

+    while(lastOffset < size)

+    {

+        // Find next non-null allocation or move nextAllocIndex to the end.

+        while(nextAlloc1stIndex < suballoc1stCount &&

+            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)

+        {

+            ++nextAlloc1stIndex;

+        }

+

+        // Found non-null allocation.

+        if(nextAlloc1stIndex < suballoc1stCount)

+        {

+            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

+            

+            // 1. Process free space before this allocation.

+            if(lastOffset < suballoc.offset)

+            {

+                // There is free space from lastOffset to suballoc.offset.

+                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;

+                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);

+            }

+            

+            // 2. Process this allocation.

+            // There is allocation with suballoc.offset, suballoc.size.

+            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

+            

+            // 3. Prepare for next iteration.

+            lastOffset = suballoc.offset + suballoc.size;

+            ++nextAlloc1stIndex;

+        }

+        // We are at the end.

+        else

+        {

+            if(lastOffset < size)

+            {

+                // There is free space from lastOffset to size.

+                const VkDeviceSize unusedRangeSize = size - lastOffset;

+                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);

+            }

+

+            // End of loop.

+            lastOffset = size;

+        }

+    }

+

+    PrintDetailedMap_End(json);

+}

+#endif // #if VMA_STATS_STRING_ENABLED

+

+bool VmaBlockMetadata_Linear::CreateAllocationRequest(

+    uint32_t currentFrameIndex,

+    uint32_t frameInUseCount,

+    VkDeviceSize bufferImageGranularity,

+    VkDeviceSize allocSize,

+    VkDeviceSize allocAlignment,

+    VmaSuballocationType allocType,

+    bool canMakeOtherLost,

+    VmaAllocationRequest* pAllocationRequest)

+{

+    VMA_ASSERT(allocSize > 0);

+    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);

+    VMA_ASSERT(pAllocationRequest != VMA_NULL);

+    VMA_HEAVY_ASSERT(Validate());

+

+    SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+

+    if(suballocations2nd.empty())

+    {

+        // Try to allocate at the end of 1st vector.

+        VkDeviceSize resultBaseOffset = 0;

+        if(!suballocations1st.empty())

+        {

+            const VmaSuballocation& lastSuballoc = suballocations1st.back();

+            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;

+        }

+

+        // Start from offset equal to beginning of free space.

+        VkDeviceSize resultOffset = resultBaseOffset;

+

+        // Apply VMA_DEBUG_MARGIN at the beginning.

+        if(VMA_DEBUG_MARGIN > 0)

+        {

+            resultOffset += VMA_DEBUG_MARGIN;

+        }

+

+        // Apply alignment.

+        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

+

+        // Check previous suballocations for BufferImageGranularity conflicts.

+        // Make bigger alignment if necessary.

+        if(bufferImageGranularity > 1 && !suballocations1st.empty())

+        {

+            bool bufferImageGranularityConflict = false;

+            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )

+            {

+                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];

+                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))

+                {

+                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))

+                    {

+                        bufferImageGranularityConflict = true;

+                        break;

+                    }

+                }

+                else

+                    // Already on previous page.

+                    break;

+            }

+            if(bufferImageGranularityConflict)

+            {

+                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);

+            }

+        }

+

+        // There is enough free space at the end after alignment.

+        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= GetSize())

+        {

+            // Check next suballocations for BufferImageGranularity conflicts {when there are some}.

+            // If conflict exists, allocation cannot be made here.

+

+            // All tests passed: Success.

+            pAllocationRequest->offset = resultOffset;

+            pAllocationRequest->sumFreeSize = GetSize() - resultBaseOffset;

+            pAllocationRequest->sumItemSize = 0;

+            // pAllocationRequest->item unused.

+            pAllocationRequest->itemsToMakeLostCount = 0;

+            return true;

+        }

+    }

+

+    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the

+    // beginning of 1st vector as the end of free space.

+    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        VkDeviceSize resultBaseOffset = 0;

+        if(!suballocations2nd.empty())

+        {

+            const VmaSuballocation& lastSuballoc = suballocations2nd.back();

+            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;

+        }

+

+        // Start from offset equal to beginning of free space.

+        VkDeviceSize resultOffset = resultBaseOffset;

+

+        // Apply VMA_DEBUG_MARGIN at the beginning.

+        if(VMA_DEBUG_MARGIN > 0)

+        {

+            resultOffset += VMA_DEBUG_MARGIN;

+        }

+

+        // Apply alignment.

+        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

+

+        // Check previous suballocations for BufferImageGranularity conflicts.

+        // Make bigger alignment if necessary.

+        if(bufferImageGranularity > 1 && !suballocations2nd.empty())

+        {

+            bool bufferImageGranularityConflict = false;

+            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )

+            {

+                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];

+                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))

+                {

+                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))

+                    {

+                        bufferImageGranularityConflict = true;

+                        break;

+                    }

+                }

+                else

+                    // Already on previous page.

+                    break;

+            }

+            if(bufferImageGranularityConflict)

+            {

+                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);

+            }

+        }

+

+        // There is enough free space at the end after alignment.

+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;

+        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpace2ndTo1stEnd)

+        {

+            // Check next suballocations for BufferImageGranularity conflicts.

+            // If conflict exists, allocation cannot be made here.

+            if(bufferImageGranularity > 1)

+            {

+                for(size_t nextSuballocIndex = m_1stNullItemsBeginCount;

+                    nextSuballocIndex < suballocations1st.size();

+                    nextSuballocIndex++)

+                {

+                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];

+                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))

+                    {

+                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))

+                        {

+                            return false;

+                        }

+                    }

+                    else

+                    {

+                        // Already on next page.

+                        break;

+                    }

+                }

+            }

+

+            // All tests passed: Success.

+            pAllocationRequest->offset = resultOffset;

+            pAllocationRequest->sumFreeSize = freeSpace2ndTo1stEnd - resultBaseOffset;

+            pAllocationRequest->sumItemSize = 0;

+            // pAllocationRequest->item unused.

+            pAllocationRequest->itemsToMakeLostCount = 0;

+            return true;

+        }

+    }

+

+    return false;

+}

+

+// Lost-allocation support is not implemented for the linear algorithm yet:

+// asserts in debug builds and reports failure (no allocations are made lost).

+// TODO: implement (see the VmaBlockMetadata_Generic counterpart).

+bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(

+    uint32_t currentFrameIndex,

+    uint32_t frameInUseCount,

+    VmaAllocationRequest* pAllocationRequest)

+{

+    VMA_ASSERT(0 && "TODO");

+    return false;

+}

+

+// Making allocations lost is not supported by the linear algorithm yet:

+// asserts in debug builds and returns 0 (number of allocations made lost).

+// TODO: implement.

+uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)

+{

+    VMA_ASSERT(0 && "TODO");

+    return 0;

+}

+

+// Corruption detection (checking debug-margin magic values in pBlockData)

+// is not implemented for the linear algorithm yet; unconditionally reports

+// success. TODO: implement.

+VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)

+{

+    // TODO

+    return VK_SUCCESS;

+}

+

+// Commits an allocation previously found by CreateAllocationRequest.

+// The new suballocation is appended either to the 1st vector (when its offset

+// lies at/after the current end of 1st) or to the 2nd vector, which switches

+// this block into 2-part ring-buffer mode on first such use.

+void VmaBlockMetadata_Linear::Alloc(

+    const VmaAllocationRequest& request,

+    VmaSuballocationType type,

+    VkDeviceSize allocSize,

+    VmaAllocation hAllocation)

+{

+    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

+    SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+

+    // First allocation.

+    if(suballocations1st.empty())

+    {

+        suballocations1st.push_back(newSuballoc);

+    }

+    else

+    {

+        // New allocation at the end of 1st vector.

+        if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)

+        {

+            // Check if it fits before the end of the block.

+            VMA_ASSERT(request.offset + allocSize <= GetSize());

+            suballocations1st.push_back(newSuballoc);

+        }

+        // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.

+        else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)

+        {

+            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+

+            // Validate/update the mode before appending to the 2nd vector.

+            switch(m_2ndVectorMode)

+            {

+            case SECOND_VECTOR_EMPTY:

+                // First allocation from second part ring buffer.

+                VMA_ASSERT(suballocations2nd.empty());

+                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;

+                break;

+            case SECOND_VECTOR_RING_BUFFER:

+                // 2-part ring buffer is already started.

+                VMA_ASSERT(!suballocations2nd.empty());

+                break;

+            case SECOND_VECTOR_DOUBLE_STACK:

+                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");

+                break;

+            default:

+                VMA_ASSERT(0);

+            }

+

+            suballocations2nd.push_back(newSuballoc);

+        }

+        else

+        {

+            // Offset neither extends 1st nor fits before it - request is stale/invalid.

+            VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");

+        }

+    }

+}

+

+// Frees by allocation handle: the allocation's stored offset is the lookup

+// key, so this simply delegates to FreeAtOffset().

+void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)

+{

+    FreeAtOffset(allocation->GetOffset());

+}

+

+// Releases the suballocation that starts at 'offset'.

+// Fast paths: the first live item of the 1st vector, the last item of the 2nd

+// vector (ring-buffer mode), or the last item of the 1st vector (no 2nd

+// vector). Otherwise a linear scan marks the item as a null item and

+// CleanupAfterFree() compacts/normalizes the metadata state.

+void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)

+{

+    SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+    VMA_ASSERT(!suballocations1st.empty());

+

+    // First allocation: Mark it as next empty at the beginning.

+    VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];

+    if(firstSuballoc.offset == offset)

+    {

+        firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

+        firstSuballoc.hAllocation = VK_NULL_HANDLE;

+        ++m_1stNullItemsBeginCount;

+        CleanupAfterFree();

+        return;

+    }

+

+    // Last allocation in 2-part ring buffer.

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        VmaSuballocation& lastSuballoc = suballocations2nd.back();

+        if(lastSuballoc.offset == offset)

+        {

+            suballocations2nd.pop_back();

+            CleanupAfterFree();

+            return;

+        }

+    }

+    // Last allocation in 1st vector.

+    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)

+    {

+        VmaSuballocation& lastSuballoc = suballocations1st.back();

+        if(lastSuballoc.offset == offset)

+        {

+            suballocations1st.pop_back();

+            CleanupAfterFree();

+            return;

+        }

+    }

+

+    // Item from the middle of 1st vector.

+    // TODO optimize using binary search.

+    for(size_t i = m_1stNullItemsBeginCount + 1; i < suballocations1st.size(); ++i)

+    {

+        VmaSuballocation& currSuballoc = suballocations1st[i];

+        if(currSuballoc.offset == offset)

+        {

+            currSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

+            currSuballoc.hAllocation = VK_NULL_HANDLE;

+            ++m_1stNullItemsMiddleCount;

+            CleanupAfterFree();

+            return;

+        }

+    }

+

+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)

+    {

+        // Item from the middle of 2nd vector.

+        // TODO optimize using binary search.

+        // NOTE(review): size() - 1 skips the last item (handled above); it

+        // relies on the 2nd vector being non-empty in ring-buffer mode - if it

+        // were empty, the unsigned subtraction would wrap. TODO confirm invariant.

+        for(size_t i = 0; i < suballocations2nd.size() - 1; ++i)

+        {

+            VmaSuballocation& currSuballoc = suballocations2nd[i];

+            if(currSuballoc.offset == offset)

+            {

+                currSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

+                currSuballoc.hAllocation = VK_NULL_HANDLE;

+                ++m_2ndNullItemsCount;

+                CleanupAfterFree();

+                return;

+            }

+        }

+    }

+

+    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");

+}

+

+// Compaction heuristic for the 1st vector: worth doing only when the vector

+// is non-trivially large (> 32 items) and null (freed) items outnumber live

+// items by at least 3:2 (nullCount * 2 >= liveCount * 3).

+bool VmaBlockMetadata_Linear::ShouldCompact1st() const

+{

+    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;

+    const size_t suballocCount = AccessSuballocations1st().size();

+    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;

+}

+

+// Restores invariants after a free: strips null items from the edges of both

+// vectors, compacts the 1st vector when null items dominate (ShouldCompact1st),

+// and swaps the roles of the two vectors when the 1st becomes empty.

+void VmaBlockMetadata_Linear::CleanupAfterFree()

+{

+    SuballocationVectorType& suballocations1st = AccessSuballocations1st();

+

+    if(IsEmpty())

+    {

+        // Nothing allocated anywhere - reset to the initial state.

+        suballocations1st.clear();

+        m_1stNullItemsBeginCount = 0;

+        m_1stNullItemsMiddleCount = 0;

+        m_2ndVectorMode = SECOND_VECTOR_EMPTY;

+    }

+    else

+    {

+        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

+        const size_t suballoc1stCount = suballocations1st.size();

+        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;

+        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

+

+        // Find more null items at the beginning of 1st vector.

+        // (They move from the "middle" to the "begin" null-item accounting.)

+        while(m_1stNullItemsBeginCount < suballoc1stCount &&

+            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)

+        {

+            ++m_1stNullItemsBeginCount;

+            --m_1stNullItemsMiddleCount;

+        }

+

+        // Find more null items at the end of 1st vector.

+        while(m_1stNullItemsMiddleCount > 0 &&

+            suballocations1st.back().hAllocation == VK_NULL_HANDLE)

+        {

+            --m_1stNullItemsMiddleCount;

+            suballocations1st.pop_back();

+        }

+

+        // Find more null items at the end of 2nd vector.

+        while(m_2ndNullItemsCount > 0 &&

+            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)

+        {

+            --m_2ndNullItemsCount;

+            suballocations2nd.pop_back();

+        }

+

+        if(ShouldCompact1st())

+        {

+            // Shift all live items to the front, dropping every null item.

+            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;

+            size_t srcIndex = m_1stNullItemsBeginCount;

+            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)

+            {

+                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)

+                {

+                    ++srcIndex;

+                }

+                if(dstIndex != srcIndex)

+                {

+                    suballocations1st[dstIndex] = suballocations1st[srcIndex];

+                }

+                ++srcIndex;

+            }

+            suballocations1st.resize(nonNullItemCount);

+            m_1stNullItemsBeginCount = 0;

+            m_1stNullItemsMiddleCount = 0;

+        }

+

+        // 2nd vector became empty.

+        if(suballocations2nd.empty())

+        {

+            m_2ndVectorMode = SECOND_VECTOR_EMPTY;

+        }

+

+        // 1st vector became empty.

+        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)

+        {

+            suballocations1st.clear();

+            m_1stNullItemsBeginCount = 0;

+

+            if(!suballocations2nd.empty())

+            {

+                // Swap 1st with 2nd. Now 2nd is empty.

+                m_2ndVectorMode = SECOND_VECTOR_EMPTY;

+                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;

+                // Reclassify leading null items of the promoted vector.

+                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&

+                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)

+                {

+                    ++m_1stNullItemsBeginCount;

+                    --m_1stNullItemsMiddleCount;

+                }

+                m_2ndNullItemsCount = 0;

+                m_1stVectorIndex ^= 1;

+            }

+        }

+    }

+

+    VMA_HEAVY_ASSERT(Validate());

+}

+

+

+////////////////////////////////////////////////////////////////////////////////

 // class VmaDeviceMemoryBlock

 

 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :

-    m_Metadata(hAllocator),

+    m_pMetadata(VMA_NULL),

     m_MemoryTypeIndex(UINT32_MAX),

     m_Id(0),

     m_hMemory(VK_NULL_HANDLE),

@@ -6880,10 +8182,12 @@
 }

 

 void VmaDeviceMemoryBlock::Init(

+    VmaAllocator hAllocator,

     uint32_t newMemoryTypeIndex,

     VkDeviceMemory newMemory,

     VkDeviceSize newSize,

-    uint32_t id)

+    uint32_t id,

+    bool linearAlgorithm)

 {

     VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

 

@@ -6891,29 +8195,40 @@
     m_Id = id;

     m_hMemory = newMemory;

 

-    m_Metadata.Init(newSize);

+    if(linearAlgorithm)

+    {

+        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);

+    }

+    else

+    {

+        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);

+    }

+    m_pMetadata->Init(newSize);

 }

 

 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)

 {

     // This is the most important assert in the entire library.

     // Hitting it means you have some memory leak - unreleased VmaAllocation objects.

-    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

-    

+    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

+

     VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);

-    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);

+    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);

     m_hMemory = VK_NULL_HANDLE;

+

+    // m_pMetadata was allocated in Init() - release it with the same allocator.

+    vma_delete(allocator, m_pMetadata);

+    m_pMetadata = VMA_NULL;

 }

 

 bool VmaDeviceMemoryBlock::Validate() const

 {

     if((m_hMemory == VK_NULL_HANDLE) ||

-        (m_Metadata.GetSize() == 0))

+        (m_pMetadata->GetSize() == 0))

     {

         return false;

     }

     

-    return m_Metadata.Validate();

+    // Delegate the detailed check to the metadata implementation.

+    return m_pMetadata->Validate();

 }

 

 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)

@@ -6925,7 +8240,7 @@
         return res;

     }

 

-    res = m_Metadata.CheckCorruption(pData);

+    res = m_pMetadata->CheckCorruption(pData);

 

     Unmap(hAllocator, 1);

 

@@ -7112,7 +8427,8 @@
         createInfo.maxBlockCount,

         (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),

         createInfo.frameInUseCount,

-        true), // isCustomPool

+        true, // isCustomPool

+        (createInfo.flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0), // linearAlgorithm

     m_Id(0)

 {

 }

@@ -7133,7 +8449,8 @@
     size_t maxBlockCount,

     VkDeviceSize bufferImageGranularity,

     uint32_t frameInUseCount,

-    bool isCustomPool) :

+    bool isCustomPool,

+    bool linearAlgorithm) :

     m_hAllocator(hAllocator),

     m_MemoryTypeIndex(memoryTypeIndex),

     m_PreferredBlockSize(preferredBlockSize),

@@ -7142,6 +8459,7 @@
     m_BufferImageGranularity(bufferImageGranularity),

     m_FrameInUseCount(frameInUseCount),

     m_IsCustomPool(isCustomPool),

+    m_LinearAlgorithm(linearAlgorithm),

     m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),

     m_HasEmptyBlock(false),

     m_pDefragmentator(VMA_NULL),

@@ -7188,7 +8506,7 @@
         const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];

         VMA_ASSERT(pBlock);

         VMA_HEAVY_ASSERT(pBlock->Validate());

-        pBlock->m_Metadata.AddPoolStats(*pStats);

+        pBlock->m_pMetadata->AddPoolStats(*pStats);

     }

 }

 

@@ -7229,7 +8547,7 @@
         VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];

         VMA_ASSERT(pCurrBlock);

         VmaAllocationRequest currRequest = {};

-        if(pCurrBlock->m_Metadata.CreateAllocationRequest(

+        if(pCurrBlock->m_pMetadata->CreateAllocationRequest(

             currentFrameIndex,

             m_FrameInUseCount,

             m_BufferImageGranularity,

@@ -7252,13 +8570,13 @@
             }

             

             // We no longer have an empty Allocation.

-            if(pCurrBlock->m_Metadata.IsEmpty())

+            if(pCurrBlock->m_pMetadata->IsEmpty())

             {

                 m_HasEmptyBlock = false;

             }

             

             *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);

-            pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, size, *pAllocation);

+            pCurrBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);

             (*pAllocation)->InitBlockAllocation(

                 hCurrentPool,

                 pCurrBlock,

@@ -7341,7 +8659,7 @@
         if(res == VK_SUCCESS)

         {

             VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];

-            VMA_ASSERT(pBlock->m_Metadata.GetSize() >= size);

+            VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

 

             if(mapped)

             {

@@ -7354,7 +8672,7 @@
 

             // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.

             VmaAllocationRequest allocRequest;

-            if(pBlock->m_Metadata.CreateAllocationRequest(

+            if(pBlock->m_pMetadata->CreateAllocationRequest(

                 currentFrameIndex,

                 m_FrameInUseCount,

                 m_BufferImageGranularity,

@@ -7365,7 +8683,7 @@
                 &allocRequest))

             {

                 *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);

-                pBlock->m_Metadata.Alloc(allocRequest, suballocType, size, *pAllocation);

+                pBlock->m_pMetadata->Alloc(allocRequest, suballocType, size, *pAllocation);

                 (*pAllocation)->InitBlockAllocation(

                     hCurrentPool,

                     pBlock,

@@ -7416,7 +8734,7 @@
                 VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];

                 VMA_ASSERT(pCurrBlock);

                 VmaAllocationRequest currRequest = {};

-                if(pCurrBlock->m_Metadata.CreateAllocationRequest(

+                if(pCurrBlock->m_pMetadata->CreateAllocationRequest(

                     currentFrameIndex,

                     m_FrameInUseCount,

                     m_BufferImageGranularity,

@@ -7453,19 +8771,19 @@
                     }

                 }

 

-                if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(

+                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(

                     currentFrameIndex,

                     m_FrameInUseCount,

                     &bestRequest))

                 {

                     // We no longer have an empty Allocation.

-                    if(pBestRequestBlock->m_Metadata.IsEmpty())

+                    if(pBestRequestBlock->m_pMetadata->IsEmpty())

                     {

                         m_HasEmptyBlock = false;

                     }

                     // Allocate from this pBlock.

                     *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);

-                    pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, size, *pAllocation);

+                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);

                     (*pAllocation)->InitBlockAllocation(

                         hCurrentPool,

                         pBestRequestBlock,

@@ -7531,13 +8849,13 @@
             pBlock->Unmap(m_hAllocator, 1);

         }

 

-        pBlock->m_Metadata.Free(hAllocation);

+        pBlock->m_pMetadata->Free(hAllocation);

         VMA_HEAVY_ASSERT(pBlock->Validate());

 

         VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", memTypeIndex);

 

         // pBlock became empty after this deallocation.

-        if(pBlock->m_Metadata.IsEmpty())

+        if(pBlock->m_pMetadata->IsEmpty())

         {

             // Already has empty Allocation. We don't want to have two, so delete this one.

             if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)

@@ -7556,7 +8874,7 @@
         else if(m_HasEmptyBlock)

         {

             VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();

-            if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)

+            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)

             {

                 pBlockToDelete = pLastBlock;

                 m_Blocks.pop_back();

@@ -7582,7 +8900,7 @@
     VkDeviceSize result = 0;

     for(size_t i = m_Blocks.size(); i--; )

     {

-        result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());

+        result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());

         if(result >= m_PreferredBlockSize)

         {

             break;

@@ -7609,7 +8927,7 @@
     // Bubble sort only until first swap.

     for(size_t i = 1; i < m_Blocks.size(); ++i)

     {

-        if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())

+        if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())

         {

             VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);

             return;

@@ -7634,10 +8952,12 @@
     // Create new Allocation for it.

     VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);

     pBlock->Init(

+        m_hAllocator,

         m_MemoryTypeIndex,

         mem,

         allocInfo.allocationSize,

-        m_NextBlockId++);

+        m_NextBlockId++,

+        m_LinearAlgorithm);

 

     m_Blocks.push_back(pBlock);

     if(pNewBlockIndex != VMA_NULL)

@@ -7700,7 +9020,7 @@
         json.ContinueString(m_Blocks[i]->GetId());

         json.EndString();

 

-        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);

+        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);

     }

     json.EndObject();

 

@@ -7757,14 +9077,14 @@
     for(size_t blockIndex = m_Blocks.size(); blockIndex--; )

     {

         VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];

-        if(pBlock->m_Metadata.IsEmpty())

+        if(pBlock->m_pMetadata->IsEmpty())

         {

             if(m_Blocks.size() > m_MinBlockCount)

             {

                 if(pDefragmentationStats != VMA_NULL)

                 {

                     ++pDefragmentationStats->deviceMemoryBlocksFreed;

-                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();

+                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();

                 }

 

                 VmaVectorRemove(m_Blocks, blockIndex);

@@ -7800,7 +9120,7 @@
     {

         VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];

         VMA_ASSERT(pBlock);

-        lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);

+        lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);

     }

     if(pLostAllocationCount != VMA_NULL)

     {

@@ -7842,7 +9162,7 @@
         VMA_ASSERT(pBlock);

         VMA_HEAVY_ASSERT(pBlock->Validate());

         VmaStatInfo allocationStatInfo;

-        pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);

+        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);

         VmaAddStatInfo(pStats->total, allocationStatInfo);

         VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);

         VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);

@@ -7962,7 +9282,7 @@
         {

             BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];

             VmaAllocationRequest dstAllocRequest;

-            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(

+            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(

                 m_CurrentFrameIndex,

                 m_pBlockVector->GetFrameInUseCount(),

                 m_pBlockVector->GetBufferImageGranularity(),

@@ -8009,8 +9329,8 @@
                     VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);

                 }

                 

-                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);

-                pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);

+                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);

+                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                 

                 allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

 

@@ -8698,7 +10018,8 @@
             SIZE_MAX,

             GetBufferImageGranularity(),

             pCreateInfo->frameInUseCount,

-            false); // isCustomPool

+            false, // isCustomPool

+            false); // linearAlgorithm

         // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,

         // becase minBlockCount is 0.

         m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));

@@ -9890,7 +11211,7 @@
             // 2. Adjust to whole block.

             const VkDeviceSize allocationOffset = hAllocation->GetOffset();

             VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);

-            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_Metadata.GetSize();

+            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();

             memRange.offset += allocationOffset;

             memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);