Fixed a race condition in incremental defragmentation. VmaBlockVector::DefragmentationEnd() relied on the mutex having been locked earlier to safely mutate its data, but with incremental defragmentation this is no longer guaranteed.
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 7b31463..a466e1f 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -6523,6 +6523,7 @@
         VkCommandBuffer commandBuffer);
     void DefragmentationEnd(
         class VmaBlockVectorDefragmentationContext* pCtx,
+        uint32_t flags,
         VmaDefragmentationStats* pStats);
 
     uint32_t ProcessDefragmentations(
@@ -13180,22 +13181,36 @@
 
 void VmaBlockVector::DefragmentationEnd(
     class VmaBlockVectorDefragmentationContext* pCtx,
+    uint32_t flags,
     VmaDefragmentationStats* pStats)
 {
-    // Destroy buffers.
-    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
+    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
     {
-        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
-        if(blockCtx.hBuffer)
-        {
-            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
-                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
-        }
+        VMA_ASSERT(pCtx->mutexLocked == false);
+
+        // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
+        // lock protecting us. Since we mutate state here, we have to take the lock out now
+        m_Mutex.LockWrite();
+        pCtx->mutexLocked = true;
     }
 
-    if(pCtx->res >= VK_SUCCESS)
+    // If the mutex isn't locked we didn't do any work and there is nothing to delete.
+    if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
     {
-        FreeEmptyBlocks(pStats);
+        // Destroy buffers.
+        for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
+        {
+            VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
+            if(blockCtx.hBuffer)
+            {
+                (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+            }
+        }
+
+        if(pCtx->res >= VK_SUCCESS)
+        {
+            FreeEmptyBlocks(pStats);
+        }
     }
 
     if(pCtx->mutexLocked)
@@ -14117,7 +14132,7 @@
     for(size_t i = m_CustomPoolContexts.size(); i--; )
     {
         VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
-        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
         vma_delete(m_hAllocator, pBlockVectorCtx);
     }
     for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
@@ -14125,7 +14140,7 @@
         VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
         if(pBlockVectorCtx)
         {
-            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
             vma_delete(m_hAllocator, pBlockVectorCtx);
         }
     }
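
For reference, the fixed path is reached from the application-side incremental defragmentation loop. Below is a minimal sketch of that calling pattern, assuming the VmaDefragmentationInfo2-based API (vmaDefragmentationBegin, vmaBeginDefragmentationPass, vmaEndDefragmentationPass, vmaDefragmentationEnd) and placeholder variables (g_Allocator, allocations, allocCount); it is illustrative only and not part of this patch.

    // Sketch only: drive incremental defragmentation so that DefragmentationEnd()
    // is reached without the block-vector mutex already held.
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
    defragInfo.allocationCount = allocCount;      // placeholder: allocations to defragment
    defragInfo.pAllocations = allocations;        // placeholder array of VmaAllocation
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(g_Allocator, &defragInfo, NULL, &defragCtx);

    for(bool done = false; !done; )
    {
        VmaDefragmentationPassInfo passInfo = {};
        vmaBeginDefragmentationPass(g_Allocator, defragCtx, &passInfo);

        // Copy each passInfo.pMoves[i] to its new memory/offset (e.g. on a transfer
        // queue), then close the pass. Assuming VK_SUCCESS signals completion here.
        done = vmaEndDefragmentationPass(g_Allocator, defragCtx) == VK_SUCCESS;
    }

    // Ends up in VmaBlockVector::DefragmentationEnd(); with the incremental flag the
    // function now takes the write lock itself before destroying the staging buffers
    // and freeing empty blocks.
    vmaDefragmentationEnd(g_Allocator, defragCtx);

Taking the lock inside DefragmentationEnd() and recording it in pCtx->mutexLocked keeps the existing unlock path at the end of the function valid for both the incremental and the non-incremental case.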