Added support for lost allocations in ring buffer.
Fixed minor bugs in the linear allocator (cached GetSize() result, tightened asserts) and re-enabled ManuallyTestLinearAllocator in the test suite.
diff --git a/src/Tests.cpp b/src/Tests.cpp
index a2415a2..a79b9a8 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -101,6 +101,8 @@
static const uint32_t IMAGE_BYTES_PER_PIXEL = 1;
+static uint32_t g_FrameIndex = 0;
+
struct BufferInfo
{
VkBuffer Buffer = VK_NULL_HANDLE;
@@ -1701,6 +1703,95 @@
}
}
+ // Test ring buffer with lost allocations.
+ {
+ // Allocate number of buffers until pool is full.
+ // Notice CAN_BECOME_LOST flag and call to vmaSetCurrentFrameIndex.
+ allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
+ res = VK_SUCCESS;
+ for(size_t i = 0; res == VK_SUCCESS; ++i)
+ {
+ vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
+
+ bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
+
+ BufferInfo newBufInfo;
+ res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+ &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+ if(res == VK_SUCCESS)
+ bufInfo.push_back(newBufInfo);
+ }
+
+ // Free first half of it.
+ {
+ const size_t buffersToDelete = bufInfo.size() / 2;
+ for(size_t i = 0; i < buffersToDelete; ++i)
+ {
+ vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
+ }
+ bufInfo.erase(bufInfo.begin(), bufInfo.begin() + buffersToDelete);
+ }
+
+ // Allocate number of buffers until pool is full again.
+ // This way we make sure the ring buffer wraps around.
+ res = VK_SUCCESS;
+ for(size_t i = 0; res == VK_SUCCESS; ++i)
+ {
+ vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
+
+ bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
+
+ BufferInfo newBufInfo;
+ res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+ &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+ if(res == VK_SUCCESS)
+ bufInfo.push_back(newBufInfo);
+ }
+
+ VkDeviceSize firstNewOffset;
+ {
+ vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
+
+ // Allocate a large buffer with CAN_MAKE_OTHER_LOST.
+ allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
+ bufCreateInfo.size = bufSizeMax;
+
+ BufferInfo newBufInfo;
+ res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+ &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+ assert(res == VK_SUCCESS);
+ bufInfo.push_back(newBufInfo);
+ firstNewOffset = allocInfo.offset;
+
+ // Make sure at least one buffer from the beginning became lost.
+ vmaGetAllocationInfo(g_hAllocator, bufInfo[0].Allocation, &allocInfo);
+ assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
+ }
+
+ // Allocate more buffers with CAN_MAKE_OTHER_LOST until we wrap around with this.
+ size_t newCount = 1;
+ for(;;)
+ {
+ vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
+
+ bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
+
+ BufferInfo newBufInfo;
+ res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+ &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+ assert(res == VK_SUCCESS);
+ bufInfo.push_back(newBufInfo);
+ ++newCount;
+ if(allocInfo.offset < firstNewOffset)
+ break;
+ }
+
+ // Destroy all the buffers in forward order.
+ for(size_t i = 0; i < bufInfo.size(); ++i)
+ vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
+ bufInfo.clear();
+ }
+
vmaDestroyPool(g_hAllocator, pool);
}
@@ -3541,10 +3632,12 @@
{
wprintf(L"TESTING:\n");
- // TEMP tests
-TestLinearAllocator();
-ManuallyTestLinearAllocator();
-return;
+ // TODO delete
+ {
+ TestLinearAllocator();
+ ManuallyTestLinearAllocator();
+ return;
+ }
// # Simple tests
@@ -3561,6 +3654,7 @@
TestMapping();
TestMappingMultithreaded();
TestLinearAllocator();
+ ManuallyTestLinearAllocator();
TestDefragmentationSimple();
TestDefragmentationFull();
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 810b889..84b3adc 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -8102,6 +8102,7 @@
VMA_ASSERT(pAllocationRequest != VMA_NULL);
VMA_HEAVY_ASSERT(Validate());
+ const VkDeviceSize size = GetSize();
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
@@ -8114,11 +8115,11 @@
}
// Try to allocate before 2nd.back(), or end of block if 2nd.empty().
- if(allocSize > GetSize())
+ if(allocSize > size)
{
return false;
}
- VkDeviceSize resultBaseOffset = GetSize() - allocSize;
+ VkDeviceSize resultBaseOffset = size - allocSize;
if(!suballocations2nd.empty())
{
const VmaSuballocation& lastSuballoc = suballocations2nd.back();
@@ -8208,11 +8209,12 @@
return true;
}
}
- else
+ else // !upperAddress
{
if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
{
// Try to allocate at the end of 1st vector.
+
VkDeviceSize resultBaseOffset = 0;
if(!suballocations1st.empty())
{
@@ -8259,7 +8261,7 @@
}
const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
- suballocations2nd.back().offset : GetSize();
+ suballocations2nd.back().offset : size;
// There is enough free space at the end after alignment.
if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
@@ -8300,6 +8302,8 @@
// beginning of 1st vector as the end of free space.
if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
{
+ VMA_ASSERT(!suballocations1st.empty());
+
VkDeviceSize resultBaseOffset = 0;
if(!suballocations2nd.empty())
{
@@ -8345,15 +8349,81 @@
}
}
+ pAllocationRequest->itemsToMakeLostCount = 0;
+ pAllocationRequest->sumItemSize = 0;
+ size_t index1st = m_1stNullItemsBeginCount;
+
+ if(canMakeOtherLost)
+ {
+ while(index1st < suballocations1st.size() &&
+ resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
+ {
+ // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
+ const VmaSuballocation& suballoc = suballocations1st[index1st];
+ if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ // No problem.
+ }
+ else
+ {
+ VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+ if(suballoc.hAllocation->CanBecomeLost() &&
+ suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+ {
+ ++pAllocationRequest->itemsToMakeLostCount;
+ pAllocationRequest->sumItemSize += suballoc.size;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ ++index1st;
+ }
+
+ // Check next suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, we must mark more allocations lost or fail.
+ if(bufferImageGranularity > 1)
+ {
+ while(index1st < suballocations1st.size())
+ {
+ const VmaSuballocation& suballoc = suballocations1st[index1st];
+ if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
+ {
+ if(suballoc.hAllocation != VK_NULL_HANDLE)
+ {
+ // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
+ if(suballoc.hAllocation->CanBecomeLost() &&
+ suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+ {
+ ++pAllocationRequest->itemsToMakeLostCount;
+ pAllocationRequest->sumItemSize += suballoc.size;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ }
+ else
+ {
+ // Already on next page.
+ break;
+ }
+ ++index1st;
+ }
+ }
+ }
+
// There is enough free space at the end after alignment.
- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
- if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpace2ndTo1stEnd)
+ if(index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size ||
+ index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)
{
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, allocation cannot be made here.
if(bufferImageGranularity > 1)
{
- for(size_t nextSuballocIndex = m_1stNullItemsBeginCount;
+ for(size_t nextSuballocIndex = index1st;
nextSuballocIndex < suballocations1st.size();
nextSuballocIndex++)
{
@@ -8375,10 +8445,11 @@
// All tests passed: Success.
pAllocationRequest->offset = resultOffset;
- pAllocationRequest->sumFreeSize = freeSpace2ndTo1stEnd - resultBaseOffset;
- pAllocationRequest->sumItemSize = 0;
+ pAllocationRequest->sumFreeSize =
+ (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
+ - resultBaseOffset
+ - pAllocationRequest->sumItemSize;
// pAllocationRequest->item unused.
- pAllocationRequest->itemsToMakeLostCount = 0;
return true;
}
}
@@ -8392,8 +8463,43 @@
uint32_t frameInUseCount,
VmaAllocationRequest* pAllocationRequest)
{
- VMA_ASSERT(0 && "TODO");
- return false;
+ if(pAllocationRequest->itemsToMakeLostCount == 0)
+ {
+ return true;
+ }
+
+ VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
+
+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+ size_t index1st = m_1stNullItemsBeginCount;
+ size_t madeLostCount = 0;
+ while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
+ {
+ VMA_ASSERT(index1st < suballocations1st.size());
+ VmaSuballocation& suballoc = suballocations1st[index1st];
+ if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+ VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
+ if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+ {
+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ suballoc.hAllocation = VK_NULL_HANDLE;
+ ++m_1stNullItemsMiddleCount;
+ ++madeLostCount;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ ++index1st;
+ }
+
+ CleanupAfterFree();
+ //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
+
+ return true;
}
uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)