Update Flush and Invalidate Memory calls in Vulkan to take offset and size
Bug: skia:
Change-Id: I4faf9f431422f27096fce4605be281c28935df08
Reviewed-on: https://skia-review.googlesource.com/111782
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
(cherry picked from commit e35a99ed706dcd0407c7ca4373ed97d21d988069)
Reviewed-on: https://skia-review.googlesource.com/111840
Reviewed-by: Hal Canary <halcanary@google.com>
diff --git a/src/gpu/vk/GrVkBuffer.cpp b/src/gpu/vk/GrVkBuffer.cpp
index 54713cb..f65b15d 100644
--- a/src/gpu/vk/GrVkBuffer.cpp
+++ b/src/gpu/vk/GrVkBuffer.cpp
@@ -206,7 +206,13 @@
SkASSERT(this->vkIsMapped());
if (fDesc.fDynamic) {
- GrVkMemory::FlushMappedAlloc(gpu, this->alloc(), fMappedSize);
+ // We currently don't use fOffset
+ SkASSERT(0 == fOffset);
+ VkDeviceSize flushOffset = this->alloc().fOffset + fOffset;
+ VkDeviceSize flushSize = gpu->vkCaps().canUseWholeSizeOnFlushMappedMemory() ? VK_WHOLE_SIZE
+ : fMappedSize;
+
+ GrVkMemory::FlushMappedAlloc(gpu, this->alloc(), flushOffset, flushSize);
VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
fMapPtr = nullptr;
fMappedSize = 0;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 36c24ca..9428e13 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -640,7 +640,7 @@
height);
}
- GrVkMemory::FlushMappedAlloc(this, alloc, size);
+ GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));
return true;
@@ -1204,7 +1204,7 @@
}
}
}
- GrVkMemory::FlushMappedAlloc(gpu, alloc, mapSize);
+ GrVkMemory::FlushMappedAlloc(gpu, alloc, mapOffset, mapSize);
GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
return true;
}
@@ -2053,7 +2053,8 @@
// we can copy the data out of the buffer.
this->submitCommandBuffer(kForce_SyncQueue);
void* mappedMemory = transferBuffer->map();
- GrVkMemory::InvalidateMappedAlloc(this, transferBuffer->alloc());
+ const GrVkAlloc& transAlloc = transferBuffer->alloc();
+ GrVkMemory::InvalidateMappedAlloc(this, transAlloc, transAlloc.fOffset, VK_WHOLE_SIZE);
if (copyFromOrigin) {
uint32_t skipRows = region.imageExtent.height - height;
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index d744a7a..e391d02 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -298,32 +298,53 @@
return flags;
}
-void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize size) {
+void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size) {
if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+#ifdef SK_DEBUG
+ SkASSERT(offset >= alloc.fOffset);
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ SkASSERT(0 == (offset & (alignment-1)));
+ if (size != VK_WHOLE_SIZE) {
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)) ||
+ (offset + size) == (alloc.fOffset + alloc.fSize));
+ SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+ }
+#endif
+
VkMappedMemoryRange mappedMemoryRange;
memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mappedMemoryRange.memory = alloc.fMemory;
- mappedMemoryRange.offset = alloc.fOffset;
- if (gpu->vkCaps().canUseWholeSizeOnFlushMappedMemory()) {
- mappedMemoryRange.size = VK_WHOLE_SIZE; // Size of what we mapped
- } else {
- SkASSERT(size > 0);
- mappedMemoryRange.size = size;
- }
+ mappedMemoryRange.offset = offset;
+ mappedMemoryRange.size = size;
GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(),
1, &mappedMemoryRange));
}
}
-void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
+ VkDeviceSize offset, VkDeviceSize size) {
if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+#ifdef SK_DEBUG
+ SkASSERT(offset >= alloc.fOffset);
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ SkASSERT(0 == (offset & (alignment-1)));
+ if (size != VK_WHOLE_SIZE) {
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)) ||
+ (offset + size) == (alloc.fOffset + alloc.fSize));
+ SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+ }
+#endif
+
VkMappedMemoryRange mappedMemoryRange;
memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mappedMemoryRange.memory = alloc.fMemory;
- mappedMemoryRange.offset = alloc.fOffset;
- mappedMemoryRange.size = VK_WHOLE_SIZE; // Size of what we mapped
+ mappedMemoryRange.offset = offset;
+ mappedMemoryRange.size = size;
GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(),
1, &mappedMemoryRange));
}
diff --git a/src/gpu/vk/GrVkMemory.h b/src/gpu/vk/GrVkMemory.h
index 8dd43bb..baf843e 100644
--- a/src/gpu/vk/GrVkMemory.h
+++ b/src/gpu/vk/GrVkMemory.h
@@ -38,8 +38,10 @@
VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
- void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize size);
- void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+ void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size);
+ void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size);
}
class GrVkFreeListAlloc {