Improve implementation: use an anonymous namespace for internal helpers

Internal-linkage functions and constants now live in an unnamed namespace instead of being marked `static`, as suggested in https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/pull/514
diff --git a/include/vk_mem_alloc.h b/include/vk_mem_alloc.h
index 6eba37a..6215de4 100644
--- a/include/vk_mem_alloc.h
+++ b/include/vk_mem_alloc.h
@@ -3153,7 +3153,9 @@
 
 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
 #include <cstdlib>
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
 {
     // alignment must be >= sizeof(void*)
     if(alignment < sizeof(void*))
@@ -3163,6 +3165,7 @@
 
     return memalign(alignment, size);
 }
+} // namespace
 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
 #include <cstdlib>
 
@@ -3170,7 +3173,9 @@
 #include <AvailabilityMacros.h>
 #endif
 
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
 {
     // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
     // Therefore, for now disable this specific exception until a proper solution is found.
@@ -3198,35 +3203,46 @@
         return pointer;
     return VMA_NULL;
 }
+} // namespace
 #elif defined(_WIN32)
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace {
+void* vma_aligned_alloc(size_t alignment, size_t size)
 {
     return _aligned_malloc(size, alignment);
 }
+} // namespace
 #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace {
+void* vma_aligned_alloc(size_t alignment, size_t size)
 {
     return aligned_alloc(alignment, size);
 }
+} // namespace
 #else
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
 {
     VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
     return VMA_NULL;
 }
+} // namespace
 #endif
 
+namespace
+{
 #if defined(_WIN32)
-static void vma_aligned_free(void* ptr)
+void vma_aligned_free(void* ptr)
 {
     _aligned_free(ptr);
 }
 #else
-static void vma_aligned_free(void* VMA_NULLABLE ptr)
+void vma_aligned_free(void* VMA_NULLABLE ptr)
 {
     free(ptr);
 }
 #endif
+} // namespace
 
 #ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type)       (alignof(type))
@@ -3307,18 +3323,20 @@
 
 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
 #if VMA_STATS_STRING_ENABLED
-    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+namespace {
+    inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
     {
         snprintf(outStr, strLen, "%" PRIu32, num);
     }
-    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+    inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
     {
         snprintf(outStr, strLen, "%" PRIu64, num);
     }
-    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+    inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
     {
         snprintf(outStr, strLen, "%p", ptr);
     }
+} // namespace
 #endif
 
 #ifndef VMA_MUTEX
@@ -3513,21 +3531,22 @@
 */
 #endif // _VMA_CONFIGURATION
 
-
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+namespace
+{
+constexpr uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+constexpr uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
-static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+constexpr uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
 
 // Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
-static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
-static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
-static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
-static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000U;
-static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
-static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+constexpr uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+constexpr uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+constexpr uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+constexpr uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
+constexpr int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
+constexpr uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000U;
+constexpr uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+constexpr uint32_t VMA_VENDOR_ID_AMD = 4098;
 
 // This one is tricky. Vulkan specification defines this code as available since
 // Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
@@ -3537,7 +3556,7 @@
 
 #if VMA_STATS_STRING_ENABLED
 // Correspond to values of enum VmaSuballocationType.
-static const char* const VMA_SUBALLOCATION_TYPE_NAMES[] =
+const char* const VMA_SUBALLOCATION_TYPE_NAMES[] =
 {
     "FREE",
     "UNKNOWN",
@@ -3548,7 +3567,7 @@
 };
 #endif
 
-static const VkAllocationCallbacks VmaEmptyAllocationCallbacks =
+const VkAllocationCallbacks VmaEmptyAllocationCallbacks =
     { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
 
 
@@ -3583,6 +3602,8 @@
 
 #endif // _VMA_ENUM_DECLARATIONS
 
+} // namespace
+
 #ifndef _VMA_FORWARD_DECLARATIONS
 // Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
@@ -3654,6 +3675,9 @@
 
 #ifndef _VMA_FUNCTIONS
 
+namespace
+{
+
 /*
 Returns number of bits set to 1 in (v).
 
@@ -3667,7 +3691,7 @@
 Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
 But you need to check in runtime whether user's CPU supports these, as some old processors don't.
 */
-static inline uint32_t VmaCountBitsSet(uint32_t v)
+inline uint32_t VmaCountBitsSet(uint32_t v)
 {
 #if VMA_CPP20
     return std::popcount(v);
@@ -3681,7 +3705,7 @@
 #endif
 }
 
-static inline uint8_t VmaBitScanLSB(uint64_t mask)
+inline uint8_t VmaBitScanLSB(uint64_t mask)
 {
 #if defined(_MSC_VER) && defined(_WIN64)
     unsigned long pos;
@@ -3707,7 +3731,7 @@
 #endif
 }
 
-static inline uint8_t VmaBitScanLSB(uint32_t mask)
+inline uint8_t VmaBitScanLSB(uint32_t mask)
 {
 #ifdef _MSC_VER
     unsigned long pos;
@@ -3733,7 +3757,7 @@
 #endif
 }
 
-static inline uint8_t VmaBitScanMSB(uint64_t mask)
+inline uint8_t VmaBitScanMSB(uint64_t mask)
 {
 #if defined(_MSC_VER) && defined(_WIN64)
     unsigned long pos;
@@ -3758,7 +3782,7 @@
     return UINT8_MAX;
 }
 
-static inline uint8_t VmaBitScanMSB(uint32_t mask)
+inline uint8_t VmaBitScanMSB(uint32_t mask)
 {
 #ifdef _MSC_VER
     unsigned long pos;
@@ -3797,7 +3821,7 @@
 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
 // Use types like uint32_t, uint64_t as T.
 template <typename T>
-static inline T VmaAlignUp(T val, T alignment)
+inline T VmaAlignUp(T val, T alignment)
 {
     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
     return (val + alignment - 1) & ~(alignment - 1);
@@ -3806,7 +3830,7 @@
 // Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
 // Use types like uint32_t, uint64_t as T.
 template <typename T>
-static inline T VmaAlignDown(T val, T alignment)
+inline T VmaAlignDown(T val, T alignment)
 {
     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
     return val & ~(alignment - 1);
@@ -3814,20 +3838,20 @@
 
 // Division with mathematical rounding to nearest number.
 template <typename T>
-static inline T VmaRoundDiv(T x, T y)
+inline T VmaRoundDiv(T x, T y)
 {
     return (x + (y / (T)2)) / y;
 }
 
 // Divide by 'y' and round up to nearest integer.
 template <typename T>
-static inline T VmaDivideRoundingUp(T x, T y)
+inline T VmaDivideRoundingUp(T x, T y)
 {
     return (x + y - (T)1) / y;
 }
 
 // Returns smallest power of 2 greater or equal to v.
-static inline uint32_t VmaNextPow2(uint32_t v)
+inline uint32_t VmaNextPow2(uint32_t v)
 {
     v--;
     v |= v >> 1;
@@ -3839,7 +3863,7 @@
     return v;
 }
 
-static inline uint64_t VmaNextPow2(uint64_t v)
+inline uint64_t VmaNextPow2(uint64_t v)
 {
     v--;
     v |= v >> 1;
@@ -3853,7 +3877,7 @@
 }
 
 // Returns largest power of 2 less or equal to v.
-static inline uint32_t VmaPrevPow2(uint32_t v)
+inline uint32_t VmaPrevPow2(uint32_t v)
 {
     v |= v >> 1;
     v |= v >> 2;
@@ -3864,7 +3888,7 @@
     return v;
 }
 
-static inline uint64_t VmaPrevPow2(uint64_t v)
+inline uint64_t VmaPrevPow2(uint64_t v)
 {
     v |= v >> 1;
     v |= v >> 2;
@@ -3876,7 +3900,7 @@
     return v;
 }
 
-static inline bool VmaStrIsEmpty(const char* pStr)
+inline bool VmaStrIsEmpty(const char* pStr)
 {
     return pStr == VMA_NULL || *pStr == '\0';
 }
@@ -3888,7 +3912,7 @@
 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
 */
-static inline bool VmaBlocksOnSamePage(
+inline bool VmaBlocksOnSamePage(
     VkDeviceSize resourceAOffset,
     VkDeviceSize resourceASize,
     VkDeviceSize resourceBOffset,
@@ -3908,7 +3932,7 @@
 or linear image and another one is optimal image. If type is unknown, behave
 conservatively.
 */
-static inline bool VmaIsBufferImageGranularityConflict(
+inline bool VmaIsBufferImageGranularityConflict(
     VmaSuballocationType suballocType1,
     VmaSuballocationType suballocType2)
 {
@@ -3943,7 +3967,7 @@
     }
 }
 
-static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
 {
 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
     uint32_t* pDst = (uint32_t*)((char*)pData + offset);
@@ -3957,7 +3981,7 @@
 #endif
 }
 
-static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
 {
 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
     const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
@@ -3977,7 +4001,7 @@
 Fills structure with parameters of an example buffer to be used for transfers
 during GPU memory defragmentation.
 */
-static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
 {
     memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
     outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
@@ -3996,7 +4020,7 @@
 new element with value (key) should be inserted.
 */
 template <typename CmpLess, typename IterT, typename KeyT>
-static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
+IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
 {
     size_t down = 0;
     size_t up = size_t(end - beg);
@@ -4034,7 +4058,7 @@
 T must be pointer type, e.g. VmaAllocation, VmaPool.
 */
 template<typename T>
-static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+bool VmaValidatePointerArray(uint32_t count, const T* arr)
 {
     for (uint32_t i = 0; i < count; ++i)
     {
@@ -4055,15 +4079,16 @@
 }
 
 template<typename MainT, typename NewT>
-static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
 {
     newStruct->pNext = mainStruct->pNext;
     mainStruct->pNext = newStruct;
 }
+
 // Finds structure with s->sType == sType in mainStruct->pNext chain.
 // Returns pointer to it. If not found, returns null.
 template<typename FindT, typename MainT>
-static inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
+inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
 {
     for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
         s != VMA_NULL; s = s->pNext)
@@ -4138,7 +4163,7 @@
 
 // This is the main algorithm that guides the selection of a memory type best for an allocation -
 // converts usage to required/preferred/not preferred flags.
-static bool FindMemoryPreferences(
+bool FindMemoryPreferences(
     bool isIntegratedGPU,
     const VmaAllocationCreateInfo& allocCreateInfo,
     VmaBufferImageUsage bufImgUsage,
@@ -4290,7 +4315,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // Memory allocation
 
-static inline void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+inline void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
 {
     void* result = VMA_NULL;
     if ((pAllocationCallbacks != VMA_NULL) &&
@@ -4310,7 +4335,7 @@
     return result;
 }
 
-static inline void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+inline void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
 {
     if ((pAllocationCallbacks != VMA_NULL) &&
         (pAllocationCallbacks->pfnFree != VMA_NULL))
@@ -4324,13 +4349,13 @@
 }
 
 template<typename T>
-static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
 {
     return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
 }
 
 template<typename T>
-static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
 {
     return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
 }
@@ -4340,14 +4365,14 @@
 #define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)
 
 template<typename T>
-static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
 {
     ptr->~T();
     VmaFree(pAllocationCallbacks, ptr);
 }
 
 template<typename T>
-static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
 {
     if (ptr != VMA_NULL)
     {
@@ -4359,7 +4384,7 @@
     }
 }
 
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
 {
     if (srcStr != VMA_NULL)
     {
@@ -4372,7 +4397,7 @@
 }
 
 #if VMA_STATS_STRING_ENABLED
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
+char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
 {
     if (srcStr != VMA_NULL)
     {
@@ -4385,7 +4410,7 @@
 }
 #endif // VMA_STATS_STRING_ENABLED
 
-static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
 {
     if (str != VMA_NULL)
     {
@@ -4423,11 +4448,17 @@
     }
     return false;
 }
+
+} // namespace
+
 #endif // _VMA_FUNCTIONS
 
 #ifndef _VMA_STATISTICS_FUNCTIONS
 
-static void VmaClearStatistics(VmaStatistics& outStats)
+namespace
+{
+
+void VmaClearStatistics(VmaStatistics& outStats)
 {
     outStats.blockCount = 0;
     outStats.allocationCount = 0;
@@ -4435,7 +4466,7 @@
     outStats.allocationBytes = 0;
 }
 
-static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
+void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
 {
     inoutStats.blockCount += src.blockCount;
     inoutStats.allocationCount += src.allocationCount;
@@ -4443,7 +4474,7 @@
     inoutStats.allocationBytes += src.allocationBytes;
 }
 
-static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
+void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
 {
     VmaClearStatistics(outStats.statistics);
     outStats.unusedRangeCount = 0;
@@ -4453,7 +4484,7 @@
     outStats.unusedRangeSizeMax = 0;
 }
 
-static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
 {
     inoutStats.statistics.allocationCount++;
     inoutStats.statistics.allocationBytes += size;
@@ -4461,14 +4492,14 @@
     inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
 }
 
-static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
 {
     inoutStats.unusedRangeCount++;
     inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
     inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
 }
 
-static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
+void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
 {
     VmaAddStatistics(inoutStats.statistics, src.statistics);
     inoutStats.unusedRangeCount += src.unusedRangeCount;
@@ -4478,6 +4509,8 @@
     inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
 }
 
+} // namespace
+
 #endif // _VMA_STATISTICS_FUNCTIONS
 
 #ifndef _VMA_MUTEX_LOCK
@@ -4794,17 +4827,23 @@
 }
 #endif // _VMA_VECTOR_FUNCTIONS
 
+namespace
+{
+
 template<typename T, typename allocatorT>
-static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
 {
     vec.insert(index, item);
 }
 
 template<typename T, typename allocatorT>
-static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
 {
     vec.remove(index);
 }
+
+} // namespace
+
 #endif // _VMA_VECTOR
 
 #ifndef _VMA_SMALL_VECTOR
@@ -6202,7 +6241,10 @@
 }
 #endif // _VMA_JSON_WRITER_FUNCTIONS
 
-static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
+namespace
+{
+
+void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
 {
     json.BeginObject();
 
@@ -6233,6 +6275,9 @@
     }
     json.EndObject();
 }
+
+} // namespace
+
 #endif // _VMA_JSON_WRITER
 
 #ifndef _VMA_MAPPING_HYSTERESIS
@@ -10779,30 +10824,32 @@
 
 
 #ifndef _VMA_MEMORY_FUNCTIONS
-static inline void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+namespace
+{
+inline void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
 {
     return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
 }
 
-static inline void VmaFree(VmaAllocator hAllocator, void* ptr)
+inline void VmaFree(VmaAllocator hAllocator, void* ptr)
 {
     VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
 }
 
 template<typename T>
-static T* VmaAllocate(VmaAllocator hAllocator)
+T* VmaAllocate(VmaAllocator hAllocator)
 {
     return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
 }
 
 template<typename T>
-static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
 {
     return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
 }
 
 template<typename T>
-static void vma_delete(VmaAllocator hAllocator, T* ptr)
+void vma_delete(VmaAllocator hAllocator, T* ptr)
 {
     if(ptr != VMA_NULL)
     {
@@ -10812,7 +10859,7 @@
 }
 
 template<typename T>
-static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
 {
     if(ptr != VMA_NULL)
     {
@@ -10821,6 +10868,7 @@
         VmaFree(hAllocator, ptr);
     }
 }
+} // namespace
 #endif // _VMA_MEMORY_FUNCTIONS
 
 #ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS