Implement recommended MVKVector changes and merge with HEAD
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h
index 1fb53cd..d9be776 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdDraw.h
@@ -44,7 +44,7 @@
MVKCmdBindVertexBuffers(MVKCommandTypePool<MVKCmdBindVertexBuffers>* pool);
protected:
- MVKVector<MVKMTLBufferBinding> _bindings;
+ MVKVectorInline<MVKMTLBufferBinding, 8> _bindings;
};
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h
index 7166d1c..2044f66 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdPipeline.h
@@ -102,8 +102,8 @@
private:
VkPipelineBindPoint _pipelineBindPoint;
MVKPipelineLayout* _pipelineLayout;
- MVKVector<MVKDescriptorSet*> _descriptorSets;
- MVKVector<uint32_t> _dynamicOffsets;
+ MVKVectorInline<MVKDescriptorSet*, 8> _descriptorSets;
+ MVKVectorInline<uint32_t, 8> _dynamicOffsets;
uint32_t _firstSet;
};
@@ -129,7 +129,7 @@
MVKPipelineLayout* _pipelineLayout;
VkShaderStageFlags _stageFlags;
uint32_t _offset;
- MVKVector<char> _pushConstants;
+ MVKVectorInline<char, 128> _pushConstants;
};
@@ -157,7 +157,7 @@
VkPipelineBindPoint _pipelineBindPoint;
MVKPipelineLayout* _pipelineLayout;
- std::vector<VkWriteDescriptorSet> _descriptorWrites;
+ MVKVectorInline<VkWriteDescriptorSet, 8> _descriptorWrites;
uint32_t _set;
};
diff --git a/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h b/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h
index 733e290..8de657e 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCmdRenderPass.h
@@ -48,7 +48,7 @@
VkSubpassContents _contents;
MVKRenderPass* _renderPass;
MVKFramebuffer* _framebuffer;
- MVKVector<VkClearValue> _clearValues;
+ MVKVectorInline<VkClearValue, 8> _clearValues;
};
@@ -115,7 +115,7 @@
private:
uint32_t _firstViewport;
- MVKVector<MTLViewport> _mtlViewports;
+ MVKVectorInline<MTLViewport, 8> _mtlViewports;
};
@@ -134,7 +134,7 @@
private:
uint32_t _firstScissor;
- MVKVector<MTLScissorRect> _mtlScissors;
+ MVKVectorInline<MTLScissorRect, 8> _mtlScissors;
};
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
index 11fd50c..9b8d4a5 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandBuffer.h
@@ -403,7 +403,7 @@
uint32_t _renderSubpassIndex;
VkRect2D _renderArea;
MVKActivatedQueries* _pActivatedQueries;
- MVKVector<VkClearValue> _clearValues;
+ MVKVectorInline<VkClearValue, 8> _clearValues;
id<MTLComputeCommandEncoder> _mtlComputeEncoder;
MVKCommandUse _mtlComputeEncoderUse;
id<MTLBlitCommandEncoder> _mtlBlitEncoder;
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h
index f52bcab..a18e6d4 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.h
@@ -147,7 +147,7 @@
void encodeImpl() override;
void resetImpl() override;
- MVKVector<MTLViewport> _mtlViewports;
+ MVKVectorInline<MTLViewport, 8> _mtlViewports;
};
@@ -176,7 +176,7 @@
void encodeImpl() override;
void resetImpl() override;
- MVKVector<MTLScissorRect> _mtlScissors;
+ MVKVectorInline<MTLScissorRect, 8> _mtlScissors;
};
@@ -203,7 +203,7 @@
void encodeImpl() override;
void resetImpl() override;
- MVKVector<char> _pushConstants;
+ MVKVectorInline<char, 128> _pushConstants;
VkShaderStageFlagBits _shaderStage;
uint32_t _mtlBufferIndex = 0;
};
@@ -451,14 +451,14 @@
void resetImpl() override;
void markDirty() override;
- MVKVector<MVKMTLBufferBinding> _vertexBufferBindings;
- MVKVector<MVKMTLBufferBinding> _fragmentBufferBindings;
- MVKVector<MVKMTLTextureBinding> _vertexTextureBindings;
- MVKVector<MVKMTLTextureBinding> _fragmentTextureBindings;
- MVKVector<MVKMTLSamplerStateBinding> _vertexSamplerStateBindings;
- MVKVector<MVKMTLSamplerStateBinding> _fragmentSamplerStateBindings;
- MVKVector<uint32_t> _vertexSwizzleConstants;
- MVKVector<uint32_t> _fragmentSwizzleConstants;
+ MVKVectorInline<MVKMTLBufferBinding, 8> _vertexBufferBindings;
+ MVKVectorInline<MVKMTLBufferBinding, 8> _fragmentBufferBindings;
+ MVKVectorInline<MVKMTLTextureBinding, 8> _vertexTextureBindings;
+ MVKVectorInline<MVKMTLTextureBinding, 8> _fragmentTextureBindings;
+ MVKVectorInline<MVKMTLSamplerStateBinding, 8> _vertexSamplerStateBindings;
+ MVKVectorInline<MVKMTLSamplerStateBinding, 8> _fragmentSamplerStateBindings;
+ MVKVectorInline<uint32_t, 8> _vertexSwizzleConstants;
+ MVKVectorInline<uint32_t, 8> _fragmentSwizzleConstants;
MVKMTLBufferBinding _vertexAuxBufferBinding;
MVKMTLBufferBinding _fragmentAuxBufferBinding;
@@ -504,10 +504,10 @@
void resetImpl() override;
void markDirty() override;
- MVKVector<MVKMTLBufferBinding> _bufferBindings;
- MVKVector<MVKMTLTextureBinding> _textureBindings;
- MVKVector<MVKMTLSamplerStateBinding> _samplerStateBindings;
- MVKVector<uint32_t> _swizzleConstants;
+ MVKVectorDefault<MVKMTLBufferBinding> _bufferBindings;
+ MVKVectorDefault<MVKMTLTextureBinding> _textureBindings;
+ MVKVectorDefault<MVKMTLSamplerStateBinding> _samplerStateBindings;
+ MVKVectorDefault<uint32_t> _swizzleConstants;
MVKMTLBufferBinding _auxBufferBinding;
bool _areBufferBindingsDirty = false;
diff --git a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
index d7eea3b..480fc9d 100644
--- a/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
+++ b/MoltenVK/MoltenVK/Commands/MVKCommandEncoderState.mm
@@ -402,7 +402,8 @@
#pragma mark MVKResourcesCommandEncoderState
// Updates the swizzle for an image in the given vector.
-static void updateSwizzle(MVKVector<uint32_t> &constants, uint32_t index, uint32_t swizzle) {
+template<typename T>
+static void updateSwizzle(T &constants, uint32_t index, uint32_t swizzle) {
if (index >= constants.size()) { constants.resize(index + 1); }
constants[index] = swizzle;
}
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
index 8a33856..b94c9ff 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.h
@@ -143,7 +143,7 @@
/** Encodes this descriptor set layout and the specified descriptor updates on the specified command encoder immediately. */
void pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
- std::vector<VkWriteDescriptorSet>& descriptorWrites,
+ MVKVector<VkWriteDescriptorSet>& descriptorWrites,
MVKShaderResourceBinding& dslMTLRezIdxOffsets);
@@ -171,7 +171,7 @@
friend class MVKPipelineLayout;
friend class MVKDescriptorSet;
- MVKVector<MVKDescriptorSetLayoutBinding> _bindings;
+ MVKVectorInline<MVKDescriptorSetLayoutBinding, 8> _bindings;
std::unordered_map<uint32_t, uint32_t> _bindingToIndex;
MVKShaderResourceBinding _mtlResourceCounts;
bool _isPushDescriptorLayout : 1;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
index d711ab8..4d7e64f 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKDescriptorSet.mm
@@ -559,7 +559,7 @@
}
void MVKDescriptorSetLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
- vector<VkWriteDescriptorSet>& descriptorWrites,
+ MVKVector<VkWriteDescriptorSet>& descriptorWrites,
MVKShaderResourceBinding& dslMTLRezIdxOffsets) {
if (!_isPushDescriptorLayout) return;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
index 71a0146..bdf64a5 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.h
@@ -20,8 +20,8 @@
#include "MVKResource.h"
#include "MVKSync.h"
+#include "MVKVector.h"
#include <mutex>
-#include <list>
#import <IOSurface/IOSurfaceRef.h>
@@ -396,7 +396,7 @@
uint32_t _swapchainIndex;
id<CAMetalDrawable> _mtlDrawable;
std::mutex _availabilityLock;
- std::list<MVKSwapchainSignaler> _availabilitySignalers;
+ MVKVectorInline<MVKSwapchainSignaler, 4> _availabilitySignalers;
MVKSwapchainSignaler _preSignaled;
MVKSwapchainImageAvailability _availability;
};
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
index faa31cb..715470b 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKImage.mm
@@ -1027,7 +1027,8 @@
// If this image is not yet available, extract and signal the first semaphore and fence.
signaler = _availabilitySignalers.front();
- _availabilitySignalers.pop_front();
+ _availabilitySignalers.erase( _availabilitySignalers.begin() );
+ // MVKVector has no pop_front(); erasing the first element is the equivalent operation.
}
// Signal the semaphore and fence, and let them know they are no longer being tracked.
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
index 6b6509b..5132a05 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.h
@@ -25,7 +25,6 @@
#include "MVKVector.h"
#include <MoltenVKSPIRVToMSLConverter/SPIRVToMSLConverter.h>
#include <unordered_set>
-#include <vector>
#include <ostream>
#import <Metal/Metal.h>
@@ -59,7 +58,7 @@
/** Updates a descriptor set in a command encoder. */
void pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
- std::vector<VkWriteDescriptorSet>& descriptorWrites,
+ MVKVector<VkWriteDescriptorSet>& descriptorWrites,
uint32_t set);
/** Updates a descriptor set from a template in a command encoder. */
@@ -78,9 +77,9 @@
MVKPipelineLayout(MVKDevice* device, const VkPipelineLayoutCreateInfo* pCreateInfo);
protected:
- std::vector<MVKDescriptorSetLayout> _descriptorSetLayouts;
- std::vector<MVKShaderResourceBinding> _dslMTLResourceIndexOffsets;
- std::vector<VkPushConstantRange> _pushConstants;
+ MVKVectorInline<MVKDescriptorSetLayout, 8> _descriptorSetLayouts;
+ MVKVectorInline<MVKShaderResourceBinding, 8> _dslMTLResourceIndexOffsets;
+ MVKVectorInline<VkPushConstantRange, 8> _pushConstants;
MVKShaderResourceBinding _pushConstantsMTLResourceIndexes;
MVKShaderAuxBufferBinding _auxBufferIndex;
};
@@ -143,8 +142,8 @@
VkPipelineRasterizationStateCreateInfo _rasterInfo;
VkPipelineDepthStencilStateCreateInfo _depthStencilInfo;
- MVKVector<MTLViewport> _mtlViewports;
- MVKVector<MTLScissorRect> _mtlScissors;
+ MVKVectorInline<MTLViewport, 8> _mtlViewports;
+ MVKVectorInline<MTLScissorRect, 8> _mtlScissors;
id<MTLRenderPipelineState> _mtlPipelineState;
MTLCullMode _mtlCullMode;
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
index 9bb5a36..4e1916d 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKPipeline.mm
@@ -55,7 +55,7 @@
}
void MVKPipelineLayout::pushDescriptorSet(MVKCommandEncoder* cmdEncoder,
- vector<VkWriteDescriptorSet>& descriptorWrites,
+ MVKVector<VkWriteDescriptorSet>& descriptorWrites,
uint32_t set) {
_descriptorSetLayouts[set].pushDescriptorSet(cmdEncoder, descriptorWrites,
diff --git a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h
index 1feac0e..580d98e 100644
--- a/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h
+++ b/MoltenVK/MoltenVK/GPUObjects/MVKQueue.h
@@ -170,7 +170,7 @@
MVKQueueSubmission* _prev;
MVKQueueSubmission* _next;
VkResult _submissionResult;
- MVKVector<MVKSemaphore*> _waitSemaphores;
+ MVKVectorInline<MVKSemaphore*, 8> _waitSemaphores;
bool _isAwaitingSemaphores;
};
@@ -205,8 +205,8 @@
void commitActiveMTLCommandBuffer(bool signalCompletion = false);
void finish();
- MVKVector<MVKCommandBuffer*> _cmdBuffers;
- MVKVector<MVKSemaphore*> _signalSemaphores;
+ MVKVectorInline<MVKCommandBuffer*, 16> _cmdBuffers;
+ MVKVectorInline<MVKSemaphore*, 16> _signalSemaphores;
MVKFence* _fence;
MVKCommandUse _cmdBuffUse;
id<MTLCommandBuffer> _activeMTLCommandBuffer;
@@ -228,6 +228,6 @@
const VkPresentInfoKHR* pPresentInfo);
protected:
- MVKVector<MVKSwapchainImage*> _surfaceImages;
+ MVKVectorInline<MVKSwapchainImage*, 4> _surfaceImages;
};
diff --git a/MoltenVK/MoltenVK/Utility/MVKVector.h b/MoltenVK/MoltenVK/Utility/MVKVector.h
index df0b8a9..df2413d 100755
--- a/MoltenVK/MoltenVK/Utility/MVKVector.h
+++ b/MoltenVK/MoltenVK/Utility/MVKVector.h
@@ -1,7 +1,7 @@
/*
- * MVKVectorAllocator.h
+ * MVKVector.h
*
- * Copyright (c) 2012-2018 Dr. Torsten Hans (hans@ipacs.de)
+ * Copyright (c) 2012-2019 Dr. Torsten Hans (hans@ipacs.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,25 +23,57 @@
//
#if 0
+template<typename T, size_t N = 0>
+using MVKVectorInline = std::vector<T>;
+
+template<typename T>
+using MVKVectorDefault = std::vector<T>;
+
template<typename T>
using MVKVector = std::vector<T>;
#else
//
-// a simple std::vector like container with a configurable extra stack space
-// this class supports just the necessary members to be compatible with MoltenVK
-// if C++17 is used, code can be simplified further
-// by default MVKVector used 8 elements from the stack before getting memory from heap
+// MVKVector is a sequence container that (optionally) implements a small
+// buffer optimization.
+// It behaves similarly to std::vector, except until a certain number of
+// elements are reserved, it does not use the heap.
+// Like std::vector, MVKVector is guaranteed to use contiguous memory, so if the
+// preallocated number of elements are exceeded, all elements are then in heap.
+// MVKVector supports just the necessary members to be compatible with MoltenVK
+// If C++17 will be the default in the future, code can be simplified quite
+// a bit.
+//
+// Example:
+//
+// MVKVectorInline<int, 3> vector;
+// vector.emplace_back( 1 );
+// vector.emplace_back( 2 );
+// vector.emplace_back( 3 );
+// // adding another element now reserves memory from heap
+// vector.emplace_back( 4 );
+//
+// If you don't need any inline storage use
+// MVKVectorDefault<int> vector; // this is essentially the same as using
+// // std::vector
+//
+// Passing MVKVectorInline to a function would require using the same template
+// parameters that have been used for declaration. To avoid this MVKVectorInline
+// is derived from MVKVector. If you want to pass MVKVectorInline to a function
+// use MVKVector.
+// Keep in mind MVKVector only supports iterating over the MVKVector, no other
+// operation is supported.
//
#include "MVKVectorAllocator.h"
#include <type_traits>
#include <initializer_list>
#include <utility>
-template<class Type, class Allocator = mvk_vector_allocator_with_stack<Type, 8>> class MVKVector
+
+template<class Type> class MVKVector
{
- Allocator alc;
+ mvk_vector_allocator_base<Type> *alc_ptr;
public:
class iterator
@@ -52,100 +84,134 @@
public:
iterator() = delete;
iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { }
+ iterator &operator=( const iterator &it ) = delete;
- iterator &operator=( const iterator &it )
- {
- vector = it.vector;
- index = it.index;
- return *this;
- }
+ Type *operator->() const { return &vector->alc_ptr->ptr[index]; }
+ Type &operator*() const { return vector->alc_ptr->ptr[index]; }
+ operator Type*( ) const { return &vector->alc_ptr->ptr[index]; }
- Type *operator->() const
- {
- return &vector->alc.ptr[index];
- }
+ bool operator==( const iterator &it ) const { return vector == it.vector && index == it.index; }
+ bool operator!=( const iterator &it ) const { return vector != it.vector || index != it.index; }
- Type &operator*() const
- {
- return vector->alc.ptr[index];
- }
+ iterator& operator++() { ++index; return *this; }
+ iterator operator++( int ) { auto t = *this; ++index; return t; }
- operator Type*() const
- {
- return &vector->alc.ptr[index];
- }
-
- bool operator==( const iterator &it ) const
- {
- return ( vector == it.vector ) && ( index == it.index );
- }
-
- bool operator!=( const iterator &it ) const
- {
- return ( vector != it.vector ) || ( index != it.index );
- }
-
- iterator& operator++() { ++index; return *this; }
-
- bool is_valid() const { return index < vector->alc.num_elements_used; }
+ bool is_valid() const { return index < vector->size(); }
size_t get_position() const { return index; }
};
- class reverse_iterator
+public:
+ MVKVector() = delete;
+ MVKVector( mvk_vector_allocator_base<Type> *a ) : alc_ptr{ a } { }
+ virtual ~MVKVector() { }
+
+ iterator begin() const { return iterator( 0, *this ); }
+ iterator end() const { return iterator( alc_ptr->size(), *this ); }
+ size_t size() const { return alc_ptr->size(); }
+ bool empty() const { return alc_ptr->size() == 0; }
+ size_t capacity() const { return alc_ptr->get_capacity(); }
+
+ virtual const Type &operator[]( const size_t i ) const = 0;
+ virtual void push_back( const Type &t ) = 0;
+};
+
+
+template<class Type> class MVKVector<Type *>
+{
+ mvk_vector_allocator_base<Type*> *alc_ptr;
+
+public:
+ class iterator
{
const MVKVector *vector;
size_t index;
public:
- reverse_iterator() = delete;
- reverse_iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { }
- reverse_iterator &operator=( const reverse_iterator & ) = delete;
+ iterator() = delete;
+ iterator( const size_t _index, const MVKVector &_vector ) : vector{ &_vector }, index{ _index } { }
+ iterator &operator=( const iterator &it ) = delete;
- Type *operator->() const
+ Type *operator->() const { return vector->alc_ptr->ptr[index]; }
+ Type &operator*() const { return vector->alc_ptr->ptr[index]; }
+ operator Type*&() const { return &vector->alc_ptr->ptr[index]; }
+
+ bool operator==( const iterator &it ) const { return vector == it.vector && index == it.index; }
+ bool operator!=( const iterator &it ) const { return vector != it.vector || index != it.index; }
+
+ iterator& operator++() { ++index; return *this; }
+ iterator operator++( int ) { auto t = *this; ++index; return t; }
+
+ bool is_valid() const { return index < vector->size(); }
+ size_t get_position() const { return index; }
+ };
+
+public:
+ MVKVector() = delete;
+ MVKVector( mvk_vector_allocator_base<Type*> *a ) : alc_ptr{ a } { }
+ virtual ~MVKVector() { }
+
+ iterator begin() const { return iterator( 0, *this ); }
+ iterator end() const { return iterator( alc_ptr->size(), *this ); }
+ size_t size() const { return alc_ptr->size(); }
+ bool empty() const { return alc_ptr->size() == 0; }
+ size_t capacity() const { return alc_ptr->get_capacity(); }
+
+ virtual Type * const &operator[]( const size_t i ) const = 0;
+ virtual void push_back( const Type *t ) = 0;
+};
+
+
+// this is the actual implementation of MVKVector
+template<class Type, typename Allocator = mvk_vector_allocator_default<Type>> class MVKVectorImpl : public MVKVector<Type>
+{
+ friend class MVKVectorImpl;
+
+ Allocator alc;
+
+public:
+ class iterator
+ {
+ const MVKVectorImpl *vector;
+ size_t index;
+
+ public:
+ iterator() = delete;
+ iterator( const size_t _index, const MVKVectorImpl &_vector ) : vector{ &_vector }, index{ _index } { }
+
+ iterator &operator=( const iterator &it )
{
- return &vector->alc.ptr[index];
+ vector = it.vector;
+ index = it.index;
+ return *this;
}
- Type &operator*() const
- {
- return vector->alc.ptr[index];
- }
+ Type *operator->() { return &vector->alc.ptr[index]; }
+ Type &operator*() { return vector->alc.ptr[index]; }
+ operator Type*() { return &vector->alc.ptr[index]; }
- operator Type*() const
- {
- return &vector->alc.ptr[index];
- }
-
- bool operator==( const reverse_iterator &it ) const
- {
- return vector == it.vector && index == it.index;
- }
+ bool operator==( const iterator &it ) const { return vector == it.vector && index == it.index; }
+ bool operator!=( const iterator &it ) const { return vector != it.vector || index != it.index; }
- bool operator!=( const reverse_iterator &it ) const
- {
- return vector != it.vector || index != it.index;
- }
+ iterator& operator++() { ++index; return *this; }
+ iterator operator++( int ) { auto t = *this; ++index; return t; }
- reverse_iterator& operator++() { --index; return *this; }
-
- bool is_valid() const { return index < vector->alc.num_elements_used; }
+ bool is_valid() const { return index < vector->alc.size(); }
size_t get_position() const { return index; }
};
private:
+ // this is the growth strategy -> adjust to your needs
size_t vector_GetNextCapacity() const
{
constexpr auto ELEMENTS_FOR_64_BYTES = 64 / sizeof( Type );
constexpr auto MINIMUM_CAPACITY = ELEMENTS_FOR_64_BYTES > 4 ? ELEMENTS_FOR_64_BYTES : 4;
const auto current_capacity = capacity();
- //if( current_capacity < 256 )
- // return MINIMUM_CAPACITY + 2 * current_capacity;
return MINIMUM_CAPACITY + ( 3 * current_capacity ) / 2;
}
void vector_Allocate( const size_t s )
{
- const auto new_reserved_size = tm_max( s, alc.num_elements_used );
+ const auto new_reserved_size = tm_max( s, size() );
alc.allocate( new_reserved_size );
}
@@ -156,11 +222,11 @@
}
public:
- MVKVector()
+ MVKVectorImpl() : MVKVector<Type>{ &alc }
{
}
- MVKVector( const size_t n, const Type t )
+ MVKVectorImpl( const size_t n, const Type t ) : MVKVector<Type>{ &alc }
{
if( n > 0 )
{
@@ -175,7 +241,7 @@
}
}
- MVKVector( const MVKVector &a )
+ MVKVectorImpl( const MVKVectorImpl &a ) : MVKVector<Type>{ &alc }
{
const size_t n = a.size();
@@ -192,11 +258,29 @@
}
}
- MVKVector( MVKVector &&a ) : alc{ std::move( a.alc ) }
+ template<typename U>
+ MVKVectorImpl( const U &a ) : MVKVector<Type>{ &alc }
+ {
+ const size_t n = a.size();
+
+ if( n > 0 )
+ {
+ alc.allocate( n );
+
+ for( size_t i = 0; i < n; ++i )
+ {
+ alc.construct( &alc.ptr[i], a[i] );
+ }
+
+ alc.num_elements_used = n;
+ }
+ }
+
+ MVKVectorImpl( MVKVectorImpl &&a ) : MVKVector<Type>{ &alc }, alc{ std::move( a.alc ) }
{
}
- MVKVector( std::initializer_list<Type> vector )
+ MVKVectorImpl( std::initializer_list<Type> vector ) : MVKVector<Type>{ &alc }
{
if( vector.size() > capacity() )
{
@@ -211,15 +295,18 @@
}
}
- ~MVKVector()
+ ~MVKVectorImpl()
{
}
- MVKVector& operator=( const MVKVector &a )
+ template<typename U>
+ MVKVectorImpl& operator=( const U &a )
{
- if( this != &a )
+ static_assert( std::is_base_of<MVKVector<Type>, U>::value, "argument is not of type MVKVector" );
+
+ if( this != reinterpret_cast<const MVKVector<Type>*>( &a ) )
{
- const auto n = a.alc.num_elements_used;
+ const auto n = a.size();
if( alc.num_elements_used == n )
{
@@ -241,7 +328,7 @@
for( size_t i = 0; i < n; ++i )
{
- alc.construct( &alc.ptr[i], a.alc.ptr[i] );
+ alc.construct( &alc.ptr[i], a[i] );
}
alc.num_elements_used = n;
@@ -251,37 +338,37 @@
return *this;
}
- MVKVector& operator=( MVKVector &&a )
+ MVKVectorImpl& operator=( MVKVectorImpl &&a )
{
alc.swap( a.alc );
return *this;
}
- bool operator==( const MVKVector &a ) const
+ bool operator==( const MVKVectorImpl &a ) const
{
if( alc.num_elements_used != a.alc.num_elements_used )
return false;
for( size_t i = 0; i < alc.num_elements_used; ++i )
{
- if( alc.ptr[i] != a.alc.ptr[i] )
+ if( alc[i] != a.alc[i] )
return false;
}
return true;
}
- bool operator!=( const MVKVector &a ) const
+ bool operator!=( const MVKVectorImpl &a ) const
{
if( alc.num_elements_used != a.alc.num_elements_used )
return true;
for( size_t i = 0; i < alc.num_elements_used; ++i )
{
- if( alc.ptr[i] != a.alc.ptr[i] )
+ if( alc.ptr[i] != a.alc[i] )
return true;
}
return false;
}
- void swap( MVKVector &a )
+ void swap( MVKVectorImpl &a )
{
alc.swap( a.alc );
}
@@ -298,8 +385,6 @@
iterator begin() const { return iterator( 0, *this ); }
iterator end() const { return iterator( alc.num_elements_used, *this ); }
- reverse_iterator rbegin() const { return reverse_iterator( alc.num_elements_used - 1, *this ); }
- reverse_iterator rend() const { return reverse_iterator( size_t( -1 ), *this ); }
size_t size() const { return alc.num_elements_used; }
bool empty() const { return alc.num_elements_used == 0; }
@@ -308,7 +393,7 @@
return alc.ptr[i];
}
- const Type &operator[]( const size_t i ) const
+ const Type &operator[]( const size_t i ) const override
{
return alc.ptr[i];
}
@@ -393,7 +478,7 @@
void assign( InputIterator first, InputIterator last )
{
clear();
-
+
while( first != last )
{
emplace_back( *first );
@@ -456,7 +541,7 @@
{
--alc.num_elements_used;
- for( size_t i = it.GetIndex(); i < alc.num_elements_used; ++i )
+ for( size_t i = it.get_position(); i < alc.num_elements_used; ++i )
{
alc.ptr[i] = std::move( alc.ptr[i + 1] );
}
@@ -493,7 +578,7 @@
}
}
- void push_back( const Type &t )
+ void push_back( const Type &t ) override
{
if( alc.num_elements_used == capacity() )
vector_ReAllocate( vector_GetNextCapacity() );
@@ -524,5 +609,377 @@
}
};
+// specialization for pointer types
+template<class Type, typename Allocator> class MVKVectorImpl<Type*, Allocator> : public MVKVector<Type*>
+{
+ friend class MVKVectorImpl;
+
+ Allocator alc;
+
+public:
+ class iterator
+ {
+ MVKVectorImpl *vector;
+ size_t index;
+
+ public:
+ iterator() = delete;
+ iterator( const size_t _index, MVKVectorImpl &_vector ) : vector{ &_vector }, index{ _index } { }
+
+ iterator &operator=( const iterator &it )
+ {
+ vector = it.vector;
+ index = it.index;
+ return *this;
+ }
+
+ Type *&operator*() { return vector->alc[index]; }
+
+ bool operator==( const iterator &it ) const { return vector == it.vector && index == it.index; }
+ bool operator!=( const iterator &it ) const { return vector != it.vector || index != it.index; }
+
+ iterator& operator++() { ++index; return *this; }
+ iterator operator++( int ) { auto t = *this; ++index; return t; }
+
+ bool is_valid() const { return index < vector->alc.size(); }
+ size_t get_position() const { return index; }
+ };
+
+private:
+ // this is the growth strategy -> adjust to your needs
+ size_t vector_GetNextCapacity() const
+ {
+ constexpr auto ELEMENTS_FOR_64_BYTES = 64 / sizeof( Type );
+ constexpr auto MINIMUM_CAPACITY = ELEMENTS_FOR_64_BYTES > 4 ? ELEMENTS_FOR_64_BYTES : 4;
+ const auto current_capacity = capacity();
+ return MINIMUM_CAPACITY + ( 3 * current_capacity ) / 2;
+ }
+
+ void vector_Allocate( const size_t s )
+ {
+ const auto new_reserved_size = tm_max( s, size() );
+
+ alc.allocate( new_reserved_size );
+ }
+
+ void vector_ReAllocate( const size_t s )
+ {
+ alc.re_allocate( s );
+ }
+
+public:
+ MVKVectorImpl() : MVKVector<Type*>{ &alc }
+ {
+ }
+
+ MVKVectorImpl( const size_t n, const Type *t ) : MVKVector<Type*>{ &alc }
+ {
+ if ( n > 0 )
+ {
+ alc.allocate( n );
+
+ for ( size_t i = 0; i < n; ++i )
+ {
+ alc.ptr[i] = t;
+ }
+
+ alc.num_elements_used = n;
+ }
+ }
+
+ MVKVectorImpl( const MVKVectorImpl &a ) : MVKVector<Type*>{ &alc }
+ {
+ const size_t n = a.size();
+
+ if ( n > 0 )
+ {
+ alc.allocate( n );
+
+ for ( size_t i = 0; i < n; ++i )
+ {
+ alc.ptr[i] = a.alc.ptr[i];
+ }
+
+ alc.num_elements_used = n;
+ }
+ }
+
+ MVKVectorImpl( MVKVectorImpl &&a ) : MVKVector<Type*>{ &alc }, alc{ std::move( a.alc ) }
+ {
+ }
+
+ MVKVectorImpl( std::initializer_list<Type*> vector ) : MVKVector<Type*>{ &alc }
+ {
+ if ( vector.size() > capacity() )
+ {
+ vector_Allocate( vector.size() );
+ }
+
+ // std::initializer_list does not yet support std::move, we use it anyway but it has no effect
+ for ( auto element : vector )
+ {
+ alc.ptr[alc.num_elements_used] = element;
+ ++alc.num_elements_used;
+ }
+ }
+
+ ~MVKVectorImpl()
+ {
+ }
+
+ template<typename U>
+ MVKVectorImpl& operator=( const U &a )
+ {
+ static_assert( std::is_base_of<MVKVector<Type*>, U>::value, "argument is not of type MVKVector" );
+
+ if ( this != reinterpret_cast< const MVKVector<Type*>* >( &a ) )
+ {
+ const auto n = a.size();
+
+ if ( alc.num_elements_used == n )
+ {
+ for ( size_t i = 0; i < n; ++i )
+ {
+ alc.ptr[i] = a[i];
+ }
+ }
+ else
+ {
+ if ( n > capacity() )
+ {
+ vector_ReAllocate( n );
+ }
+
+ for ( size_t i = 0; i < n; ++i )
+ {
+ alc.ptr[i] = a[i];
+ }
+
+ alc.num_elements_used = n;
+ }
+ }
+
+ return *this;
+ }
+
+ MVKVectorImpl& operator=( MVKVectorImpl &&a )
+ {
+ alc.swap( a.alc );
+ return *this;
+ }
+
+ bool operator==( const MVKVectorImpl &a ) const
+ {
+ if ( alc.num_elements_used != a.alc.num_elements_used )
+ return false;
+ for ( size_t i = 0; i < alc.num_elements_used; ++i )
+ {
+ if ( alc[i] != a.alc[i] )
+ return false;
+ }
+ return true;
+ }
+
+ bool operator!=( const MVKVectorImpl &a ) const
+ {
+ if ( alc.num_elements_used != a.alc.num_elements_used )
+ return true;
+ for ( size_t i = 0; i < alc.num_elements_used; ++i )
+ {
+ if ( alc.ptr[i] != a.alc[i] )
+ return true;
+ }
+ return false;
+ }
+
+ void swap( MVKVectorImpl &a )
+ {
+ alc.swap( a.alc );
+ }
+
+ void clear()
+ {
+ alc.num_elements_used = 0;
+ }
+
+ void reset()
+ {
+ alc.deallocate();
+ }
+
+ iterator begin() { return iterator( 0, *this ); }
+ iterator end() { return iterator( alc.num_elements_used, *this ); }
+ size_t size() const { return alc.num_elements_used; }
+ bool empty() const { return alc.num_elements_used == 0; }
+
+ Type *at( const size_t i ) const
+ {
+ return alc.ptr[i];
+ }
+
+ Type * const &operator[]( const size_t i ) const override
+ {
+ return alc.ptr[i];
+ }
+
+ Type *&operator[]( const size_t i )
+ {
+ return alc.ptr[i];
+ }
+
+ const Type * const *data() const
+ {
+ return &alc.ptr[0];
+ }
+
+ Type **data()
+ {
+ return &alc.ptr[0];
+ }
+
+ size_t capacity() const
+ {
+ return alc.get_capacity();
+ }
+
+ const Type *back() const
+ {
+ return alc.ptr[alc.num_elements_used - 1];
+ }
+
+ Type *back()
+ {
+ return alc.ptr[alc.num_elements_used - 1];
+ }
+
+ void pop_back()
+ {
+ if ( alc.num_elements_used > 0 )
+ {
+ --alc.num_elements_used;
+ }
+ }
+
+ void reserve( const size_t new_size )
+ {
+ if ( new_size > capacity() )
+ {
+ vector_ReAllocate( new_size );
+ }
+ }
+
+ void assign( const size_t new_size, const Type *t )
+ {
+ if ( new_size <= capacity() )
+ {
+ clear();
+ }
+ else
+ {
+ vector_Allocate( new_size );
+ }
+
+ for ( size_t i = 0; i < new_size; ++i )
+ {
+ alc.ptr[i] = const_cast< Type* >( t );
+ }
+
+ alc.num_elements_used = new_size;
+ }
+
+ void resize( const size_t new_size )
+ {
+ if ( new_size == alc.num_elements_used )
+ {
+ return;
+ }
+
+ if ( new_size == 0 )
+ {
+ clear();
+ return;
+ }
+
+ if ( new_size > alc.num_elements_used )
+ {
+ if ( new_size > capacity() )
+ {
+ vector_ReAllocate( new_size );
+ }
+
+ while ( alc.num_elements_used < new_size )
+ {
+ alc.ptr[alc.num_elements_used] = nullptr;
+ ++alc.num_elements_used;
+ }
+ }
+ else
+ {
+ alc.num_elements_used = new_size;
+ }
+ }
+
+ // trims the capacity of the MVKVector to the number of used elements
+ void shrink_to_fit()
+ {
+ alc.shrink_to_fit();
+ }
+
+ void erase( const iterator it )
+ {
+ if ( it.is_valid() )
+ {
+ --alc.num_elements_used;
+
+ for ( size_t i = it.get_position(); i < alc.num_elements_used; ++i )
+ {
+ alc.ptr[i] = alc.ptr[i + 1];
+ }
+ }
+ }
+
+ // adds t before position it and automatically resizes vector if necessary
+ void insert( const iterator it, const Type *t )
+ {
+ if ( !it.is_valid() || alc.num_elements_used == 0 )
+ {
+ push_back( t );
+ }
+ else
+ {
+ if ( alc.num_elements_used == capacity() )
+ vector_ReAllocate( vector_GetNextCapacity() );
+
+ // move the remaining elements
+ const size_t it_position = it.get_position();
+ for ( size_t i = alc.num_elements_used; i > it_position; --i )
+ {
+ alc.ptr[i] = alc.ptr[i - 1];
+ }
+
+ alc.ptr[it_position] = const_cast< Type* >( t );
+ ++alc.num_elements_used;
+ }
+ }
+
+ void push_back( const Type *t ) override
+ {
+ if ( alc.num_elements_used == capacity() )
+ vector_ReAllocate( vector_GetNextCapacity() );
+
+ alc.ptr[alc.num_elements_used] = const_cast< Type* >( t );
+ ++alc.num_elements_used;
+ }
+};
+
+
+template<typename Type>
+using MVKVectorDefault = MVKVectorImpl<Type, mvk_vector_allocator_default<Type>>;
+
+template<typename Type, size_t N = 8>
+using MVKVectorInline = MVKVectorImpl<Type, mvk_vector_allocator_with_stack<Type, N>>;
+
+
#endif
+
diff --git a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h
index 442e0ac..a5c320a 100755
--- a/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h
+++ b/MoltenVK/MoltenVK/Utility/MVKVectorAllocator.h
@@ -1,7 +1,7 @@
/*
* MVKVectorAllocator.h
*
- * Copyright (c) 2012-2018 Dr. Torsten Hans (hans@ipacs.de)
+ * Copyright (c) 2012-2019 Dr. Torsten Hans (hans@ipacs.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,16 +37,42 @@
//////////////////////////////////////////////////////////////////////////////////////////
//
-// mvk_vector_allocator_default -> malloc based allocator for MVKVector
+// mvk_vector_allocator_base -> base class so we can use MVKVector with template parameter
//
//////////////////////////////////////////////////////////////////////////////////////////
-template <typename T>
-class mvk_vector_allocator_default final
+template<typename T>
+class mvk_vector_allocator_base
{
public:
T *ptr;
size_t num_elements_used;
+public:
+ mvk_vector_allocator_base() : ptr{ nullptr }, num_elements_used{ 0 } { }
+ mvk_vector_allocator_base( T *_ptr, const size_t _num_elements_used ) : ptr{ _ptr }, num_elements_used{ _num_elements_used } { }
+ virtual ~mvk_vector_allocator_base() { }
+
+ const T &operator[]( const size_t i ) const { return ptr[i]; }
+ T &operator[]( const size_t i ) { return ptr[i]; }
+
+ size_t size() const { return num_elements_used; }
+
+ virtual size_t get_capacity() const = 0;
+ virtual void allocate( const size_t num_elements_to_reserve ) = 0;
+ virtual void re_allocate( const size_t num_elements_to_reserve ) = 0;
+ virtual void shrink_to_fit() = 0;
+ virtual void deallocate() = 0;
+};
+
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//
+// mvk_vector_allocator_default -> malloc based allocator for MVKVector
+//
+//////////////////////////////////////////////////////////////////////////////////////////
+template <typename T>
+class mvk_vector_allocator_default final : public mvk_vector_allocator_base<T>
+{
private:
size_t num_elements_reserved;
@@ -77,38 +103,38 @@
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
destruct_all()
{
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<S>::num_elements_used; ++i )
{
- ptr[i].~S();
+ mvk_vector_allocator_base<S>::ptr[i].~S();
}
- num_elements_used = 0;
+ mvk_vector_allocator_base<S>::num_elements_used = 0;
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
destruct_all()
{
- num_elements_used = 0;
+ mvk_vector_allocator_base<S>::num_elements_used = 0;
}
public:
- constexpr mvk_vector_allocator_default() : ptr{ nullptr }, num_elements_used{ 0 }, num_elements_reserved{ 0 }
+ constexpr mvk_vector_allocator_default() : mvk_vector_allocator_base<T>{}, num_elements_reserved{ 0 }
{
}
- mvk_vector_allocator_default( mvk_vector_allocator_default &&a ) : ptr{ a.ptr }, num_elements_used{ a.num_elements_used }, num_elements_reserved{ a.num_elements_reserved }
+ mvk_vector_allocator_default( mvk_vector_allocator_default &&a ) : mvk_vector_allocator_base<T>{ a.ptr, a.num_elements_used }, num_elements_reserved{ a.num_elements_reserved }
{
- a.ptr = nullptr;
- a.num_elements_used = 0;
+ a.ptr = nullptr;
+ a.num_elements_used = 0;
a.num_elements_reserved = 0;
}
- ~mvk_vector_allocator_default()
+ virtual ~mvk_vector_allocator_default()
{
deallocate();
}
- size_t get_capacity() const
+ size_t get_capacity() const override
{
return num_elements_reserved;
}
@@ -119,25 +145,25 @@
const auto copy_num_elements_used = a.num_elements_used;
const auto copy_num_elements_reserved = a.num_elements_reserved;
- a.ptr = ptr;
- a.num_elements_used = num_elements_used;
+ a.ptr = mvk_vector_allocator_base<T>::ptr;
+ a.num_elements_used = mvk_vector_allocator_base<T>::num_elements_used;
a.num_elements_reserved = num_elements_reserved;
- ptr = copy_ptr;
- num_elements_used = copy_num_elements_used;
+ mvk_vector_allocator_base<T>::ptr = copy_ptr;
+ mvk_vector_allocator_base<T>::num_elements_used = copy_num_elements_used;
num_elements_reserved = copy_num_elements_reserved;
}
- void allocate( const size_t num_elements_to_reserve )
+ void allocate( const size_t num_elements_to_reserve ) override
{
deallocate();
- ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
- num_elements_used = 0;
+ mvk_vector_allocator_base<T>::ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
+ mvk_vector_allocator_base<T>::num_elements_used = 0;
num_elements_reserved = num_elements_to_reserve;
}
- void re_allocate( const size_t num_elements_to_reserve )
+ void re_allocate( const size_t num_elements_to_reserve ) override
{
//if constexpr( std::is_trivially_copyable<T>::value )
//{
@@ -147,53 +173,53 @@
{
auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<T>::num_elements_used; ++i )
{
- construct( &new_ptr[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &new_ptr[i], std::move( mvk_vector_allocator_base<T>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<T>::ptr[i] );
}
//if ( ptr != nullptr )
{
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
}
- ptr = new_ptr;
+ mvk_vector_allocator_base<T>::ptr = new_ptr;
}
num_elements_reserved = num_elements_to_reserve;
}
- void shrink_to_fit()
+ void shrink_to_fit() override
{
- if( num_elements_used == 0 )
+ if( mvk_vector_allocator_base<T>::num_elements_used == 0 )
{
deallocate();
}
else
{
- auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_used * sizeof( T ) ) );
+ auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( mvk_vector_allocator_base<T>::num_elements_used * sizeof( T ) ) );
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<T>::num_elements_used; ++i )
{
- construct( &new_ptr[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &new_ptr[i], std::move( mvk_vector_allocator_base<T>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<T>::ptr[i] );
}
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
- ptr = new_ptr;
- num_elements_reserved = num_elements_used;
+ mvk_vector_allocator_base<T>::ptr = new_ptr;
+ num_elements_reserved = mvk_vector_allocator_base<T>::num_elements_used;
}
}
- void deallocate()
+ void deallocate() override
{
destruct_all<T>();
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
- ptr = nullptr;
+ mvk_vector_allocator_base<T>::ptr = nullptr;
num_elements_reserved = 0;
}
};
@@ -201,16 +227,12 @@
//////////////////////////////////////////////////////////////////////////////////////////
//
-// mvk_vector_allocator_with_stack -> malloc based MVKVector allocator with stack storage
+// mvk_vector_allocator_with_stack -> malloc based MVKVector allocator with preallocated storage
//
//////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
-class mvk_vector_allocator_with_stack
+class mvk_vector_allocator_with_stack final : public mvk_vector_allocator_base<T>
{
-public:
- T *ptr;
- size_t num_elements_used;
-
private:
//size_t num_elements_reserved; // uhh, num_elements_reserved is mapped onto the stack elements, let the fun begin
alignas( alignof( T ) ) unsigned char elements_stack[N * sizeof( T )];
@@ -219,10 +241,9 @@
void set_num_elements_reserved( const size_t num_elements_reserved )
{
- *reinterpret_cast< size_t* >( &elements_stack[0] ) = num_elements_reserved;
+ *reinterpret_cast<size_t*>( &elements_stack[0] ) = num_elements_reserved;
}
-
public:
//
// faster element construction and destruction using type traits
@@ -253,18 +274,18 @@
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
destruct_all()
{
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<S>::num_elements_used; ++i )
{
- ptr[i].~S();
+ mvk_vector_allocator_base<S>::ptr[i].~S();
}
- num_elements_used = 0;
+ mvk_vector_allocator_base<S>::num_elements_used = 0;
}
template<class S> typename std::enable_if< std::is_trivially_destructible<S>::value >::type
destruct_all()
{
- num_elements_used = 0;
+ mvk_vector_allocator_base<S>::num_elements_used = 0;
}
template<class S> typename std::enable_if< !std::is_trivially_destructible<S>::value >::type
@@ -272,19 +293,19 @@
{
T stack_copy[N];
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<S>::num_elements_used; ++i )
{
- construct( &stack_copy[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &stack_copy[i], std::move( mvk_vector_allocator_base<S>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<S>::ptr[i] );
}
for( size_t i = 0; i < a.num_elements_used; ++i )
{
- construct( &ptr[i], std::move( a.ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &mvk_vector_allocator_base<S>::ptr[i], std::move( a.ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<S>::ptr[i] );
}
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<S>::num_elements_used; ++i )
{
construct( &a.ptr[i], std::move( stack_copy[i] ) );
destruct( &stack_copy[i] );
@@ -304,26 +325,26 @@
}
public:
- mvk_vector_allocator_with_stack() : ptr{ reinterpret_cast< T* >( &elements_stack[0] ) }, num_elements_used{ 0 }
+ mvk_vector_allocator_with_stack() : mvk_vector_allocator_base<T>{ reinterpret_cast<T*>( &elements_stack[0] ), 0 }
{
}
- mvk_vector_allocator_with_stack( mvk_vector_allocator_with_stack &&a ) : num_elements_used{ a.num_elements_used }
+ mvk_vector_allocator_with_stack( mvk_vector_allocator_with_stack &&a ) : mvk_vector_allocator_base<T>{ nullptr, a.num_elements_used }
{
// is a heap based -> steal ptr from a
if( !a.get_data_on_stack() )
{
- ptr = a.ptr;
+ mvk_vector_allocator_base<T>::ptr = a.ptr;
set_num_elements_reserved( a.get_capacity() );
a.ptr = a.get_default_ptr();
}
else
{
- ptr = get_default_ptr();
+ mvk_vector_allocator_base<T>::ptr = get_default_ptr();
for( size_t i = 0; i < a.num_elements_used; ++i )
{
- construct( &ptr[i], std::move( a.ptr[i] ) );
+ construct( &mvk_vector_allocator_base<T>::ptr[i], std::move( a.ptr[i] ) );
destruct( &a.ptr[i] );
}
}
@@ -336,9 +357,9 @@
deallocate();
}
- size_t get_capacity() const
+ size_t get_capacity() const override
{
- return get_data_on_stack() ? N : *reinterpret_cast< const size_t* >( &elements_stack[0] );
+ return get_data_on_stack() ? N : *reinterpret_cast<const size_t*>( &elements_stack[0] );
}
constexpr T *get_default_ptr() const
@@ -348,7 +369,7 @@
bool get_data_on_stack() const
{
- return ptr == get_default_ptr();
+ return mvk_vector_allocator_base<T>::ptr == get_default_ptr();
}
void swap( mvk_vector_allocator_with_stack &a )
@@ -356,9 +377,9 @@
// both allocators on heap -> easy case
if( !get_data_on_stack() && !a.get_data_on_stack() )
{
- auto copy_ptr = ptr;
+ auto copy_ptr = mvk_vector_allocator_base<T>::ptr;
auto copy_num_elements_reserved = get_capacity();
- ptr = a.ptr;
+ mvk_vector_allocator_base<T>::ptr = a.ptr;
set_num_elements_reserved( a.get_capacity() );
a.ptr = copy_ptr;
a.set_num_elements_reserved( copy_num_elements_reserved );
@@ -374,24 +395,24 @@
auto copy_num_elements_reserved = a.get_capacity();
a.ptr = a.get_default_ptr();
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<T>::num_elements_used; ++i )
{
- construct( &a.ptr[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &a.ptr[i], std::move( mvk_vector_allocator_base<T>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<T>::ptr[i] );
}
- ptr = copy_ptr;
+ mvk_vector_allocator_base<T>::ptr = copy_ptr;
set_num_elements_reserved( copy_num_elements_reserved );
}
else if( !get_data_on_stack() && a.get_data_on_stack() )
{
- auto copy_ptr = ptr;
+ auto copy_ptr = mvk_vector_allocator_base<T>::ptr;
auto copy_num_elements_reserved = get_capacity();
- ptr = get_default_ptr();
+ mvk_vector_allocator_base<T>::ptr = get_default_ptr();
for( size_t i = 0; i < a.num_elements_used; ++i )
{
- construct( &ptr[i], std::move( a.ptr[i] ) );
+ construct( &mvk_vector_allocator_base<T>::ptr[i], std::move( a.ptr[i] ) );
destruct( &a.ptr[i] );
}
@@ -399,15 +420,15 @@
a.set_num_elements_reserved( copy_num_elements_reserved );
}
- auto copy_num_elements_used = num_elements_used;
- num_elements_used = a.num_elements_used;
+ auto copy_num_elements_used = mvk_vector_allocator_base<T>::num_elements_used;
+ mvk_vector_allocator_base<T>::num_elements_used = a.num_elements_used;
a.num_elements_used = copy_num_elements_used;
}
//
// allocates rounded up to the defined alignment the number of bytes / if the system cannot allocate the specified amount of memory then a null block is returned
//
- void allocate( const size_t num_elements_to_reserve )
+ void allocate( const size_t num_elements_to_reserve ) override
{
deallocate();
@@ -417,8 +438,8 @@
return;
}
- ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
- num_elements_used = 0;
+ mvk_vector_allocator_base<T>::ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
+ mvk_vector_allocator_base<T>::num_elements_used = 0;
set_num_elements_reserved( num_elements_to_reserve );
}
@@ -427,18 +448,18 @@
{
auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( num_elements_to_reserve * sizeof( T ) ) );
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<T>::num_elements_used; ++i )
{
- construct( &new_ptr[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &new_ptr[i], std::move( mvk_vector_allocator_base<T>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<T>::ptr[i] );
}
- if( ptr != get_default_ptr() )
+ if( mvk_vector_allocator_base<T>::ptr != get_default_ptr() )
{
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
}
- ptr = new_ptr;
+ mvk_vector_allocator_base<T>::ptr = new_ptr;
set_num_elements_reserved( num_elements_to_reserve );
}
@@ -460,7 +481,7 @@
// set_num_elements_reserved( num_elements_to_reserve );
//}
- void re_allocate( const size_t num_elements_to_reserve )
+ void re_allocate( const size_t num_elements_to_reserve ) override
{
//TM_ASSERT( num_elements_to_reserve > get_capacity() );
@@ -470,56 +491,56 @@
}
}
- void shrink_to_fit()
+ void shrink_to_fit() override
{
// nothing to do if data is on stack already
if( get_data_on_stack() )
return;
// move elements to stack space
- if( num_elements_used <= N )
+ if( mvk_vector_allocator_base<T>::num_elements_used <= N )
{
- const auto num_elements_reserved = get_capacity();
+ //const auto num_elements_reserved = get_capacity();
auto *stack_ptr = get_default_ptr();
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<T>::num_elements_used; ++i )
{
- construct( &stack_ptr[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &stack_ptr[i], std::move( mvk_vector_allocator_base<T>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<T>::ptr[i] );
}
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
- ptr = stack_ptr;
+ mvk_vector_allocator_base<T>::ptr = stack_ptr;
}
else
{
- auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( ptr, num_elements_used * sizeof( T ) ) );
+ auto *new_ptr = reinterpret_cast< T* >( mvk_memory_allocator::alloc( mvk_vector_allocator_base<T>::num_elements_used * sizeof( T ) ) );
- for( size_t i = 0; i < num_elements_used; ++i )
+ for( size_t i = 0; i < mvk_vector_allocator_base<T>::num_elements_used; ++i )
{
- construct( &new_ptr[i], std::move( ptr[i] ) );
- destruct( &ptr[i] );
+ construct( &new_ptr[i], std::move( mvk_vector_allocator_base<T>::ptr[i] ) );
+ destruct( &mvk_vector_allocator_base<T>::ptr[i] );
}
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
- ptr = new_ptr;
- set_num_elements_reserved( num_elements_used );
+ mvk_vector_allocator_base<T>::ptr = new_ptr;
+ set_num_elements_reserved( mvk_vector_allocator_base<T>::num_elements_used );
}
}
- void deallocate()
+ void deallocate() override
{
destruct_all<T>();
if( !get_data_on_stack() )
{
- mvk_memory_allocator::free( ptr );
+ mvk_memory_allocator::free( mvk_vector_allocator_base<T>::ptr );
}
- ptr = get_default_ptr();
- num_elements_used = 0;
+ mvk_vector_allocator_base<T>::ptr = get_default_ptr();
+ mvk_vector_allocator_base<T>::num_elements_used = 0;
}
};