/*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#ifndef skgpu_graphite_PipelineDataCache_DEFINED
#define skgpu_graphite_PipelineDataCache_DEFINED

#include "include/core/SkRefCnt.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkPipelineData.h"

#include <unordered_map>
#include <vector>

namespace skgpu::graphite {

// Add a block of data to the cache and return a unique ID that corresponds to its
// contents. If an identical block of data is already in the cache, that unique ID is returned.
//
// A StorageT captures how the memory of the BaseT is managed in the cache.
// A BaseT captures the datatype that is stored in the cache and must have:
//    uint32_t hash() const;
//    operator==
//    static StorageT Make(const BaseT&, SkArenaAlloc*);
//
// Note: The BaseT/StorageT split is only required until the SkTextureDataBlock is also stored
// in an arena.
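//
// A minimal sketch of a BaseT/StorageT pair that satisfies this contract, with
// StorageT == std::unique_ptr<BaseT>. ExampleBlock and its FNV-1a hash are hypothetical
// illustrations, not types that exist in Skia:
//
//    struct ExampleBlock {
//        std::vector<uint8_t> fBytes;
//
//        uint32_t hash() const {
//            uint32_t h = 2166136261u;            // FNV-1a over the raw bytes
//            for (uint8_t b : fBytes) {
//                h = (h ^ b) * 16777619u;
//            }
//            return h;
//        }
//        bool operator==(const ExampleBlock& that) const { return fBytes == that.fBytes; }
//
//        // The arena is unused here; an arena-backed StorageT would allocate from it instead.
//        static std::unique_ptr<ExampleBlock> Make(const ExampleBlock& src, SkArenaAlloc*) {
//            return std::make_unique<ExampleBlock>(src);
//        }
//    };
//
//    using ExampleCache = PipelineDataCache<std::unique_ptr<ExampleBlock>, ExampleBlock>;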
template<typename StorageT, typename BaseT>
class PipelineDataCache {
public:
    static constexpr uint32_t kInvalidIndex = 0;

    PipelineDataCache() {
        // kInvalidIndex is reserved
        static_assert(kInvalidIndex == 0);
        fDataBlocks.push_back({});
        fDataBlockIDs.insert({nullptr, Index()});
    }

    // TODO: For the uniform-data version of this cache we should revisit the insert and Make APIs:
    // 1. UniformData::Make requires knowing the data size up front, which involves two
    //    invocations of the UniformManager. Ideally, we could align uniforms on the fly into a
    //    dynamic buffer.
    // 2. UniformData stores the offsets for each uniform, but these aren't needed after we've
    //    filled out the buffer. If we remember layout offsets, they should be stored per
    //    Combination or RenderStep that defines the uniform set.
    // 3. UniformCache's ids are only fundamentally limited by the number of draws that can be
    //    recorded into a DrawPass, which means a very large recording with multiple passes could
    //    exceed uint32_t across all the passes.
    // 4. The check for whether a UniformData is present in the cache is practically the same as
    //    the check for whether the data needs to be uploaded to the GPU, so UniformCache could
    //    remember the associated BufferBindInfos as well.
    // 5. Because UniformCache only cares about the content byte hash/equality, and can memcpy to
    //    the GPU buffer, the cached data contents could all go into a shared byte array instead
    //    of needing to extend SkRefCnt.
    // 6. insert() as a name can imply that the value is always added, so we may want a better
    //    one. It could be a little less generic if UniformCache returned the id and bind-buffer
    //    info; on the other hand, unordered_map::insert has the same semantics as this insert,
    //    so maybe it's fine.

    // Simple wrapper around the returned index to keep all the uint32_ts straight
    class Index {
    public:
        Index() : fIndex(kInvalidIndex) {}
        explicit Index(uint32_t index) : fIndex(index) {}

        bool operator==(const Index& that) const { return fIndex == that.fIndex; }
        bool operator!=(const Index& that) const { return !(*this == that); }

        bool isValid() const { return fIndex != kInvalidIndex; }

        uint32_t asUInt() const { return fIndex; }

    private:
        uint32_t fIndex;
    };

    Index insert(const BaseT& dataBlock) {
        auto kv = fDataBlockIDs.find(const_cast<BaseT*>(&dataBlock));
        if (kv != fDataBlockIDs.end()) {
            return kv->second;
        }

        Index id(SkTo<uint32_t>(fDataBlocks.size()));
        SkASSERT(id.isValid());

        StorageT tmp(BaseT::Make(dataBlock, &fArena));
        fDataBlockIDs.insert({tmp.get(), id});
        fDataBlocks.push_back(std::move(tmp));
        this->validate();
        return id;
    }

    const BaseT* lookup(Index uniqueID) {
        SkASSERT(uniqueID.asUInt() < fDataBlocks.size());
        return fDataBlocks[uniqueID.asUInt()].get();
    }

    // The number of unique BaseT objects in the cache
    size_t count() const {
        SkASSERT(fDataBlocks.size() == fDataBlockIDs.size() && fDataBlocks.size() > 0);
        return fDataBlocks.size() - 1;   // -1 to discount the reserved kInvalidIndex entry
    }

private:
    struct Hash {
        // This hash operator de-references and hashes the data contents
        size_t operator()(const BaseT* dataBlock) const {
            if (!dataBlock) {
                return 0;
            }
            return dataBlock->hash();
        }
    };
    struct Eq {
        // This equality operator de-references and compares the actual data contents
        bool operator()(const BaseT* a, const BaseT* b) const {
            if (!a || !b) {
                return !a && !b;
            }
            return *a == *b;
        }
    };

    // Note: the unique IDs are only unique within a Recorder or a Recording, _not_ globally
    std::unordered_map<const BaseT*, Index, Hash, Eq> fDataBlockIDs;
    std::vector<StorageT> fDataBlocks;
    SkArenaAlloc fArena{0};

    void validate() const {
#ifdef SK_DEBUG
        for (size_t i = 0; i < fDataBlocks.size(); ++i) {
            auto kv = fDataBlockIDs.find(fDataBlocks[i].get());
            SkASSERT(kv != fDataBlockIDs.end());
            SkASSERT(kv->first == fDataBlocks[i].get());
            SkASSERT(SkTo<uint32_t>(i) == kv->second.asUInt());
        }
#endif
    }
};

// A UniformDataCache lives for the entire duration of a Recorder. As such, it has a greater
// likelihood of overflowing a uint32_t index.
using UniformDataCache = PipelineDataCache<SkUniformDataBlockPassThrough, SkUniformDataBlock>;

// A TextureDataCache only lives for a single Recording. When a Recording is snapped, it is pulled
// off of the Recorder and goes with the Recording as a record of the required Textures and
// Samplers.
using TextureDataCache = PipelineDataCache<std::unique_ptr<SkTextureDataBlock>, SkTextureDataBlock>;
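
// A minimal usage sketch (illustrative only; 'block' is assumed to be an already-populated
// SkUniformDataBlock, and the variable names are hypothetical):
//
//    UniformDataCache cache;
//    UniformDataCache::Index id = cache.insert(block);    // dedupes identical contents
//    SkASSERT(id.isValid());
//    const SkUniformDataBlock* stored = cache.lookup(id); // equal to 'block'
//    SkASSERT(cache.count() == 1);
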
} // namespace skgpu::graphite

#endif // skgpu_graphite_PipelineDataCache_DEFINED