/*
* Copyright 2020 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrThreadSafeUniquelyKeyedProxyViewCache_DEFINED
#define GrThreadSafeUniquelyKeyedProxyViewCache_DEFINED
#include "include/private/SkSpinlock.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrSurfaceProxyView.h"
// Ganesh creates a lot of utility textures (e.g., blurred-rrect masks) that need to be shared
// between the direct context and all the DDL recording contexts. This thread-safe cache
// allows this sharing.
//
// In operation, each thread will first check if the threaded cache possesses the required texture.
//
// If a DDL thread doesn't find a needed texture it will go off and create it on the CPU and then
// attempt to add it to the cache. If another thread has added it in the interim, the losing thread
// will discard its work and use the texture the winning thread created.
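//
// e.g., a recording thread's flow might look roughly like this (a sketch only;
// 'threadSafeViewCache', 'rContext', 'key' and 'create_mask_on_cpu' are illustrative
// names - only find() and add() below belong to this class):
//
//   GrSurfaceProxyView view = threadSafeViewCache->find(key);
//   if (!view) {
//       view = create_mask_on_cpu(rContext, key);     // CPU-side creation
//       view = threadSafeViewCache->add(key, view);   // may return another thread's view
//   }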
//
// If the thread in possession of the direct context doesn't find the needed texture it should
// add a placeholder view and then queue up the draw calls to complete it. In this way the
// GPU thread has precedence over the recording threads.
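//
// e.g., the direct-context thread's flow might look roughly like this (again just a
// sketch; 'make_placeholder_view' and 'queue_draws_to_fill' are hypothetical helpers):
//
//   GrSurfaceProxyView view = threadSafeViewCache->find(key);
//   if (!view) {
//       view = make_placeholder_view(dContext, dimensions);   // e.g., a lazy proxy
//       view = threadSafeViewCache->add(key, view);
//       queue_draws_to_fill(dContext, view);                  // GPU work fills it in later
//   }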
//
// The invariants for this cache differ a bit from those of the proxy and resource caches.
// For this cache:
//
//   - only this cache knows the unique key - neither the proxy nor backing resource should
//     be discoverable in any other cache by the unique key
//
//   - if a backing resource resides in the resource cache then there should be an entry in this
//     cache
//
//   - an entry in this cache, however, doesn't guarantee that there is a corresponding entry in
//     the resource cache - although the entry here should be able to generate that entry
//     (i.e., be a lazy proxy)
//
// With respect to interactions with GrContext/GrResourceCache purging, we have:
//
// Both GrContext::abandonContext and GrContext::releaseResourcesAndAbandonContext will cause
// all the refs held in this cache to be dropped prior to clearing out the resource cache.
//
// For the size_t-variant of GrContext::purgeUnlockedResources, after an initial attempt
// to purge the requested amount of resources fails, uniquely held resources in this cache
// will be dropped in LRU to MRU order until the cache is under budget. Note that this
// prioritizes the survival of resources in this cache over those just in the resource cache.
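//
// i.e., for a call such as (a sketch - 'dContext' and 'bytesToPurge' are illustrative,
// only the entry point named above is real):
//
//   dContext->purgeUnlockedResources(bytesToPurge, /*preferScratchResources=*/true);
//
// the resource cache is purged first and, only if that falls short, the uniquely held
// entries in this cache are dropped via dropAllUniqueRefs().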
class GrThreadSafeUniquelyKeyedProxyViewCache {
public:
GrThreadSafeUniquelyKeyedProxyViewCache();
~GrThreadSafeUniquelyKeyedProxyViewCache();
#if GR_TEST_UTILS
int numEntries() const SK_EXCLUDES(fSpinLock);
size_t approxBytesUsedForHash() const SK_EXCLUDES(fSpinLock);
#endif
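// Drop all of this cache's refs, e.g. when the owning context is abandoned
// (see the class comment above)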
void dropAllRefs() SK_EXCLUDES(fSpinLock);
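// Drop only the refs that this cache holds uniquely - used by the purging path
// described in the class comment above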
void dropAllUniqueRefs(GrResourceCache* resourceCache) SK_EXCLUDES(fSpinLock);
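// Return the view cached under 'key', if any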
GrSurfaceProxyView find(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
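// Add 'view' under 'key' and return the view to use; if another thread won the race,
// the returned view is the pre-existing one (see the class comment above)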
GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
private:
struct Entry {
Entry(const GrUniqueKey& key, const GrSurfaceProxyView& view) : fKey(key), fView(view) {}
// Note: the unique key is stored here because it is never attached to a proxy or a GrTexture
GrUniqueKey fKey;
GrSurfaceProxyView fView;
SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);
// The interface required by SkTDynamicHash
static const GrUniqueKey& GetKey(const Entry& e) { return e.fKey; }
static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
};
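// Entry management: getEntry() is expected to reuse an Entry from fFreeEntryList when
// possible and otherwise allocate from fEntryAllocator, while recycleEntry() returns an
// Entry to the free list (the pattern implied by the members below; the definitions
// live in the .cpp file)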
Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
void recycleEntry(Entry*) SK_REQUIRES(fSpinLock);
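// Performs the work of add() with fSpinLock already held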
GrSurfaceProxyView internalAdd(const GrUniqueKey&,
const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
mutable SkSpinlock fSpinLock;
SkTDynamicHash<Entry, GrUniqueKey> fUniquelyKeyedProxyViewMap SK_GUARDED_BY(fSpinLock);
// The head of this list is the MRU
SkTInternalLList<Entry> fUniquelyKeyedProxyViewList SK_GUARDED_BY(fSpinLock);
// TODO: empirically determine this from the skps
static const int kInitialArenaSize = 64 * sizeof(Entry);
char fStorage[kInitialArenaSize];
SkArenaAlloc fEntryAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
Entry* fFreeEntryList SK_GUARDED_BY(fSpinLock);
};
#endif // GrThreadSafeUniquelyKeyedProxyViewCache_DEFINED