Have SkScalerCache return memory used
The SkScalerCache now returns the amount of memory allocated
for each operation. The SkStrikeCache can now start using this
information to track cache size.
Change-Id: Ie0e8138015c6692257126a7bb02a3773b10a7b0a
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/271476
Reviewed-by: Ben Wagner <bungeman@google.com>
Commit-Queue: Herb Derby <herb@google.com>
diff --git a/src/core/SkRemoteGlyphCache.cpp b/src/core/SkRemoteGlyphCache.cpp
index 211344e..cc250d5 100644
--- a/src/core/SkRemoteGlyphCache.cpp
+++ b/src/core/SkRemoteGlyphCache.cpp
@@ -993,7 +993,7 @@
pathPtr = &path;
}
- strike->preparePath(allocatedGlyph, pathPtr);
+ strike->mergePath(allocatedGlyph, pathPtr);
}
}
diff --git a/src/core/SkScalerCache.cpp b/src/core/SkScalerCache.cpp
index d994485..52fc5b1 100644
--- a/src/core/SkScalerCache.cpp
+++ b/src/core/SkScalerCache.cpp
@@ -46,36 +46,39 @@
#endif
// -- glyph creation -------------------------------------------------------------------------------
-SkGlyph* SkScalerCache::makeGlyph(SkPackedGlyphID packedGlyphID) {
- fMemoryUsed += sizeof(SkGlyph);
+std::tuple<SkGlyph*, size_t> SkScalerCache::makeGlyph(SkPackedGlyphID packedGlyphID) {
SkGlyph* glyph = fAlloc.make<SkGlyph>(packedGlyphID);
fGlyphMap.set(glyph);
- return glyph;
+ return {glyph, sizeof(SkGlyph)};
}
-SkGlyph* SkScalerCache::glyph(SkPackedGlyphID packedGlyphID) {
+std::tuple<SkGlyph*, size_t> SkScalerCache::glyph(SkPackedGlyphID packedGlyphID) {
VALIDATE();
SkGlyph* glyph = fGlyphMap.findOrNull(packedGlyphID);
+ size_t bytes = 0;
if (glyph == nullptr) {
- glyph = this->makeGlyph(packedGlyphID);
+ std::tie(glyph, bytes) = this->makeGlyph(packedGlyphID);
fScalerContext->getMetrics(glyph);
}
- return glyph;
+ return {glyph, bytes};
}
-const SkPath* SkScalerCache::preparePath(SkGlyph* glyph) {
+std::tuple<const SkPath*, size_t> SkScalerCache::preparePath(SkGlyph* glyph) {
+ size_t delta = 0;
if (glyph->setPath(&fAlloc, fScalerContext.get())) {
- fMemoryUsed += glyph->path()->approximateBytesUsed();
+ delta = glyph->path()->approximateBytesUsed();
}
- return glyph->path();
+ return {glyph->path(), delta};
}
-const SkPath* SkScalerCache::preparePath(SkGlyph* glyph, const SkPath* path) {
+std::tuple<const SkPath*, size_t> SkScalerCache::mergePath(SkGlyph* glyph, const SkPath* path) {
SkAutoMutexExclusive lock{fMu};
+ size_t pathDelta = 0;
if (glyph->setPath(&fAlloc, path)) {
- fMemoryUsed += glyph->path()->approximateBytesUsed();
+ pathDelta = glyph->path()->approximateBytesUsed();
}
- return glyph->path();
+ fMemoryUsed += pathDelta;
+ return {glyph->path(), pathDelta};
}
const SkDescriptor& SkScalerCache::getDescriptor() const {
@@ -87,96 +90,116 @@
return fGlyphMap.count();
}
-SkGlyph* SkScalerCache::internalGlyphOrNull(SkPackedGlyphID id) const {
- return fGlyphMap.findOrNull(id);
-}
-
-SkSpan<const SkGlyph*> SkScalerCache::internalPrepare(
+std::tuple<SkSpan<const SkGlyph*>, size_t> SkScalerCache::internalPrepare(
SkSpan<const SkGlyphID> glyphIDs, PathDetail pathDetail, const SkGlyph** results) {
const SkGlyph** cursor = results;
+ size_t delta = 0;
for (auto glyphID : glyphIDs) {
- SkGlyph* glyphPtr = this->glyph(SkPackedGlyphID{glyphID});
+ auto [glyph, size] = this->glyph(SkPackedGlyphID{glyphID});
+ delta += size;
if (pathDetail == kMetricsAndPath) {
- this->preparePath(glyphPtr);
+ auto [_, pathSize] = this->preparePath(glyph);
+ delta += pathSize;
}
- *cursor++ = glyphPtr;
+ *cursor++ = glyph;
}
- return {results, glyphIDs.size()};
+ return {{results, glyphIDs.size()}, delta};
}
-const void* SkScalerCache::prepareImage(SkGlyph* glyph) {
+std::tuple<const void*, size_t> SkScalerCache::prepareImage(SkGlyph* glyph) {
+ size_t delta = 0;
if (glyph->setImage(&fAlloc, fScalerContext.get())) {
- fMemoryUsed += glyph->imageSize();
+ delta = glyph->imageSize();
}
- return glyph->image();
+ return {glyph->image(), delta};
}
-SkGlyph* SkScalerCache::mergeGlyphAndImage(SkPackedGlyphID toID, const SkGlyph& from) {
+std::tuple<SkGlyph*, size_t> SkScalerCache::mergeGlyphAndImage(
+ SkPackedGlyphID toID, const SkGlyph& from) {
SkAutoMutexExclusive lock{fMu};
+ size_t delta = 0;
+ size_t imageDelta = 0;
SkGlyph* glyph = fGlyphMap.findOrNull(toID);
if (glyph == nullptr) {
- glyph = this->makeGlyph(toID);
+ std::tie(glyph, delta) = this->makeGlyph(toID);
}
if (glyph->setMetricsAndImage(&fAlloc, from)) {
- fMemoryUsed += glyph->imageSize();
+ imageDelta = glyph->imageSize();
}
- return glyph;
+ fMemoryUsed += delta + imageDelta;
+ return {glyph, delta + imageDelta};
}
-SkSpan<const SkGlyph*> SkScalerCache::metrics(SkSpan<const SkGlyphID> glyphIDs,
- const SkGlyph* results[]) {
+std::tuple<SkSpan<const SkGlyph*>, size_t> SkScalerCache::metrics(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) {
SkAutoMutexExclusive lock{fMu};
- return this->internalPrepare(glyphIDs, kMetricsOnly, results);
+ auto [glyphs, delta] = this->internalPrepare(glyphIDs, kMetricsOnly, results);
+ fMemoryUsed += delta;
+ return {glyphs, delta};
}
-SkSpan<const SkGlyph*> SkScalerCache::preparePaths(SkSpan<const SkGlyphID> glyphIDs,
- const SkGlyph* results[]) {
+std::tuple<SkSpan<const SkGlyph*>, size_t> SkScalerCache::preparePaths(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) {
SkAutoMutexExclusive lock{fMu};
- return this->internalPrepare(glyphIDs, kMetricsAndPath, results);
+ auto [glyphs, delta] = this->internalPrepare(glyphIDs, kMetricsAndPath, results);
+ fMemoryUsed += delta;
+ return {glyphs, delta};
}
-SkSpan<const SkGlyph*>
-SkScalerCache::prepareImages(SkSpan<const SkPackedGlyphID> glyphIDs, const SkGlyph* results[]) {
+std::tuple<SkSpan<const SkGlyph*>, size_t> SkScalerCache::prepareImages(
+ SkSpan<const SkPackedGlyphID> glyphIDs, const SkGlyph* results[]) {
const SkGlyph** cursor = results;
SkAutoMutexExclusive lock{fMu};
+ size_t delta = 0;
for (auto glyphID : glyphIDs) {
- SkGlyph* glyphPtr = this->glyph(glyphID);
- (void)this->prepareImage(glyphPtr);
- *cursor++ = glyphPtr;
+ auto [glyph, glyphSize] = this->glyph(glyphID);
+ auto [_, imageSize] = this->prepareImage(glyph);
+ delta += glyphSize + imageSize;
+ *cursor++ = glyph;
}
- return {results, glyphIDs.size()};
+ fMemoryUsed += delta;
+ return {{results, glyphIDs.size()}, delta};
}
template <typename Fn>
-void SkScalerCache::commonFilterLoop(SkDrawableGlyphBuffer* drawables, Fn&& fn) {
+size_t SkScalerCache::commonFilterLoop(SkDrawableGlyphBuffer* drawables, Fn&& fn) {
+ size_t total = 0;
for (auto [i, packedID, pos] : SkMakeEnumerate(drawables->input())) {
if (SkScalarsAreFinite(pos.x(), pos.y())) {
- SkGlyph* glyph = this->glyph(packedID);
+ auto [glyph, size] = this->glyph(packedID);
+ total += size;
if (!glyph->isEmpty()) {
fn(i, glyph, pos);
}
}
}
+ return total;
}
-void SkScalerCache::prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables) {
+size_t SkScalerCache::prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables) {
SkAutoMutexExclusive lock{fMu};
- this->commonFilterLoop(drawables,
+ size_t imageDelta = 0;
+ size_t delta = this->commonFilterLoop(drawables,
[&](size_t i, SkGlyph* glyph, SkPoint pos) SK_REQUIRES(fMu) {
// If the glyph is too large, then no image is created.
- if (this->prepareImage(glyph) != nullptr) {
+ auto [image, imageSize] = this->prepareImage(glyph);
+ if (image != nullptr) {
drawables->push_back(glyph, i);
+ imageDelta += imageSize;
}
});
+
+ fMemoryUsed += delta + imageDelta;
+ return delta + imageDelta;
}
// Note: this does not actually fill out the image. That happens at atlas building time.
-void SkScalerCache::prepareForMaskDrawing(
+size_t SkScalerCache::prepareForMaskDrawing(
SkDrawableGlyphBuffer* drawables, SkSourceGlyphBuffer* rejects) {
SkAutoMutexExclusive lock{fMu};
- this->commonFilterLoop(drawables,
+ size_t delta = this->commonFilterLoop(drawables,
[&](size_t i, SkGlyph* glyph, SkPoint pos) {
if (SkStrikeForGPU::CanDrawAsMask(*glyph)) {
drawables->push_back(glyph, i);
@@ -184,12 +207,14 @@
rejects->reject(i);
}
});
+ fMemoryUsed += delta;
+ return delta;
}
-void SkScalerCache::prepareForSDFTDrawing(
+size_t SkScalerCache::prepareForSDFTDrawing(
SkDrawableGlyphBuffer* drawables, SkSourceGlyphBuffer* rejects) {
SkAutoMutexExclusive lock{fMu};
- this->commonFilterLoop(drawables,
+ size_t delta = this->commonFilterLoop(drawables,
[&](size_t i, SkGlyph* glyph, SkPoint pos) {
if (SkStrikeForGPU::CanDrawAsSDFT(*glyph)) {
drawables->push_back(glyph, i);
@@ -197,15 +222,19 @@
rejects->reject(i);
}
});
+ fMemoryUsed += delta;
+ return delta;
}
-void SkScalerCache::prepareForPathDrawing(
+size_t SkScalerCache::prepareForPathDrawing(
SkDrawableGlyphBuffer* drawables, SkSourceGlyphBuffer* rejects) {
SkAutoMutexExclusive lock{fMu};
- this->commonFilterLoop(drawables,
+ size_t pathDelta = 0;
+ size_t delta = this->commonFilterLoop(drawables,
[&](size_t i, SkGlyph* glyph, SkPoint pos) SK_REQUIRES(fMu) {
if (!glyph->isColor()) {
- const SkPath* path = this->preparePath(glyph);
+ auto [path, pathSize] = this->preparePath(glyph);
+ pathDelta += pathSize;
if (path != nullptr) {
// Save off the path to draw later.
drawables->push_back(path, i);
@@ -218,6 +247,8 @@
rejects->reject(i, glyph->maxDimension());
}
});
+ fMemoryUsed += delta + pathDelta;
+ return delta + pathDelta;
}
void SkScalerCache::findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
diff --git a/src/core/SkScalerCache.h b/src/core/SkScalerCache.h
index 104454f..5c2ab99 100644
--- a/src/core/SkScalerCache.h
+++ b/src/core/SkScalerCache.h
@@ -33,10 +33,12 @@
// an image, then use the information in from to initialize the width, height top, left,
// format and image of the toGlyph. This is mainly used preserving the glyph if it was
// created by a search of desperation.
- SkGlyph* mergeGlyphAndImage(SkPackedGlyphID toID, const SkGlyph& from) SK_EXCLUDES(fMu);
+ std::tuple<SkGlyph*, size_t> mergeGlyphAndImage(
+ SkPackedGlyphID toID, const SkGlyph& from) SK_EXCLUDES(fMu);
// If the path has never been set, then add a path to glyph.
- const SkPath* preparePath(SkGlyph* glyph, const SkPath* path) SK_EXCLUDES(fMu);
+ std::tuple<const SkPath*, size_t> mergePath(
+ SkGlyph* glyph, const SkPath* path) SK_EXCLUDES(fMu);
/** Return the number of glyphs currently cached. */
int countCachedGlyphs() const SK_EXCLUDES(fMu);
@@ -51,17 +53,16 @@
return fFontMetrics;
}
+ std::tuple<SkSpan<const SkGlyph*>, size_t> metrics(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) SK_EXCLUDES(fMu);
- SkSpan<const SkGlyph*> metrics(SkSpan<const SkGlyphID> glyphIDs,
- const SkGlyph* results[]) SK_EXCLUDES(fMu);
+ std::tuple<SkSpan<const SkGlyph*>, size_t> preparePaths(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) SK_EXCLUDES(fMu);
- SkSpan<const SkGlyph*> preparePaths(SkSpan<const SkGlyphID> glyphIDs,
- const SkGlyph* results[]) SK_EXCLUDES(fMu);
+ std::tuple<SkSpan<const SkGlyph*>, size_t> prepareImages(
+ SkSpan<const SkPackedGlyphID> glyphIDs, const SkGlyph* results[]) SK_EXCLUDES(fMu);
- SkSpan<const SkGlyph*> prepareImages(SkSpan<const SkPackedGlyphID> glyphIDs,
- const SkGlyph* results[]) SK_EXCLUDES(fMu);
-
- void prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables) SK_EXCLUDES(fMu);
+ size_t prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables) SK_EXCLUDES(fMu);
// SkStrikeForGPU APIs
const SkGlyphPositionRoundingSpec& roundingSpec() const {
@@ -70,13 +71,13 @@
const SkDescriptor& getDescriptor() const;
- void prepareForMaskDrawing(
+ size_t prepareForMaskDrawing(
SkDrawableGlyphBuffer* drawables, SkSourceGlyphBuffer* rejects) SK_EXCLUDES(fMu);
- void prepareForSDFTDrawing(
+ size_t prepareForSDFTDrawing(
SkDrawableGlyphBuffer* drawables, SkSourceGlyphBuffer* rejects) SK_EXCLUDES(fMu);
- void prepareForPathDrawing(
+ size_t prepareForPathDrawing(
SkDrawableGlyphBuffer* drawables, SkSourceGlyphBuffer* rejects) SK_EXCLUDES(fMu);
/** Return the approx RAM usage for this cache. */
@@ -126,21 +127,19 @@
}
};
- SkGlyph* makeGlyph(SkPackedGlyphID) SK_REQUIRES(fMu);
+ std::tuple<SkGlyph*, size_t> makeGlyph(SkPackedGlyphID) SK_REQUIRES(fMu);
template <typename Fn>
- void commonFilterLoop(SkDrawableGlyphBuffer* drawables, Fn&& fn) SK_REQUIRES(fMu);
+ size_t commonFilterLoop(SkDrawableGlyphBuffer* drawables, Fn&& fn) SK_REQUIRES(fMu);
// Return a glyph. Create it if it doesn't exist, and initialize the glyph with metrics and
// advances using a scaler.
- SkGlyph* glyph(SkPackedGlyphID) SK_REQUIRES(fMu);
+ std::tuple<SkGlyph*, size_t> glyph(SkPackedGlyphID) SK_REQUIRES(fMu);
- const void* prepareImage(SkGlyph* glyph) SK_REQUIRES(fMu);
+ std::tuple<const void*, size_t> prepareImage(SkGlyph* glyph) SK_REQUIRES(fMu);
// If the path has never been set, then use the scaler context to add the glyph.
- const SkPath* preparePath(SkGlyph*) SK_REQUIRES(fMu);
-
- SkGlyph* internalGlyphOrNull(SkPackedGlyphID) const SK_REQUIRES(fMu);
+ std::tuple<const SkPath*, size_t> preparePath(SkGlyph*) SK_REQUIRES(fMu);
enum PathDetail {
kMetricsOnly,
@@ -148,7 +147,7 @@
};
// internalPrepare will only be called with a mutex already held.
- SkSpan<const SkGlyph*> internalPrepare(
+ std::tuple<SkSpan<const SkGlyph*>, size_t> internalPrepare(
SkSpan<const SkGlyphID> glyphIDs,
PathDetail pathDetail,
const SkGlyph** results) SK_REQUIRES(fMu);
diff --git a/src/core/SkStrikeCache.h b/src/core/SkStrikeCache.h
index e589d00..f9ff70d 100644
--- a/src/core/SkStrikeCache.h
+++ b/src/core/SkStrikeCache.h
@@ -55,21 +55,19 @@
, fPinner{std::move(pinner)} {}
SkGlyph* mergeGlyphAndImage(SkPackedGlyphID toID, const SkGlyph& from) {
- return fScalerCache.mergeGlyphAndImage(toID, from);
+ auto [glyph, delta] = fScalerCache.mergeGlyphAndImage(toID, from);
+ return glyph;
}
- const SkPath* preparePath(SkGlyph* glyph, const SkPath* path) {
- return fScalerCache.preparePath(glyph, path);
+ const SkPath* mergePath(SkGlyph* glyph, const SkPath* path) {
+ auto [glyphPath, pathDelta] = fScalerCache.mergePath(glyph, path);
+ return glyphPath;
}
SkScalerContext* getScalerContext() const {
return fScalerCache.getScalerContext();
}
- int countCachedGlyphs() {
- return fScalerCache.countCachedGlyphs();
- }
-
void findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
SkGlyph* glyph, SkScalar* array, int* count) {
fScalerCache.findIntercepts(bounds, scale, xPos, glyph, array, count);
@@ -81,21 +79,25 @@
SkSpan<const SkGlyph*> metrics(SkSpan<const SkGlyphID> glyphIDs,
const SkGlyph* results[]) {
- return fScalerCache.metrics(glyphIDs, results);
+ auto [glyphs, delta] = fScalerCache.metrics(glyphIDs, results);
+ return glyphs;
}
SkSpan<const SkGlyph*> preparePaths(SkSpan<const SkGlyphID> glyphIDs,
const SkGlyph* results[]) {
- return fScalerCache.preparePaths(glyphIDs, results);
+ auto [glyphs, delta] = fScalerCache.preparePaths(glyphIDs, results);
+ return glyphs;
}
SkSpan<const SkGlyph*> prepareImages(SkSpan<const SkPackedGlyphID> glyphIDs,
const SkGlyph* results[]) {
- return fScalerCache.prepareImages(glyphIDs, results);
+ auto [glyphs, delta] = fScalerCache.prepareImages(glyphIDs, results);
+ return glyphs;
}
void prepareForDrawingMasksCPU(SkDrawableGlyphBuffer* drawables) {
- return fScalerCache.prepareForDrawingMasksCPU(drawables);
+ // Delta is returned.
+ (void)fScalerCache.prepareForDrawingMasksCPU(drawables);
}
const SkGlyphPositionRoundingSpec& roundingSpec() const override {
@@ -108,17 +110,20 @@
void prepareForMaskDrawing(
SkDrawableGlyphBuffer* drawbles, SkSourceGlyphBuffer* rejects) override {
- fScalerCache.prepareForMaskDrawing(drawbles, rejects);
+ // Delta is returned.
+ (void)fScalerCache.prepareForMaskDrawing(drawbles, rejects);
}
void prepareForSDFTDrawing(
SkDrawableGlyphBuffer* drawbles, SkSourceGlyphBuffer* rejects) override {
- fScalerCache.prepareForSDFTDrawing(drawbles, rejects);
+ // Delta is returned.
+ (void)fScalerCache.prepareForSDFTDrawing(drawbles, rejects);
}
void prepareForPathDrawing(
SkDrawableGlyphBuffer* drawbles, SkSourceGlyphBuffer* rejects) override {
- fScalerCache.prepareForPathDrawing(drawbles, rejects);
+ // Delta is returned.
+ (void)fScalerCache.prepareForPathDrawing(drawbles, rejects);
}
void onAboutToExitScope() override {