ccpr: Don't consider sub-pixel translation for caching on Android

Bug: skia:
Change-Id: I453400bd1ca1f122d9af526f55102e8712119d2b
Reviewed-on: https://skia-review.googlesource.com/135540
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
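
The essence of the change: on SK_BUILD_FOR_ANDROID_FRAMEWORK builds, a cacheable path's view-matrix
translate is rounded to whole pixels up front, and the mask-cache key stops tracking a sub-pixel
translate entirely, so two draws of the same path that differ only by a fraction of a pixel hit the
same cached mask (matching HWUI, which ignores the matrix when caching paths). Below is a minimal
standalone sketch of that cache-key behavior; the types and names (Matrix, CacheKey) are simplified
stand-ins for illustration, not the actual ccpr classes.

    // Sketch only: simplified stand-ins for the MaskTransform / fuzzy_equals logic in this patch.
    #include <cmath>
    #include <cstdio>

    struct Matrix {
        float scaleX = 1, skewX = 0, skewY = 0, scaleY = 1;
        float transX = 0, transY = 0;
    };

    // Off Android, the key keeps the fractional part of the translate, so a cache hit requires a
    // (nearly) identical sub-pixel offset. On the Android framework build, the fractional part is
    // simply never part of the key.
    struct CacheKey {
        float m2x2[4];
    #ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
        float subpixelTranslate[2];
    #endif

        explicit CacheKey(const Matrix& m)
                : m2x2{m.scaleX, m.skewX, m.skewY, m.scaleY} {
    #ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
            subpixelTranslate[0] = m.transX - std::floor(m.transX);
            subpixelTranslate[1] = m.transY - std::floor(m.transY);
    #endif
        }

        bool operator==(const CacheKey& o) const {
            for (int i = 0; i < 4; ++i) {
                if (m2x2[i] != o.m2x2[i]) { return false; }
            }
    #ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
            for (int i = 0; i < 2; ++i) {
                if (std::abs(subpixelTranslate[i] - o.subpixelTranslate[i]) > 1.f/256) {
                    return false;
                }
            }
    #endif
            return true;
        }
    };

    int main() {
        Matrix a, b;
        a.transX = 10.25f;  // same path, two different sub-pixel offsets
        b.transX = 10.75f;
        // Compares equal (a cache hit) only when SK_BUILD_FOR_ANDROID_FRAMEWORK is defined.
        std::printf("cache hit: %s\n", CacheKey(a) == CacheKey(b) ? "yes" : "no");
    }

The rounding half of the change (GrCCDrawPathsOp::SingleDraw) then guarantees that, on Android, the
integer part of the translate is already baked into the matrix before the draw reaches the cache.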
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index e3010f9..4eddd84 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -50,8 +50,7 @@
         : GrDrawOp(ClassID())
         , fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
         , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(paint))
-        , fDraws({looseClippedIBounds, m, shape, paint.getColor(), nullptr, nullptr, {0, 0},
-                  canStashPathMask, nullptr})
+        , fDraws(looseClippedIBounds, m, shape, paint.getColor(), canStashPathMask)
         , fProcessors(std::move(paint)) {  // Paint must be moved after fetching its color above.
     SkDEBUGCODE(fBaseInstance = -1);
     // FIXME: intersect with clip bounds to (hopefully) improve batching.
@@ -66,6 +65,23 @@
     }
 }
 
+GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkIRect& clippedDevIBounds, const SkMatrix& m,
+                                        const GrShape& shape, GrColor color, bool canStashPathMask)
+        : fLooseClippedIBounds(clippedDevIBounds)
+        , fMatrix(m)
+        , fShape(shape)
+        , fColor(color)
+        , fCanStashPathMask(canStashPathMask) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+    if (fShape.hasUnstyledKey()) {
+        // On AOSP we round view matrix translates to integers for cacheable paths. This matches
+        // HWUI's cache hit ratio; HWUI does not consider the matrix when caching paths.
+        fMatrix.setTranslateX(SkScalarRoundToScalar(fMatrix.getTranslateX()));
+        fMatrix.setTranslateY(SkScalarRoundToScalar(fMatrix.getTranslateY()));
+    }
+#endif
+}
+
 GrCCDrawPathsOp::SingleDraw::~SingleDraw() {
     if (fCacheEntry) {
         // All currFlushAtlas references must be reset back to null before the flush is finished.
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index a779475..e26b0f7 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -80,21 +80,23 @@
     const uint32_t fSRGBFlags;
 
     struct SingleDraw {
+        SingleDraw(const SkIRect& clippedDevIBounds, const SkMatrix&, const GrShape&, GrColor,
+                   bool canStashPathMask);
         ~SingleDraw();
 
-        SkIRect fLooseClippedIBounds;
+        const SkIRect fLooseClippedIBounds;
         SkMatrix fMatrix;
-        GrShape fShape;
+        const GrShape fShape;
         GrColor fColor;
 
+        // If we render the path, can we stash its atlas and copy to the resource cache next flush?
+        const bool fCanStashPathMask;
+
         sk_sp<GrCCPathCacheEntry> fCacheEntry;
         sk_sp<GrTextureProxy> fCachedAtlasProxy;
         SkIVector fCachedMaskShift;
 
-        // If we render the path, can we stash its atlas and copy to the resource cache next flush?
-        bool fCanStashPathMask;
-
-        SingleDraw* fNext;
+        SingleDraw* fNext = nullptr;
     };
 
     GrCCSTLList<SingleDraw> fDraws;
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index a2b6416..36e824e 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -17,19 +17,28 @@
 GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
         : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
     SkASSERT(!m.hasPerspective());
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
     Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
     Sk2f floor = translate.floor();
     (translate - floor).store(fSubpixelTranslate);
     shift->set((int)floor[0], (int)floor[1]);
     SkASSERT((float)shift->fX == floor[0]);
     SkASSERT((float)shift->fY == floor[1]);
+#endif
 }
 
 inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                 const GrCCPathCache::MaskTransform& b) {
-    return (Sk4f::Load(a.fMatrix2x2) == Sk4f::Load(b.fMatrix2x2)).allTrue() &&
-           ((Sk2f::Load(a.fSubpixelTranslate) -
-             Sk2f::Load(b.fSubpixelTranslate)).abs() < 1.f/256).allTrue();
+    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
+        return false;
+    }
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+    if (((Sk2f::Load(a.fSubpixelTranslate) -
+          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
+        return false;
+    }
+#endif
+    return true;
 }
 
 inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* cache, const MaskTransform& m,
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index e6e25d2..0e6f4f0 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -38,7 +38,11 @@
     struct MaskTransform {
         MaskTransform(const SkMatrix& m, SkIVector* shift);
         float fMatrix2x2[4];
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+        // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
+        // On AOSP we follow HWUI's lead and ignore the subpixel translate.
         float fSubpixelTranslate[2];
+#endif
     };
 
     enum class CreateIfAbsent : bool {
diff --git a/src/gpu/ccpr/GrCCSTLList.h b/src/gpu/ccpr/GrCCSTLList.h
index dec257d..039b06e 100644
--- a/src/gpu/ccpr/GrCCSTLList.h
+++ b/src/gpu/ccpr/GrCCSTLList.h
@@ -18,7 +18,8 @@
  */
 template<typename T> class GrCCSTLList {
 public:
-    GrCCSTLList(T&& head) : fHead(std::move(head)) {}
+    template <typename ...Args>
+    GrCCSTLList(Args&&... args) : fHead(std::forward<Args>(args)...) {}
 
     ~GrCCSTLList() {
         T* draw = fHead.fNext; // fHead will be destructed automatically.
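
For reference, the GrCCSTLList change above replaces the move-a-prebuilt-head constructor with a
perfect-forwarding one: SingleDraw now has its own constructor and const members, so the list head
is built in place from the constructor arguments rather than moved from a temporary. A minimal
standalone sketch of that pattern follows; Node and HeadList are hypothetical stand-ins, not the
real ccpr types.

    // Sketch only: a head-by-value list whose constructor forwards its arguments to the head
    // element (hypothetical stand-ins for GrCCSTLList / SingleDraw).
    #include <cstdio>
    #include <utility>

    struct Node {
        Node(int id, bool flag) : fId(id), fFlag(flag) {}
        const int fId;     // const members: Node can't be assigned, so the list builds it in place
        const bool fFlag;
        Node* fNext = nullptr;
    };

    template <typename T> class HeadList {
    public:
        // Perfect-forward whatever arguments T's constructor takes and construct the head element
        // in place, instead of requiring a movable, already-built T.
        template <typename... Args>
        HeadList(Args&&... args) : fHead(std::forward<Args>(args)...) {}

        T& head() { return fHead; }

    private:
        T fHead;
    };

    int main() {
        HeadList<Node> list(7, true);  // head constructed directly from (7, true)
        std::printf("head: id=%d flag=%d\n", list.head().fId, list.head().fFlag);
    }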