div255(x) as ((x+128)*257)>>16 with SSE

_mm_mulhi_epu16 makes the (...*257)>>16 part simple.
This seems to speed up every transfer mode that uses div255(),
in the 7-25% range.

It even appears to obviate the need for approxMulDiv255() on SSE.
I'm not sure about NEON yet, so I'll keep approxMulDiv255() for now.
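
As a quick sanity check (a standalone scalar sketch, not part of this CL):
for every x in 0..255*255 -- the range a Wide lane holds after an 8x8-bit
multiply -- the old ((x+128) + ((x+128)>>8)) >> 8 form, the new
((x+128)*257) >> 16 form, and rounded division (x+127)/255 all agree.

  #include <cstdint>
  #include <cstdio>

  int main() {
      for (uint32_t x = 0; x <= 255 * 255; x++) {
          uint32_t old_form = ((x + 128) + ((x + 128) >> 8)) >> 8;  // what div255() used to do
          uint32_t new_form = ((x + 128) * 257) >> 16;              // the SSE-friendly form
          uint32_t rounded  = (x + 127) / 255;                      // rounds, i.e. (x+127)/255
          if (old_form != new_form || new_form != rounded) {
              printf("mismatch at x=%u\n", x);
              return 1;
          }
      }
      printf("all three forms agree on [0, 255*255]\n");
      return 0;
  }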

No pixels should change:
https://gold.skia.org/search2?issue=1452903004&unt=true&query=source_type%3Dgm&master=false

BUG=skia:
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1452903004
diff --git a/src/core/Sk4px.h b/src/core/Sk4px.h
index a7f5c9f..3755488 100644
--- a/src/core/Sk4px.h
+++ b/src/core/Sk4px.h
@@ -66,11 +66,7 @@
         Sk4px addNarrowHi(const Sk16h&) const;
 
         // Rounds, i.e. (x+127) / 255.
-        Sk4px div255() const {
-            // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
-            auto v = *this + Sk16h(128);
-            return v.addNarrowHi(v >> 8);
-        }
+        Sk4px div255() const;
 
         // These just keep the types as Wide so the user doesn't have to keep casting.
         Wide operator * (const Wide& o) const { return INHERITED::operator*(o); }
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index 89841d9..c27bb13 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -57,6 +57,12 @@
                              vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
 }
 
+inline Sk4px Sk4px::Wide::div255() const {
+    // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+    auto v = *this + Sk16h(128);
+    return v.addNarrowHi(v>>8);
+}
+
 inline Sk4px Sk4px::alphas() const {
     auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT);  // ___3 ___2 ___1 ___0
     return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101));  // 3333 2222 1111 0000
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
index 9c3eb12..96f21db 100644
--- a/src/opts/Sk4px_SSE2.h
+++ b/src/opts/Sk4px_SSE2.h
@@ -45,6 +45,15 @@
     return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
 }
 
+inline Sk4px Sk4px::Wide::div255() const {
+    // (x + 127) / 255 == ((x+128) * 257)>>16,
+    // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
+    const __m128i _128 = _mm_set1_epi16(128),
+                  _257 = _mm_set1_epi16(257);
+    return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
+                                  _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
+}
+
 // Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
 // These are safe on x86, often with no speed penalty.
 
diff --git a/src/opts/Sk4px_none.h b/src/opts/Sk4px_none.h
index 540edb8..efbd780 100644
--- a/src/opts/Sk4px_none.h
+++ b/src/opts/Sk4px_none.h
@@ -62,6 +62,12 @@
                  r.kth<12>(), r.kth<13>(), r.kth<14>(), r.kth<15>());
 }
 
+inline Sk4px Sk4px::Wide::div255() const {
+    // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+    auto v = *this + Sk16h(128);
+    return v.addNarrowHi(v>>8);
+}
+
 inline Sk4px Sk4px::alphas() const {
     static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
     return Sk16b(this->kth< 3>(), this->kth< 3>(), this->kth< 3>(), this->kth< 3>(),