| /* |
| * Copyright 2018 Google Inc. |
| * |
| * Use of this source code is governed by a BSD-style license that can be |
| * found in the LICENSE file. |
| */ |
| |
| #ifndef SkRasterPipeline_opts_DEFINED |
| #define SkRasterPipeline_opts_DEFINED |
| |
| #include "include/core/SkTypes.h" |
| #include "include/private/base/SkMalloc.h" |
| #include "include/private/base/SkSpan_impl.h" |
| #include "include/private/base/SkTemplates.h" |
| #include "modules/skcms/skcms.h" |
| #include "src/base/SkUtils.h" // unaligned_{load,store} |
| #include "src/core/SkRasterPipeline.h" |
| #include "src/core/SkRasterPipelineContextUtils.h" |
| #include "src/shaders/SkPerlinNoiseShaderType.h" |
| #include "src/sksl/tracing/SkSLTraceHook.h" |
| |
| #include <cstdint> |
| #include <type_traits> |
| |
| // Every function in this file should be marked static and inline using SI. |
| #if defined(__clang__) || defined(__GNUC__) |
| #define SI __attribute__((always_inline)) static inline |
| #else |
| #define SI static inline |
| #endif |
| |
| #if defined(__clang__) |
| #define SK_UNROLL _Pragma("unroll") |
| #else |
| #define SK_UNROLL |
| #endif |
| |
| #if defined(__clang__) |
| template <int N, typename T> using Vec = T __attribute__((ext_vector_type(N))); |
| #elif defined(__GNUC__) |
| // Unfortunately, GCC does not allow us to omit the struct. This will not compile: |
| // template <int N, typename T> using Vec = T __attribute__((vector_size(N*sizeof(T)))); |
| template <int N, typename T> struct VecHelper { |
| typedef T __attribute__((vector_size(N * sizeof(T)))) V; |
| }; |
| template <int N, typename T> using Vec = typename VecHelper<N, T>::V; |
| #endif |
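
// Either way, Vec<N,T> supports ordinary arithmetic and per-lane indexing, e.g.:
//     Vec<4, float> v = {1, 2, 3, 4};
//     v = v + v;       // {2, 4, 6, 8}
//     float x = v[3];  // 8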
| |
| template <typename Dst, typename Src> |
| SI Dst widen_cast(const Src& src) { |
| static_assert(sizeof(Dst) > sizeof(Src)); |
| static_assert(std::is_trivially_copyable<Dst>::value); |
| static_assert(std::is_trivially_copyable<Src>::value); |
| Dst dst; |
| memcpy(&dst, &src, sizeof(Src)); |
| return dst; |
| } |
| |
| struct Ctx { |
| SkRasterPipelineStage* fStage; |
| |
| template <typename T> |
| operator T*() { |
| return (T*)fStage->ctx; |
| } |
| }; |
| |
| using NoCtx = const void*; |
| |
#if defined(JUMPER_IS_SCALAR) || defined(JUMPER_IS_NEON) || defined(JUMPER_IS_HSW) ||   \
        defined(JUMPER_IS_SKX) || defined(JUMPER_IS_AVX) || defined(JUMPER_IS_SSE41) || \
        defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_LASX) || defined(JUMPER_IS_LSX)
| // Honor the existing setting |
| #elif !defined(__clang__) && !defined(__GNUC__) |
| #define JUMPER_IS_SCALAR |
| #elif defined(SK_ARM_HAS_NEON) |
| #define JUMPER_IS_NEON |
| #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX |
| #define JUMPER_IS_SKX |
| #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2 |
| #define JUMPER_IS_HSW |
| #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX |
| #define JUMPER_IS_AVX |
| #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 |
| #define JUMPER_IS_SSE41 |
| #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 |
| #define JUMPER_IS_SSE2 |
| #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX |
| #define JUMPER_IS_LASX |
| #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX |
| #define JUMPER_IS_LSX |
| #else |
| #define JUMPER_IS_SCALAR |
| #endif |
| |
| // Older Clangs seem to crash when generating non-optimized NEON code for ARMv7. |
| #if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32) |
| // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative. |
| #if defined(__apple_build_version__) && __clang_major__ < 9 |
| #define JUMPER_IS_SCALAR |
| #elif __clang_major__ < 5 |
| #define JUMPER_IS_SCALAR |
| #endif |
| |
| #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR) |
| #undef JUMPER_IS_NEON |
| #endif |
| #endif |
| |
| #if defined(JUMPER_IS_SCALAR) |
| #include <math.h> |
| #elif defined(JUMPER_IS_NEON) |
| #include <arm_neon.h> |
| #elif defined(JUMPER_IS_LASX) |
| #include <lasxintrin.h> |
| #include <lsxintrin.h> |
| #elif defined(JUMPER_IS_LSX) |
| #include <lsxintrin.h> |
| #else |
| #include <immintrin.h> |
| #endif |
| |
| // Notes: |
// * rcp_fast and rcp_precise both produce a reciprocal, but rcp_fast is an estimate with at
//   least 12 bits of precision, while rcp_precise should be accurate to full float precision.
//   On ARM, rcp_precise needs two Newton-Raphson refinement steps because the hardware
//   estimate has only 8 bits of precision; on Intel, a single step suffices because the
//   estimate already has 12 bits.
| // |
| // * Don't call rcp_approx or rsqrt_approx directly; only use rcp_fast and rsqrt. |
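//
// * One Newton-Raphson step refines a reciprocal estimate e ~ 1/v to e' = e * (2 - v*e),
//   roughly doubling its correct bits. That (2 - v*e) factor is what the rcp_precise
//   implementations below compute via fnmadd (x86) or vrecps (ARM).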
| |
| namespace SK_OPTS_NS { |
| #if defined(JUMPER_IS_SCALAR) |
| // This path should lead to portable scalar code. |
| using F = float ; |
| using I32 = int32_t; |
| using U64 = uint64_t; |
| using U32 = uint32_t; |
| using U16 = uint16_t; |
| using U8 = uint8_t ; |
| |
| SI F min(F a, F b) { return fminf(a,b); } |
| SI I32 min(I32 a, I32 b) { return a < b ? a : b; } |
| SI U32 min(U32 a, U32 b) { return a < b ? a : b; } |
| SI F max(F a, F b) { return fmaxf(a,b); } |
| SI I32 max(I32 a, I32 b) { return a > b ? a : b; } |
| SI U32 max(U32 a, U32 b) { return a > b ? a : b; } |
| |
| SI F mad(F f, F m, F a) { return a+f*m; } |
| SI F nmad(F f, F m, F a) { return a-f*m; } |
| SI F abs_ (F v) { return fabsf(v); } |
| SI I32 abs_ (I32 v) { return v < 0 ? -v : v; } |
| SI F floor_(F v) { return floorf(v); } |
| SI F ceil_(F v) { return ceilf(v); } |
| SI F rcp_approx(F v) { return 1.0f / v; } // use rcp_fast instead |
| SI F rsqrt_approx(F v) { return 1.0f / sqrtf(v); } |
| SI F sqrt_ (F v) { return sqrtf(v); } |
| SI F rcp_precise (F v) { return 1.0f / v; } |
| |
| SI I32 iround(F v) { return (I32)(v + 0.5f); } |
| SI U32 round(F v) { return (U32)(v + 0.5f); } |
| SI U32 round(F v, F scale) { return (U32)(v*scale + 0.5f); } |
| SI U16 pack(U32 v) { return (U16)v; } |
| SI U8 pack(U16 v) { return (U8)v; } |
| |
| SI F if_then_else(I32 c, F t, F e) { return c ? t : e; } |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { return c ? t : e; } |
| |
| SI bool any(I32 c) { return c != 0; } |
| SI bool all(I32 c) { return c != 0; } |
| |
| template <typename T> |
| SI T gather(const T* p, U32 ix) { return p[ix]; } |
| |
| SI void scatter_masked(I32 src, int* dst, U32 ix, I32 mask) { |
| dst[ix] = mask ? src : dst[ix]; |
| } |
| |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| *r = ptr[0]; |
| *g = ptr[1]; |
| } |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| ptr[0] = r; |
| ptr[1] = g; |
| } |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| *r = ptr[0]; |
| *g = ptr[1]; |
| *b = ptr[2]; |
| *a = ptr[3]; |
| } |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| ptr[0] = r; |
| ptr[1] = g; |
| ptr[2] = b; |
| ptr[3] = a; |
| } |
| |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| *r = ptr[0]; |
| *g = ptr[1]; |
| *b = ptr[2]; |
| *a = ptr[3]; |
| } |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| ptr[0] = r; |
| ptr[1] = g; |
| ptr[2] = b; |
| ptr[3] = a; |
| } |
| |
| #elif defined(JUMPER_IS_NEON) |
| template <typename T> using V = Vec<4, T>; |
| using F = V<float >; |
| using I32 = V< int32_t>; |
| using U64 = V<uint64_t>; |
| using U32 = V<uint32_t>; |
| using U16 = V<uint16_t>; |
| using U8 = V<uint8_t >; |
| |
| // We polyfill a few routines that Clang doesn't build into ext_vector_types. |
| SI F min(F a, F b) { return vminq_f32(a,b); } |
| SI I32 min(I32 a, I32 b) { return vminq_s32(a,b); } |
| SI U32 min(U32 a, U32 b) { return vminq_u32(a,b); } |
| SI F max(F a, F b) { return vmaxq_f32(a,b); } |
| SI I32 max(I32 a, I32 b) { return vmaxq_s32(a,b); } |
| SI U32 max(U32 a, U32 b) { return vmaxq_u32(a,b); } |
| |
| SI F abs_ (F v) { return vabsq_f32(v); } |
| SI I32 abs_ (I32 v) { return vabsq_s32(v); } |
| SI F rcp_approx(F v) { auto e = vrecpeq_f32(v); return vrecpsq_f32 (v,e ) * e; } |
| SI F rcp_precise(F v) { auto e = rcp_approx(v); return vrecpsq_f32 (v,e ) * e; } |
| SI F rsqrt_approx(F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; } |
| |
| SI U16 pack(U32 v) { return __builtin_convertvector(v, U16); } |
| SI U8 pack(U16 v) { return __builtin_convertvector(v, U8); } |
| |
| SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); } |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { return vbslq_s32((U32)c,t,e); } |
| |
| #if defined(SK_CPU_ARM64) |
| SI bool any(I32 c) { return vmaxvq_u32((U32)c) != 0; } |
| SI bool all(I32 c) { return vminvq_u32((U32)c) != 0; } |
| |
| SI F mad(F f, F m, F a) { return vfmaq_f32(a,f,m); } |
| SI F nmad(F f, F m, F a) { return vfmsq_f32(a,f,m); } |
| SI F floor_(F v) { return vrndmq_f32(v); } |
| SI F ceil_(F v) { return vrndpq_f32(v); } |
| SI F sqrt_(F v) { return vsqrtq_f32(v); } |
| SI I32 iround(F v) { return vcvtnq_s32_f32(v); } |
| SI U32 round(F v) { return vcvtnq_u32_f32(v); } |
| SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); } |
| #else |
| SI bool any(I32 c) { return c[0] | c[1] | c[2] | c[3]; } |
| SI bool all(I32 c) { return c[0] & c[1] & c[2] & c[3]; } |
| |
| SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); } |
| SI F nmad(F f, F m, F a) { return vmlsq_f32(a,f,m); } |
| |
| SI F floor_(F v) { |
| F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v)); |
| return roundtrip - if_then_else(roundtrip > v, F() + 1, F()); |
| } |
| |
| SI F ceil_(F v) { |
| F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v)); |
| return roundtrip + if_then_else(roundtrip < v, F() + 1, F()); |
| } |
| |
| SI F sqrt_(F v) { |
| auto e = vrsqrteq_f32(v); // Estimate and two refinement steps for e = rsqrt(v). |
| e *= vrsqrtsq_f32(v,e*e); |
| e *= vrsqrtsq_f32(v,e*e); |
| return v*e; // sqrt(v) == v*rsqrt(v). |
| } |
| |
| SI I32 iround(F v) { |
| return vcvtq_s32_f32(v + 0.5f); |
| } |
| |
| SI U32 round(F v) { |
| return vcvtq_u32_f32(v + 0.5f); |
| } |
| |
| SI U32 round(F v, F scale) { |
| return vcvtq_u32_f32(mad(v, scale, F() + 0.5f)); |
| } |
| #endif |
| |
| template <typename T> |
| SI V<T> gather(const T* p, U32 ix) { |
| return V<T>{p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]}; |
| } |
| SI void scatter_masked(I32 src, int* dst, U32 ix, I32 mask) { |
| I32 before = gather(dst, ix); |
| I32 after = if_then_else(mask, src, before); |
| dst[ix[0]] = after[0]; |
| dst[ix[1]] = after[1]; |
| dst[ix[2]] = after[2]; |
| dst[ix[3]] = after[3]; |
| } |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| uint16x4x2_t rg = vld2_u16(ptr); |
| *r = rg.val[0]; |
| *g = rg.val[1]; |
| } |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| vst2_u16(ptr, (uint16x4x2_t{{r,g}})); |
| } |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| uint16x4x4_t rgba = vld4_u16(ptr); |
| *r = rgba.val[0]; |
| *g = rgba.val[1]; |
| *b = rgba.val[2]; |
| *a = rgba.val[3]; |
| } |
| |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}})); |
| } |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| float32x4x4_t rgba = vld4q_f32(ptr); |
| *r = rgba.val[0]; |
| *g = rgba.val[1]; |
| *b = rgba.val[2]; |
| *a = rgba.val[3]; |
| } |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}})); |
| } |
| |
| #elif defined(JUMPER_IS_SKX) |
| template <typename T> using V = Vec<16, T>; |
| using F = V<float >; |
| using I32 = V< int32_t>; |
| using U64 = V<uint64_t>; |
| using U32 = V<uint32_t>; |
| using U16 = V<uint16_t>; |
| using U8 = V<uint8_t >; |
| |
| SI F mad(F f, F m, F a) { return _mm512_fmadd_ps(f, m, a); } |
| SI F nmad(F f, F m, F a) { return _mm512_fnmadd_ps(f, m, a); } |
| SI F min(F a, F b) { return _mm512_min_ps(a,b); } |
| SI I32 min(I32 a, I32 b) { return (I32)_mm512_min_epi32((__m512i)a,(__m512i)b); } |
| SI U32 min(U32 a, U32 b) { return (U32)_mm512_min_epu32((__m512i)a,(__m512i)b); } |
| SI F max(F a, F b) { return _mm512_max_ps(a,b); } |
| SI I32 max(I32 a, I32 b) { return (I32)_mm512_max_epi32((__m512i)a,(__m512i)b); } |
| SI U32 max(U32 a, U32 b) { return (U32)_mm512_max_epu32((__m512i)a,(__m512i)b); } |
| SI F abs_ (F v) { return _mm512_and_ps(v, _mm512_sub_ps(_mm512_setzero(), v)); } |
| SI I32 abs_ (I32 v) { return (I32)_mm512_abs_epi32((__m512i)v); } |
| SI F floor_(F v) { return _mm512_floor_ps(v); } |
| SI F ceil_(F v) { return _mm512_ceil_ps(v); } |
| SI F rcp_approx(F v) { return _mm512_rcp14_ps (v); } |
| SI F rsqrt_approx (F v) { return _mm512_rsqrt14_ps(v); } |
| SI F sqrt_ (F v) { return _mm512_sqrt_ps (v); } |
| SI F rcp_precise (F v) { |
| F e = rcp_approx(v); |
| return _mm512_fnmadd_ps(v, e, _mm512_set1_ps(2.0f)) * e; |
| } |
| SI I32 iround(F v) { return (I32)_mm512_cvtps_epi32(v); } |
| SI U32 round(F v) { return (U32)_mm512_cvtps_epi32(v); } |
| SI U32 round(F v, F scale) { return (U32)_mm512_cvtps_epi32(v*scale); } |
| SI U16 pack(U32 v) { |
| __m256i rst = _mm256_packus_epi32(_mm512_castsi512_si256((__m512i)v), |
| _mm512_extracti64x4_epi64((__m512i)v, 1)); |
| return (U16)_mm256_permutex_epi64(rst, 216); |
| } |
| SI U8 pack(U16 v) { |
| __m256i rst = _mm256_packus_epi16((__m256i)v, (__m256i)v); |
| return (U8)_mm256_castsi256_si128(_mm256_permute4x64_epi64(rst, 8)); |
| } |
| SI F if_then_else(I32 c, F t, F e) { |
| __m512i mask = _mm512_set1_epi32(0x80000000); |
| __m512i aa = _mm512_and_si512((__m512i)c, mask); |
| return _mm512_mask_blend_ps(_mm512_test_epi32_mask(aa, aa),e,t); |
| } |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { |
| __m512i mask = _mm512_set1_epi32(0x80000000); |
| __m512i aa = _mm512_and_si512((__m512i)c, mask); |
| return (I32)_mm512_mask_blend_epi32(_mm512_test_epi32_mask(aa, aa),(__m512i)e,(__m512i)t); |
| } |
| SI bool any(I32 c) { |
| __mmask16 mask32 = _mm512_test_epi32_mask((__m512i)c, (__m512i)c); |
| return mask32 != 0; |
| } |
| SI bool all(I32 c) { |
| __mmask16 mask32 = _mm512_test_epi32_mask((__m512i)c, (__m512i)c); |
| return mask32 == 0xffff; |
| } |
| template <typename T> |
| SI V<T> gather(const T* p, U32 ix) { |
| return V<T>{ p[ix[ 0]], p[ix[ 1]], p[ix[ 2]], p[ix[ 3]], |
| p[ix[ 4]], p[ix[ 5]], p[ix[ 6]], p[ix[ 7]], |
| p[ix[ 8]], p[ix[ 9]], p[ix[10]], p[ix[11]], |
| p[ix[12]], p[ix[13]], p[ix[14]], p[ix[15]] }; |
| } |
| SI F gather(const float* p, U32 ix) { return _mm512_i32gather_ps((__m512i)ix, p, 4); } |
| SI U32 gather(const uint32_t* p, U32 ix) { |
| return (U32)_mm512_i32gather_epi32((__m512i)ix, p, 4); } |
| SI U64 gather(const uint64_t* p, U32 ix) { |
| __m512i parts[] = { |
| _mm512_i32gather_epi64(_mm512_castsi512_si256((__m512i)ix), p, 8), |
| _mm512_i32gather_epi64(_mm512_extracti32x8_epi32((__m512i)ix, 1), p, 8), |
| }; |
| return sk_bit_cast<U64>(parts); |
| } |
| template <typename V, typename S> |
| SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) { |
| V before = gather(dst, ix); |
| V after = if_then_else(mask, src, before); |
| dst[ix[0]] = after[0]; |
| dst[ix[1]] = after[1]; |
| dst[ix[2]] = after[2]; |
| dst[ix[3]] = after[3]; |
| dst[ix[4]] = after[4]; |
| dst[ix[5]] = after[5]; |
| dst[ix[6]] = after[6]; |
| dst[ix[7]] = after[7]; |
| dst[ix[8]] = after[8]; |
| dst[ix[9]] = after[9]; |
| dst[ix[10]] = after[10]; |
| dst[ix[11]] = after[11]; |
| dst[ix[12]] = after[12]; |
| dst[ix[13]] = after[13]; |
| dst[ix[14]] = after[14]; |
| dst[ix[15]] = after[15]; |
| } |
| |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| __m256i _01234567 = _mm256_loadu_si256(((const __m256i*)ptr) + 0); |
| __m256i _89abcdef = _mm256_loadu_si256(((const __m256i*)ptr) + 1); |
| |
        *r = (U16)_mm256_permute4x64_epi64(
                _mm256_packs_epi32(_mm256_srai_epi32(_mm256_slli_epi32(_01234567, 16), 16),
                                   _mm256_srai_epi32(_mm256_slli_epi32(_89abcdef, 16), 16)),
                216);
        *g = (U16)_mm256_permute4x64_epi64(
                _mm256_packs_epi32(_mm256_srai_epi32(_01234567, 16),
                                   _mm256_srai_epi32(_89abcdef, 16)),
                216);
| } |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| __m256i _01234567 = _mm256_unpacklo_epi16((__m256i)r, (__m256i)g); |
| __m256i _89abcdef = _mm256_unpackhi_epi16((__m256i)r, (__m256i)g); |
| __m512i combinedVector = _mm512_inserti64x4(_mm512_castsi256_si512(_01234567), |
| _89abcdef, 1); |
| __m512i aa = _mm512_permutexvar_epi64(_mm512_setr_epi64(0,1,4,5,2,3,6,7), combinedVector); |
| _01234567 = _mm512_castsi512_si256(aa); |
| _89abcdef = _mm512_extracti64x4_epi64(aa, 1); |
| |
| _mm256_storeu_si256((__m256i*)ptr + 0, _01234567); |
| _mm256_storeu_si256((__m256i*)ptr + 1, _89abcdef); |
| } |
| |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| __m256i _0123 = _mm256_loadu_si256((const __m256i*)ptr), |
| _4567 = _mm256_loadu_si256(((const __m256i*)ptr) + 1), |
| _89ab = _mm256_loadu_si256(((const __m256i*)ptr) + 2), |
| _cdef = _mm256_loadu_si256(((const __m256i*)ptr) + 3); |
| |
| auto a0 = _mm256_unpacklo_epi16(_0123, _4567), |
| a1 = _mm256_unpackhi_epi16(_0123, _4567), |
| b0 = _mm256_unpacklo_epi16(a0, a1), |
| b1 = _mm256_unpackhi_epi16(a0, a1), |
| a2 = _mm256_unpacklo_epi16(_89ab, _cdef), |
| a3 = _mm256_unpackhi_epi16(_89ab, _cdef), |
| b2 = _mm256_unpacklo_epi16(a2, a3), |
| b3 = _mm256_unpackhi_epi16(a2, a3), |
| rr = _mm256_unpacklo_epi64(b0, b2), |
| gg = _mm256_unpackhi_epi64(b0, b2), |
| bb = _mm256_unpacklo_epi64(b1, b3), |
| aa = _mm256_unpackhi_epi64(b1, b3); |
| |
| *r = (U16)_mm256_permutexvar_epi32(_mm256_setr_epi32(0,4,1,5,2,6,3,7), rr); |
| *g = (U16)_mm256_permutexvar_epi32(_mm256_setr_epi32(0,4,1,5,2,6,3,7), gg); |
| *b = (U16)_mm256_permutexvar_epi32(_mm256_setr_epi32(0,4,1,5,2,6,3,7), bb); |
| *a = (U16)_mm256_permutexvar_epi32(_mm256_setr_epi32(0,4,1,5,2,6,3,7), aa); |
| } |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| auto rg012389ab = _mm256_unpacklo_epi16((__m256i)r, (__m256i)g), |
| rg4567cdef = _mm256_unpackhi_epi16((__m256i)r, (__m256i)g), |
| ba012389ab = _mm256_unpacklo_epi16((__m256i)b, (__m256i)a), |
| ba4567cdef = _mm256_unpackhi_epi16((__m256i)b, (__m256i)a); |
| |
| auto _0189 = _mm256_unpacklo_epi32(rg012389ab, ba012389ab), |
| _23ab = _mm256_unpackhi_epi32(rg012389ab, ba012389ab), |
| _45cd = _mm256_unpacklo_epi32(rg4567cdef, ba4567cdef), |
| _67ef = _mm256_unpackhi_epi32(rg4567cdef, ba4567cdef); |
| |
| auto _ab23 = _mm256_permutex_epi64(_23ab, 78), |
| _0123 = _mm256_blend_epi32(_0189, _ab23, 0xf0), |
| _89ab = _mm256_permutex_epi64(_mm256_blend_epi32(_0189, _ab23, 0x0f), 78), |
| _ef67 = _mm256_permutex_epi64(_67ef, 78), |
| _4567 = _mm256_blend_epi32(_45cd, _ef67, 0xf0), |
| _cdef = _mm256_permutex_epi64(_mm256_blend_epi32(_45cd, _ef67, 0x0f), 78); |
| |
| _mm256_storeu_si256((__m256i*)ptr, _0123); |
| _mm256_storeu_si256((__m256i*)ptr + 1, _4567); |
| _mm256_storeu_si256((__m256i*)ptr + 2, _89ab); |
| _mm256_storeu_si256((__m256i*)ptr + 3, _cdef); |
| } |
| |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| F _048c, _159d, _26ae, _37bf; |
| |
| _048c = _mm512_castps128_ps512(_mm_loadu_ps(ptr) ); |
| _048c = _mm512_insertf32x4(_048c, _mm_loadu_ps(ptr+16), 1); |
| _048c = _mm512_insertf32x4(_048c, _mm_loadu_ps(ptr+32), 2); |
| _048c = _mm512_insertf32x4(_048c, _mm_loadu_ps(ptr+48), 3); |
| _159d = _mm512_castps128_ps512(_mm_loadu_ps(ptr+4) ); |
| _159d = _mm512_insertf32x4(_159d, _mm_loadu_ps(ptr+20), 1); |
| _159d = _mm512_insertf32x4(_159d, _mm_loadu_ps(ptr+36), 2); |
| _159d = _mm512_insertf32x4(_159d, _mm_loadu_ps(ptr+52), 3); |
| _26ae = _mm512_castps128_ps512(_mm_loadu_ps(ptr+8) ); |
| _26ae = _mm512_insertf32x4(_26ae, _mm_loadu_ps(ptr+24), 1); |
| _26ae = _mm512_insertf32x4(_26ae, _mm_loadu_ps(ptr+40), 2); |
| _26ae = _mm512_insertf32x4(_26ae, _mm_loadu_ps(ptr+56), 3); |
| _37bf = _mm512_castps128_ps512(_mm_loadu_ps(ptr+12) ); |
| _37bf = _mm512_insertf32x4(_37bf, _mm_loadu_ps(ptr+28), 1); |
| _37bf = _mm512_insertf32x4(_37bf, _mm_loadu_ps(ptr+44), 2); |
| _37bf = _mm512_insertf32x4(_37bf, _mm_loadu_ps(ptr+60), 3); |
| |
| F rg02468acf = _mm512_unpacklo_ps(_048c, _26ae), |
| ba02468acf = _mm512_unpackhi_ps(_048c, _26ae), |
| rg13579bde = _mm512_unpacklo_ps(_159d, _37bf), |
| ba13579bde = _mm512_unpackhi_ps(_159d, _37bf); |
| |
| *r = (F)_mm512_unpacklo_ps(rg02468acf, rg13579bde); |
| *g = (F)_mm512_unpackhi_ps(rg02468acf, rg13579bde); |
| *b = (F)_mm512_unpacklo_ps(ba02468acf, ba13579bde); |
| *a = (F)_mm512_unpackhi_ps(ba02468acf, ba13579bde); |
| } |
| |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| F rg014589cd = _mm512_unpacklo_ps(r, g), |
| rg2367abef = _mm512_unpackhi_ps(r, g), |
| ba014589cd = _mm512_unpacklo_ps(b, a), |
| ba2367abef = _mm512_unpackhi_ps(b, a); |
| |
| F _048c = (F)_mm512_unpacklo_pd((__m512d)rg014589cd, (__m512d)ba014589cd), |
| _26ae = (F)_mm512_unpacklo_pd((__m512d)rg2367abef, (__m512d)ba2367abef), |
| _159d = (F)_mm512_unpackhi_pd((__m512d)rg014589cd, (__m512d)ba014589cd), |
| _37bf = (F)_mm512_unpackhi_pd((__m512d)rg2367abef, (__m512d)ba2367abef); |
| |
| F _ae26 = (F)_mm512_permutexvar_pd(_mm512_setr_epi64(4,5,6,7,0,1,2,3), (__m512d)_26ae), |
| _bf37 = (F)_mm512_permutexvar_pd(_mm512_setr_epi64(4,5,6,7,0,1,2,3), (__m512d)_37bf), |
| _8c04 = (F)_mm512_permutexvar_pd(_mm512_setr_epi64(4,5,6,7,0,1,2,3), (__m512d)_048c), |
| _9d15 = (F)_mm512_permutexvar_pd(_mm512_setr_epi64(4,5,6,7,0,1,2,3), (__m512d)_159d); |
| |
| __m512i index = _mm512_setr_epi32(4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11); |
| F _0426 = (F)_mm512_permutex2var_pd((__m512d)_048c, _mm512_setr_epi64(0,1,2,3,12,13,14,15), |
| (__m512d)_ae26), |
| _1537 = (F)_mm512_permutex2var_pd((__m512d)_159d, _mm512_setr_epi64(0,1,2,3,12,13,14,15), |
| (__m512d)_bf37), |
| _5173 = _mm512_permutexvar_ps(index, _1537), |
| _0123 = (F)_mm512_permutex2var_pd((__m512d)_0426, _mm512_setr_epi64(0,1,10,11,4,5,14,15), |
| (__m512d)_5173); |
| |
| F _5476 = (F)_mm512_permutex2var_pd((__m512d)_5173, _mm512_setr_epi64(0,1,10,11,4,5,14,15), |
| (__m512d)_0426), |
| _4567 = _mm512_permutexvar_ps(index, _5476), |
| _8cae = (F)_mm512_permutex2var_pd((__m512d)_8c04, _mm512_setr_epi64(0,1,2,3,12,13,14,15), |
| (__m512d)_26ae), |
| _9dbf = (F)_mm512_permutex2var_pd((__m512d)_9d15, _mm512_setr_epi64(0,1,2,3,12,13,14,15), |
| (__m512d)_37bf), |
| _d9fb = _mm512_permutexvar_ps(index, _9dbf), |
| _89ab = (F)_mm512_permutex2var_pd((__m512d)_8cae, _mm512_setr_epi64(0,1,10,11,4,5,14,15), |
| (__m512d)_d9fb), |
| _dcfe = (F)_mm512_permutex2var_pd((__m512d)_d9fb, _mm512_setr_epi64(0,1,10,11,4,5,14,15), |
| (__m512d)_8cae), |
| _cdef = _mm512_permutexvar_ps(index, _dcfe); |
| |
| _mm512_storeu_ps(ptr+0, _0123); |
| _mm512_storeu_ps(ptr+16, _4567); |
| _mm512_storeu_ps(ptr+32, _89ab); |
| _mm512_storeu_ps(ptr+48, _cdef); |
| } |
| |
| #elif defined(JUMPER_IS_HSW) |
| // These are __m256 and __m256i, but friendlier and strongly-typed. |
| template <typename T> using V = Vec<8, T>; |
| using F = V<float >; |
| using I32 = V< int32_t>; |
| using U64 = V<uint64_t>; |
| using U32 = V<uint32_t>; |
| using U16 = V<uint16_t>; |
| using U8 = V<uint8_t >; |
| |
| SI F mad(F f, F m, F a) { return _mm256_fmadd_ps(f, m, a); } |
| SI F nmad(F f, F m, F a) { return _mm256_fnmadd_ps(f, m, a); } |
| |
| SI F min(F a, F b) { return _mm256_min_ps(a,b); } |
| SI I32 min(I32 a, I32 b) { return (I32)_mm256_min_epi32((__m256i)a,(__m256i)b); } |
| SI U32 min(U32 a, U32 b) { return (U32)_mm256_min_epu32((__m256i)a,(__m256i)b); } |
| SI F max(F a, F b) { return _mm256_max_ps(a,b); } |
| SI I32 max(I32 a, I32 b) { return (I32)_mm256_max_epi32((__m256i)a,(__m256i)b); } |
| SI U32 max(U32 a, U32 b) { return (U32)_mm256_max_epu32((__m256i)a,(__m256i)b); } |
| |
| SI F abs_ (F v) { return _mm256_and_ps(v, 0-v); } |
| SI I32 abs_ (I32 v) { return (I32)_mm256_abs_epi32((__m256i)v); } |
| SI F floor_(F v) { return _mm256_floor_ps(v); } |
| SI F ceil_(F v) { return _mm256_ceil_ps(v); } |
| SI F rcp_approx(F v) { return _mm256_rcp_ps (v); } // use rcp_fast instead |
| SI F rsqrt_approx(F v) { return _mm256_rsqrt_ps(v); } |
| SI F sqrt_ (F v) { return _mm256_sqrt_ps (v); } |
| SI F rcp_precise (F v) { |
| F e = rcp_approx(v); |
| return _mm256_fnmadd_ps(v, e, _mm256_set1_ps(2.0f)) * e; |
| } |
| |
| SI I32 iround(F v) { return (I32)_mm256_cvtps_epi32(v); } |
| SI U32 round(F v) { return (U32)_mm256_cvtps_epi32(v); } |
| SI U32 round(F v, F scale) { return (U32)_mm256_cvtps_epi32(v*scale); } |
| SI U16 pack(U32 v) { |
| return (U16)_mm_packus_epi32(_mm256_extractf128_si256((__m256i)v, 0), |
| _mm256_extractf128_si256((__m256i)v, 1)); |
| } |
| SI U8 pack(U16 v) { |
| auto r = _mm_packus_epi16((__m128i)v,(__m128i)v); |
| return sk_unaligned_load<U8>(&r); |
| } |
| |
| SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e, t, (__m256)c); } |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { |
| return (I32)_mm256_blendv_ps((__m256)e, (__m256)t, (__m256)c); |
| } |
| |
| // NOTE: This version of 'all' only works with mask values (true == all bits set) |
| SI bool any(I32 c) { return !_mm256_testz_si256((__m256i)c, _mm256_set1_epi32(-1)); } |
| SI bool all(I32 c) { return _mm256_testc_si256((__m256i)c, _mm256_set1_epi32(-1)); } |
| |
| template <typename T> |
| SI V<T> gather(const T* p, U32 ix) { |
| return V<T>{ p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]], |
| p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], }; |
| } |
| SI F gather(const float* p, U32 ix) { return _mm256_i32gather_ps(p, (__m256i)ix, 4); } |
| SI U32 gather(const uint32_t* p, U32 ix) { |
| return (U32)_mm256_i32gather_epi32((const int*)p, (__m256i)ix, 4); |
| } |
| SI U64 gather(const uint64_t* p, U32 ix) { |
| __m256i parts[] = { |
| _mm256_i32gather_epi64( |
| (const long long int*)p, _mm256_extracti128_si256((__m256i)ix, 0), 8), |
| _mm256_i32gather_epi64( |
| (const long long int*)p, _mm256_extracti128_si256((__m256i)ix, 1), 8), |
| }; |
| return sk_bit_cast<U64>(parts); |
| } |
| SI void scatter_masked(I32 src, int* dst, U32 ix, I32 mask) { |
| I32 before = gather(dst, ix); |
| I32 after = if_then_else(mask, src, before); |
| dst[ix[0]] = after[0]; |
| dst[ix[1]] = after[1]; |
| dst[ix[2]] = after[2]; |
| dst[ix[3]] = after[3]; |
| dst[ix[4]] = after[4]; |
| dst[ix[5]] = after[5]; |
| dst[ix[6]] = after[6]; |
| dst[ix[7]] = after[7]; |
| } |
| |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| __m128i _0123 = _mm_loadu_si128(((const __m128i*)ptr) + 0), |
| _4567 = _mm_loadu_si128(((const __m128i*)ptr) + 1); |
| *r = (U16)_mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16), |
| _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16)); |
| *g = (U16)_mm_packs_epi32(_mm_srai_epi32(_0123, 16), |
| _mm_srai_epi32(_4567, 16)); |
| } |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| auto _0123 = _mm_unpacklo_epi16((__m128i)r, (__m128i)g), |
| _4567 = _mm_unpackhi_epi16((__m128i)r, (__m128i)g); |
| _mm_storeu_si128((__m128i*)ptr + 0, _0123); |
| _mm_storeu_si128((__m128i*)ptr + 1, _4567); |
| } |
| |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| __m128i _01 = _mm_loadu_si128(((const __m128i*)ptr) + 0), |
| _23 = _mm_loadu_si128(((const __m128i*)ptr) + 1), |
| _45 = _mm_loadu_si128(((const __m128i*)ptr) + 2), |
| _67 = _mm_loadu_si128(((const __m128i*)ptr) + 3); |
| |
| auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2 |
| _13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3 |
| _46 = _mm_unpacklo_epi16(_45, _67), |
| _57 = _mm_unpackhi_epi16(_45, _67); |
| |
| auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3 |
| ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3 |
| rg4567 = _mm_unpacklo_epi16(_46, _57), |
| ba4567 = _mm_unpackhi_epi16(_46, _57); |
| |
| *r = (U16)_mm_unpacklo_epi64(rg0123, rg4567); |
| *g = (U16)_mm_unpackhi_epi64(rg0123, rg4567); |
| *b = (U16)_mm_unpacklo_epi64(ba0123, ba4567); |
| *a = (U16)_mm_unpackhi_epi64(ba0123, ba4567); |
| } |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| auto rg0123 = _mm_unpacklo_epi16((__m128i)r, (__m128i)g), // r0 g0 r1 g1 r2 g2 r3 g3 |
| rg4567 = _mm_unpackhi_epi16((__m128i)r, (__m128i)g), // r4 g4 r5 g5 r6 g6 r7 g7 |
| ba0123 = _mm_unpacklo_epi16((__m128i)b, (__m128i)a), |
| ba4567 = _mm_unpackhi_epi16((__m128i)b, (__m128i)a); |
| |
| auto _01 = _mm_unpacklo_epi32(rg0123, ba0123), |
| _23 = _mm_unpackhi_epi32(rg0123, ba0123), |
| _45 = _mm_unpacklo_epi32(rg4567, ba4567), |
| _67 = _mm_unpackhi_epi32(rg4567, ba4567); |
| |
| _mm_storeu_si128((__m128i*)ptr + 0, _01); |
| _mm_storeu_si128((__m128i*)ptr + 1, _23); |
| _mm_storeu_si128((__m128i*)ptr + 2, _45); |
| _mm_storeu_si128((__m128i*)ptr + 3, _67); |
| } |
| |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| F _04 = _mm256_castps128_ps256(_mm_loadu_ps(ptr+ 0)), |
| _15 = _mm256_castps128_ps256(_mm_loadu_ps(ptr+ 4)), |
| _26 = _mm256_castps128_ps256(_mm_loadu_ps(ptr+ 8)), |
| _37 = _mm256_castps128_ps256(_mm_loadu_ps(ptr+12)); |
| _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1); |
| _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1); |
| _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1); |
| _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1); |
| |
| F rg0145 = _mm256_unpacklo_ps(_04,_15), // r0 r1 g0 g1 | r4 r5 g4 g5 |
| ba0145 = _mm256_unpackhi_ps(_04,_15), |
| rg2367 = _mm256_unpacklo_ps(_26,_37), |
| ba2367 = _mm256_unpackhi_ps(_26,_37); |
| |
| *r = (F)_mm256_unpacklo_pd((__m256d)rg0145, (__m256d)rg2367); |
| *g = (F)_mm256_unpackhi_pd((__m256d)rg0145, (__m256d)rg2367); |
| *b = (F)_mm256_unpacklo_pd((__m256d)ba0145, (__m256d)ba2367); |
| *a = (F)_mm256_unpackhi_pd((__m256d)ba0145, (__m256d)ba2367); |
| } |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| F rg0145 = _mm256_unpacklo_ps(r, g), // r0 g0 r1 g1 | r4 g4 r5 g5 |
| rg2367 = _mm256_unpackhi_ps(r, g), // r2 ... | r6 ... |
| ba0145 = _mm256_unpacklo_ps(b, a), // b0 a0 b1 a1 | b4 a4 b5 a5 |
| ba2367 = _mm256_unpackhi_ps(b, a); // b2 ... | b6 ... |
| |
| F _04 = (F)_mm256_unpacklo_pd((__m256d)rg0145, (__m256d)ba0145),// r0 g0 b0 a0 | r4 g4 b4 a4 |
| _15 = (F)_mm256_unpackhi_pd((__m256d)rg0145, (__m256d)ba0145),// r1 ... | r5 ... |
| _26 = (F)_mm256_unpacklo_pd((__m256d)rg2367, (__m256d)ba2367),// r2 ... | r6 ... |
| _37 = (F)_mm256_unpackhi_pd((__m256d)rg2367, (__m256d)ba2367);// r3 ... | r7 ... |
| |
| F _01 = _mm256_permute2f128_ps(_04, _15, 32), // 32 == 0010 0000 == lo, lo |
| _23 = _mm256_permute2f128_ps(_26, _37, 32), |
| _45 = _mm256_permute2f128_ps(_04, _15, 49), // 49 == 0011 0001 == hi, hi |
| _67 = _mm256_permute2f128_ps(_26, _37, 49); |
| _mm256_storeu_ps(ptr+ 0, _01); |
| _mm256_storeu_ps(ptr+ 8, _23); |
| _mm256_storeu_ps(ptr+16, _45); |
| _mm256_storeu_ps(ptr+24, _67); |
| } |
| |
| #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX) |
| template <typename T> using V = Vec<4, T>; |
| using F = V<float >; |
| using I32 = V< int32_t>; |
| using U64 = V<uint64_t>; |
| using U32 = V<uint32_t>; |
| using U16 = V<uint16_t>; |
| using U8 = V<uint8_t >; |
| |
| SI F if_then_else(I32 c, F t, F e) { |
| return _mm_or_ps(_mm_and_ps((__m128)c, t), _mm_andnot_ps((__m128)c, e)); |
| } |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { |
| return (I32)_mm_or_ps(_mm_and_ps((__m128)c, (__m128)t), |
| _mm_andnot_ps((__m128)c, (__m128)e)); |
| } |
| |
| SI F min(F a, F b) { return _mm_min_ps(a,b); } |
| SI F max(F a, F b) { return _mm_max_ps(a,b); } |
| #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX) |
| SI I32 min(I32 a, I32 b) { return (I32)_mm_min_epi32((__m128i)a,(__m128i)b); } |
| SI U32 min(U32 a, U32 b) { return (U32)_mm_min_epu32((__m128i)a,(__m128i)b); } |
| SI I32 max(I32 a, I32 b) { return (I32)_mm_max_epi32((__m128i)a,(__m128i)b); } |
| SI U32 max(U32 a, U32 b) { return (U32)_mm_max_epu32((__m128i)a,(__m128i)b); } |
| #else |
| SI I32 min(I32 a, I32 b) { return if_then_else(a < b, a, b); } |
| SI I32 max(I32 a, I32 b) { return if_then_else(a > b, a, b); } |
| SI U32 min(U32 a, U32 b) { |
| return sk_bit_cast<U32>(if_then_else(a < b, sk_bit_cast<I32>(a), sk_bit_cast<I32>(b))); |
| } |
| SI U32 max(U32 a, U32 b) { |
| return sk_bit_cast<U32>(if_then_else(a > b, sk_bit_cast<I32>(a), sk_bit_cast<I32>(b))); |
| } |
| #endif |
| |
| SI F mad(F f, F m, F a) { return a+f*m; } |
| SI F nmad(F f, F m, F a) { return a-f*m; } |
| SI F abs_(F v) { return _mm_and_ps(v, 0-v); } |
| #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX) |
| SI I32 abs_(I32 v) { return (I32)_mm_abs_epi32((__m128i)v); } |
| #else |
| SI I32 abs_(I32 v) { return max(v, -v); } |
| #endif |
| SI F rcp_approx(F v) { return _mm_rcp_ps (v); } // use rcp_fast instead |
| SI F rcp_precise (F v) { F e = rcp_approx(v); return e * (2.0f - v * e); } |
| SI F rsqrt_approx(F v) { return _mm_rsqrt_ps(v); } |
| SI F sqrt_(F v) { return _mm_sqrt_ps (v); } |
| |
| SI I32 iround(F v) { return (I32)_mm_cvtps_epi32(v); } |
| SI U32 round(F v) { return (U32)_mm_cvtps_epi32(v); } |
| SI U32 round(F v, F scale) { return (U32)_mm_cvtps_epi32(v*scale); } |
| |
| SI U16 pack(U32 v) { |
| #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX) |
| auto p = _mm_packus_epi32((__m128i)v,(__m128i)v); |
| #else |
| // Sign extend so that _mm_packs_epi32() does the pack we want. |
| auto p = _mm_srai_epi32(_mm_slli_epi32((__m128i)v, 16), 16); |
| p = _mm_packs_epi32(p,p); |
| #endif |
| return sk_unaligned_load<U16>(&p); // We have two copies. Return (the lower) one. |
| } |
| SI U8 pack(U16 v) { |
| auto r = widen_cast<__m128i>(v); |
| r = _mm_packus_epi16(r,r); |
| return sk_unaligned_load<U8>(&r); |
| } |
| |
| // NOTE: This only checks the top bit of each lane, and is incorrect with non-mask values. |
| SI bool any(I32 c) { return _mm_movemask_ps(sk_bit_cast<F>(c)) != 0b0000; } |
| SI bool all(I32 c) { return _mm_movemask_ps(sk_bit_cast<F>(c)) == 0b1111; } |
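    // (_mm_movemask_ps() packs each lane's sign bit into bits 0..3, so all-ones mask lanes
    // give 0b1111 and all-zero lanes give 0b0000.)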
| |
| SI F floor_(F v) { |
| #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX) |
| return _mm_floor_ps(v); |
| #else |
| F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v)); |
| return roundtrip - if_then_else(roundtrip > v, F() + 1, F() + 0); |
| #endif |
| } |
| |
| SI F ceil_(F v) { |
| #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX) |
| return _mm_ceil_ps(v); |
| #else |
| F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v)); |
| return roundtrip + if_then_else(roundtrip < v, F() + 1, F() + 0); |
| #endif |
| } |
| |
| template <typename T> |
| SI V<T> gather(const T* p, U32 ix) { |
| return V<T>{p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]}; |
| } |
| SI void scatter_masked(I32 src, int* dst, U32 ix, I32 mask) { |
| I32 before = gather(dst, ix); |
| I32 after = if_then_else(mask, src, before); |
| dst[ix[0]] = after[0]; |
| dst[ix[1]] = after[1]; |
| dst[ix[2]] = after[2]; |
| dst[ix[3]] = after[3]; |
| } |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| __m128i _01 = _mm_loadu_si128(((const __m128i*)ptr) + 0); // r0 g0 r1 g1 r2 g2 r3 g3 |
| auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8); // r0 r1 g0 g1 r2 g2 r3 g3 |
| auto rg = _mm_shufflehi_epi16(rg01_23, 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3 |
| |
| auto R = _mm_shuffle_epi32(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3 |
| auto G = _mm_shuffle_epi32(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3 |
| *r = sk_unaligned_load<U16>(&R); |
| *g = sk_unaligned_load<U16>(&G); |
| } |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| __m128i rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)); |
| _mm_storeu_si128((__m128i*)ptr + 0, rg); |
| } |
| |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| __m128i _01 = _mm_loadu_si128(((const __m128i*)ptr) + 0), // r0 g0 b0 a0 r1 g1 b1 a1 |
| _23 = _mm_loadu_si128(((const __m128i*)ptr) + 1); // r2 g2 b2 a2 r3 g3 b3 a3 |
| |
| auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2 |
| _13 = _mm_unpackhi_epi16(_01, _23); // r1 r3 g1 g3 b1 b3 a1 a3 |
| |
| auto rg = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3 |
| ba = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 a0 a1 a2 a3 |
| |
| *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0); |
| *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4); |
| *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0); |
| *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4); |
| } |
| |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)), |
| ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a)); |
| |
| _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba)); |
| _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba)); |
| } |
| |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| F _0 = _mm_loadu_ps(ptr + 0), |
| _1 = _mm_loadu_ps(ptr + 4), |
| _2 = _mm_loadu_ps(ptr + 8), |
| _3 = _mm_loadu_ps(ptr +12); |
| _MM_TRANSPOSE4_PS(_0,_1,_2,_3); |
| *r = _0; |
| *g = _1; |
| *b = _2; |
| *a = _3; |
| } |
| |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| _MM_TRANSPOSE4_PS(r,g,b,a); |
| _mm_storeu_ps(ptr + 0, r); |
| _mm_storeu_ps(ptr + 4, g); |
| _mm_storeu_ps(ptr + 8, b); |
| _mm_storeu_ps(ptr +12, a); |
| } |
| |
| #elif defined(JUMPER_IS_LASX) |
| // These are __m256 and __m256i, but friendlier and strongly-typed. |
| template <typename T> using V = Vec<8, T>; |
| using F = V<float >; |
| using I32 = V<int32_t>; |
| using U64 = V<uint64_t>; |
| using U32 = V<uint32_t>; |
| using U16 = V<uint16_t>; |
| using U8 = V<uint8_t >; |
| |
| SI __m128i emulate_lasx_d_xr2vr_l(__m256i a) { |
| v4i64 tmp = a; |
| v2i64 al = {tmp[0], tmp[1]}; |
| return (__m128i)al; |
| } |
| |
| SI __m128i emulate_lasx_d_xr2vr_h(__m256i a) { |
| v4i64 tmp = a; |
| v2i64 ah = {tmp[2], tmp[3]}; |
| return (__m128i)ah; |
| } |
| |
| SI F if_then_else(I32 c, F t, F e) { |
| return sk_bit_cast<Vec<8,float>>(__lasx_xvbitsel_v(sk_bit_cast<__m256i>(e), |
| sk_bit_cast<__m256i>(t), |
| sk_bit_cast<__m256i>(c))); |
| } |
| |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { |
| return sk_bit_cast<Vec<8,int32_t>>(__lasx_xvbitsel_v(sk_bit_cast<__m256i>(e), |
| sk_bit_cast<__m256i>(t), |
| sk_bit_cast<__m256i>(c))); |
| } |
| |
| SI F min(F a, F b) { return __lasx_xvfmin_s(a,b); } |
| SI F max(F a, F b) { return __lasx_xvfmax_s(a,b); } |
| SI I32 min(I32 a, I32 b) { return __lasx_xvmin_w(a,b); } |
| SI U32 min(U32 a, U32 b) { return __lasx_xvmin_wu(a,b); } |
| SI I32 max(I32 a, I32 b) { return __lasx_xvmax_w(a,b); } |
| SI U32 max(U32 a, U32 b) { return __lasx_xvmax_wu(a,b); } |
| |
| SI F mad(F f, F m, F a) { return __lasx_xvfmadd_s(f, m, a); } |
| SI F nmad(F f, F m, F a) { return __lasx_xvfmadd_s(-f, m, a); } |
| SI F abs_ (F v) { return (F)__lasx_xvand_v((I32)v, (I32)(0-v)); } |
| SI I32 abs_(I32 v) { return max(v, -v); } |
| SI F rcp_approx(F v) { return __lasx_xvfrecip_s(v); } |
| SI F rcp_precise (F v) { F e = rcp_approx(v); return e * nmad(v, e, 2.0f); } |
| SI F rsqrt_approx (F v) { return __lasx_xvfrsqrt_s(v); } |
| SI F sqrt_(F v) { return __lasx_xvfsqrt_s(v); } |
| |
| SI U32 iround(F v) { |
| F t = F(0.5); |
| return __lasx_xvftintrz_w_s(v + t); |
| } |
| |
| SI U32 round(F v) { |
| F t = F(0.5); |
| return __lasx_xvftintrz_w_s(v + t); |
| } |
| |
| SI U32 round(F v, F scale) { |
| F t = F(0.5); |
| return __lasx_xvftintrz_w_s(mad(v, scale, t)); |
| } |
| |
| SI U16 pack(U32 v) { |
| return __lsx_vpickev_h(__lsx_vsat_wu(emulate_lasx_d_xr2vr_h(v), 15), |
| __lsx_vsat_wu(emulate_lasx_d_xr2vr_l(v), 15)); |
| } |
| |
| SI U8 pack(U16 v) { |
| __m128i tmp = __lsx_vsat_hu(v, 7); |
| auto r = __lsx_vpickev_b(tmp, tmp); |
| return sk_unaligned_load<U8>(&r); |
| } |
| |
| SI bool any(I32 c){ |
| v8i32 retv = (v8i32)__lasx_xvmskltz_w(__lasx_xvslt_wu(__lasx_xvldi(0), c)); |
| return (retv[0] | retv[4]) != 0b0000; |
| } |
| |
| SI bool all(I32 c){ |
| v8i32 retv = (v8i32)__lasx_xvmskltz_w(__lasx_xvslt_wu(__lasx_xvldi(0), c)); |
| return (retv[0] & retv[4]) == 0b1111; |
| } |
| |
| SI F floor_(F v) { |
| return __lasx_xvfrintrm_s(v); |
| } |
| |
| SI F ceil_(F v) { |
| return __lasx_xvfrintrp_s(v); |
| } |
| |
| template <typename T> |
| SI V<T> gather(const T* p, U32 ix) { |
| return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]], |
| p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], }; |
| } |
| |
| template <typename V, typename S> |
| SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) { |
| V before = gather(dst, ix); |
| V after = if_then_else(mask, src, before); |
| dst[ix[0]] = after[0]; |
| dst[ix[1]] = after[1]; |
| dst[ix[2]] = after[2]; |
| dst[ix[3]] = after[3]; |
| dst[ix[4]] = after[4]; |
| dst[ix[5]] = after[5]; |
| dst[ix[6]] = after[6]; |
| dst[ix[7]] = after[7]; |
| } |
| |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| U16 _0123 = __lsx_vld(ptr, 0), |
| _4567 = __lsx_vld(ptr, 16); |
| *r = __lsx_vpickev_h(__lsx_vsat_w(__lsx_vsrai_w(__lsx_vslli_w(_4567, 16), 16), 15), |
| __lsx_vsat_w(__lsx_vsrai_w(__lsx_vslli_w(_0123, 16), 16), 15)); |
| *g = __lsx_vpickev_h(__lsx_vsat_w(__lsx_vsrai_w(_4567, 16), 15), |
| __lsx_vsat_w(__lsx_vsrai_w(_0123, 16), 15)); |
| } |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| auto _0123 = __lsx_vilvl_h(g, r), |
| _4567 = __lsx_vilvh_h(g, r); |
| __lsx_vst(_0123, ptr, 0); |
| __lsx_vst(_4567, ptr, 16); |
| } |
| |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| __m128i _01 = __lsx_vld(ptr, 0), |
| _23 = __lsx_vld(ptr, 16), |
| _45 = __lsx_vld(ptr, 32), |
| _67 = __lsx_vld(ptr, 48); |
| |
| auto _02 = __lsx_vilvl_h(_23, _01), // r0 r2 g0 g2 b0 b2 a0 a2 |
| _13 = __lsx_vilvh_h(_23, _01), // r1 r3 g1 g3 b1 b3 a1 a3 |
| _46 = __lsx_vilvl_h(_67, _45), |
| _57 = __lsx_vilvh_h(_67, _45); |
| |
| auto rg0123 = __lsx_vilvl_h(_13, _02), // r0 r1 r2 r3 g0 g1 g2 g3 |
| ba0123 = __lsx_vilvh_h(_13, _02), // b0 b1 b2 b3 a0 a1 a2 a3 |
| rg4567 = __lsx_vilvl_h(_57, _46), |
| ba4567 = __lsx_vilvh_h(_57, _46); |
| |
| *r = __lsx_vilvl_d(rg4567, rg0123); |
| *g = __lsx_vilvh_d(rg4567, rg0123); |
| *b = __lsx_vilvl_d(ba4567, ba0123); |
| *a = __lsx_vilvh_d(ba4567, ba0123); |
| } |
| |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| auto rg0123 = __lsx_vilvl_h(g, r), // r0 g0 r1 g1 r2 g2 r3 g3 |
| rg4567 = __lsx_vilvh_h(g, r), // r4 g4 r5 g5 r6 g6 r7 g7 |
| ba0123 = __lsx_vilvl_h(a, b), |
| ba4567 = __lsx_vilvh_h(a, b); |
| |
| auto _01 =__lsx_vilvl_w(ba0123, rg0123), |
| _23 =__lsx_vilvh_w(ba0123, rg0123), |
| _45 =__lsx_vilvl_w(ba4567, rg4567), |
| _67 =__lsx_vilvh_w(ba4567, rg4567); |
| |
| __lsx_vst(_01, ptr, 0); |
| __lsx_vst(_23, ptr, 16); |
| __lsx_vst(_45, ptr, 32); |
| __lsx_vst(_67, ptr, 48); |
| } |
| |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| F _04 = (F)__lasx_xvpermi_q(__lasx_xvld(ptr, 0), __lasx_xvld(ptr, 64), 0x02); |
| F _15 = (F)__lasx_xvpermi_q(__lasx_xvld(ptr, 16), __lasx_xvld(ptr, 80), 0x02); |
| F _26 = (F)__lasx_xvpermi_q(__lasx_xvld(ptr, 32), __lasx_xvld(ptr, 96), 0x02); |
| F _37 = (F)__lasx_xvpermi_q(__lasx_xvld(ptr, 48), __lasx_xvld(ptr, 112), 0x02); |
| |
| F rg0145 = (F)__lasx_xvilvl_w((__m256i)_15, (__m256i)_04), // r0 r1 g0 g1 | r4 r5 g4 g5 |
| ba0145 = (F)__lasx_xvilvh_w((__m256i)_15, (__m256i)_04), |
| rg2367 = (F)__lasx_xvilvl_w((__m256i)_37, (__m256i)_26), |
| ba2367 = (F)__lasx_xvilvh_w((__m256i)_37, (__m256i)_26); |
| |
| *r = (F)__lasx_xvilvl_d((__m256i)rg2367, (__m256i)rg0145); |
| *g = (F)__lasx_xvilvh_d((__m256i)rg2367, (__m256i)rg0145); |
| *b = (F)__lasx_xvilvl_d((__m256i)ba2367, (__m256i)ba0145); |
| *a = (F)__lasx_xvilvh_d((__m256i)ba2367, (__m256i)ba0145); |
| } |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| F rg0145 = (F)__lasx_xvilvl_w((__m256i)g, (__m256i)r), // r0 g0 r1 g1 | r4 g4 r5 g5 |
| rg2367 = (F)__lasx_xvilvh_w((__m256i)g, (__m256i)r), // r2 ... | r6 ... |
| ba0145 = (F)__lasx_xvilvl_w((__m256i)a, (__m256i)b), // b0 a0 b1 a1 | b4 a4 b5 a5 |
| ba2367 = (F)__lasx_xvilvh_w((__m256i)a, (__m256i)b); // b2 ... | b6 ... |
| |
| F _04 = (F)__lasx_xvilvl_d((__m256i)ba0145, (__m256i)rg0145), // r0 g0 b0 a0 | r4 g4 b4 a4 |
| _15 = (F)__lasx_xvilvh_d((__m256i)ba0145, (__m256i)rg0145), // r1 ... | r5 ... |
| _26 = (F)__lasx_xvilvl_d((__m256i)ba2367, (__m256i)rg2367), // r2 ... | r6 ... |
| _37 = (F)__lasx_xvilvh_d((__m256i)ba2367, (__m256i)rg2367); // r3 ... | r7 ... |
| |
| F _01 = (F)__lasx_xvpermi_q((__m256i)_04, (__m256i)_15, 0x02), |
| _23 = (F)__lasx_xvpermi_q((__m256i)_26, (__m256i)_37, 0x02), |
| _45 = (F)__lasx_xvpermi_q((__m256i)_04, (__m256i)_15, 0x13), |
| _67 = (F)__lasx_xvpermi_q((__m256i)_26, (__m256i)_37, 0x13); |
| __lasx_xvst(_01, ptr, 0); |
| __lasx_xvst(_23, ptr, 32); |
| __lasx_xvst(_45, ptr, 64); |
| __lasx_xvst(_67, ptr, 96); |
| } |
| |
| #elif defined(JUMPER_IS_LSX) |
| template <typename T> using V = Vec<4, T>; |
| using F = V<float >; |
| using I32 = V<int32_t >; |
| using U64 = V<uint64_t>; |
| using U32 = V<uint32_t>; |
| using U16 = V<uint16_t>; |
| using U8 = V<uint8_t >; |
| |
| #define _LSX_TRANSPOSE4_S(row0, row1, row2, row3) \ |
| do { \ |
| __m128 __t0 = (__m128)__lsx_vilvl_w ((__m128i)row1, (__m128i)row0); \ |
| __m128 __t1 = (__m128)__lsx_vilvl_w ((__m128i)row3, (__m128i)row2); \ |
| __m128 __t2 = (__m128)__lsx_vilvh_w ((__m128i)row1, (__m128i)row0); \ |
| __m128 __t3 = (__m128)__lsx_vilvh_w ((__m128i)row3, (__m128i)row2); \ |
| (row0) = (__m128)__lsx_vilvl_d ((__m128i)__t1, (__m128i)__t0); \ |
| (row1) = (__m128)__lsx_vilvh_d ((__m128i)__t1, (__m128i)__t0); \ |
| (row2) = (__m128)__lsx_vilvl_d ((__m128i)__t3, (__m128i)__t2); \ |
| (row3) = (__m128)__lsx_vilvh_d ((__m128i)__t3, (__m128i)__t2); \ |
| } while (0) |
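    // (The macro above is the LSX analogue of SSE's _MM_TRANSPOSE4_PS: it transposes a 4x4
    // float matrix held in four 4-lane rows, in place.)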
| |
| SI F if_then_else(I32 c, F t, F e) { |
| return sk_bit_cast<Vec<4,float>>(__lsx_vbitsel_v(sk_bit_cast<__m128i>(e), |
| sk_bit_cast<__m128i>(t), |
| sk_bit_cast<__m128i>(c))); |
| } |
| |
| SI I32 if_then_else(I32 c, I32 t, I32 e) { |
| return sk_bit_cast<Vec<4,int32_t>>(__lsx_vbitsel_v(sk_bit_cast<__m128i>(e), |
| sk_bit_cast<__m128i>(t), |
| sk_bit_cast<__m128i>(c))); |
| } |
| |
| SI F min(F a, F b) { return __lsx_vfmin_s(a,b); } |
| SI F max(F a, F b) { return __lsx_vfmax_s(a,b); } |
| SI I32 min(I32 a, I32 b) { return __lsx_vmin_w(a,b); } |
| SI U32 min(U32 a, U32 b) { return __lsx_vmin_wu(a,b); } |
| SI I32 max(I32 a, I32 b) { return __lsx_vmax_w(a,b); } |
| SI U32 max(U32 a, U32 b) { return __lsx_vmax_wu(a,b); } |
| |
| SI F mad(F f, F m, F a) { return __lsx_vfmadd_s(f, m, a); } |
| SI F nmad(F f, F m, F a) { return __lsx_vfmadd_s(-f, m, a); } |
| SI F abs_(F v) { return (F)__lsx_vand_v((I32)v, (I32)(0-v)); } |
| SI I32 abs_(I32 v) { return max(v, -v); } |
| SI F rcp_approx (F v) { return __lsx_vfrecip_s(v); } |
| SI F rcp_precise (F v) { F e = rcp_approx(v); return e * nmad(v, e, 2.0f); } |
| SI F rsqrt_approx (F v) { return __lsx_vfrsqrt_s(v); } |
| SI F sqrt_(F v) { return __lsx_vfsqrt_s (v); } |
| |
    SI U32 iround(F v) {
        F t = F(0.5);
        return __lsx_vftintrz_w_s(v + t);
    }

    SI U32 round(F v) {
        F t = F(0.5);
        return __lsx_vftintrz_w_s(v + t);
    }

    SI U32 round(F v, F scale) {
        F t = F(0.5);
        return __lsx_vftintrz_w_s(mad(v, scale, t));
    }
| |
| SI U16 pack(U32 v) { |
| __m128i tmp = __lsx_vsat_wu(v, 15); |
| auto p = __lsx_vpickev_h(tmp, tmp); |
| return sk_unaligned_load<U16>(&p); // We have two copies. Return (the lower) one. |
| } |
| |
| SI U8 pack(U16 v) { |
| auto r = widen_cast<__m128i>(v); |
| __m128i tmp = __lsx_vsat_hu(r, 7); |
| r = __lsx_vpickev_b(tmp, tmp); |
| return sk_unaligned_load<U8>(&r); |
| } |
| |
| SI bool any(I32 c){ |
| v4i32 retv = (v4i32)__lsx_vmskltz_w(__lsx_vslt_wu(__lsx_vldi(0), c)); |
| return retv[0] != 0b0000; |
| } |
| |
| SI bool all(I32 c){ |
| v4i32 retv = (v4i32)__lsx_vmskltz_w(__lsx_vslt_wu(__lsx_vldi(0), c)); |
| return retv[0] == 0b1111; |
| } |
| |
| SI F floor_(F v) { |
| return __lsx_vfrintrm_s(v); |
| } |
| |
| SI F ceil_(F v) { |
| return __lsx_vfrintrp_s(v); |
| } |
| |
| template <typename T> |
| SI V<T> gather(const T* p, U32 ix) { |
| return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]}; |
| } |
| |
| template <typename V, typename S> |
| SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) { |
| V before = gather(dst, ix); |
| V after = if_then_else(mask, src, before); |
| dst[ix[0]] = after[0]; |
| dst[ix[1]] = after[1]; |
| dst[ix[2]] = after[2]; |
| dst[ix[3]] = after[3]; |
| } |
| |
| SI void load2(const uint16_t* ptr, U16* r, U16* g) { |
| __m128i _01 = __lsx_vld(ptr, 0); // r0 g0 r1 g1 r2 g2 r3 g3 |
| auto rg = __lsx_vshuf4i_h(_01, 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3 |
| |
| auto R = __lsx_vshuf4i_w(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3 |
| auto G = __lsx_vshuf4i_w(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3 |
| *r = sk_unaligned_load<U16>(&R); |
| *g = sk_unaligned_load<U16>(&G); |
| } |
| |
| SI void store2(uint16_t* ptr, U16 r, U16 g) { |
| U32 rg = __lsx_vilvl_h(widen_cast<__m128i>(g), widen_cast<__m128i>(r)); |
| __lsx_vst(rg, ptr, 0); |
| } |
| |
| SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { |
| __m128i _01 = __lsx_vld(ptr, 0), // r0 g0 b0 a0 r1 g1 b1 a1 |
| _23 = __lsx_vld(ptr, 16); // r2 g2 b2 a2 r3 g3 b3 a3 |
| |
| auto _02 = __lsx_vilvl_h(_23, _01), // r0 r2 g0 g2 b0 b2 a0 a2 |
| _13 = __lsx_vilvh_h(_23, _01); // r1 r3 g1 g3 b1 b3 a1 a3 |
| |
| auto rg = __lsx_vilvl_h(_13, _02), // r0 r1 r2 r3 g0 g1 g2 g3 |
| ba = __lsx_vilvh_h(_13, _02); // b0 b1 b2 b3 a0 a1 a2 a3 |
| |
| *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0); |
| *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4); |
| *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0); |
| *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4); |
| } |
| |
| SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { |
| auto rg = __lsx_vilvl_h(widen_cast<__m128i>(g), widen_cast<__m128i>(r)), |
| ba = __lsx_vilvl_h(widen_cast<__m128i>(a), widen_cast<__m128i>(b)); |
| |
| __lsx_vst(__lsx_vilvl_w(ba, rg), ptr, 0); |
| __lsx_vst(__lsx_vilvh_w(ba, rg), ptr, 16); |
| } |
| |
| SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { |
| F _0 = (F)__lsx_vld(ptr, 0), |
| _1 = (F)__lsx_vld(ptr, 16), |
| _2 = (F)__lsx_vld(ptr, 32), |
| _3 = (F)__lsx_vld(ptr, 48); |
| _LSX_TRANSPOSE4_S(_0,_1,_2,_3); |
| *r = _0; |
| *g = _1; |
| *b = _2; |
| *a = _3; |
| } |
| |
| SI void store4(float* ptr, F r, F g, F b, F a) { |
| _LSX_TRANSPOSE4_S(r,g,b,a); |
| __lsx_vst(r, ptr, 0); |
| __lsx_vst(g, ptr, 16); |
| __lsx_vst(b, ptr, 32); |
| __lsx_vst(a, ptr, 48); |
| } |
| |
| #endif |
| |
| // Helpers to do scalar -> vector promotion on GCC (clang does this automatically) |
| // We need to subtract (not add) zero to keep float conversion zero-cost. See: |
| // https://stackoverflow.com/q/48255293 |
| // |
| // The GCC implementation should be usable everywhere, but Mac clang (only) complains that the |
| // expressions make these functions not constexpr. |
| // |
| // Further: We can't use the subtract-zero version in scalar mode. There, the subtraction will |
| // really happen (at least at low optimization levels), which can alter the bit pattern of NaNs. |
| // Because F_() is used when copying uniforms (even integer uniforms), this can corrupt values. |
| // The vector subtraction of zero doesn't appear to ever alter NaN bit patterns. |
| #if defined(__clang__) || defined(JUMPER_IS_SCALAR) |
| SI constexpr F F_(float x) { return x; } |
| SI constexpr I32 I32_(int32_t x) { return x; } |
| SI constexpr U32 U32_(uint32_t x) { return x; } |
| #else |
| SI constexpr F F_(float x) { return x - F(); } |
| SI constexpr I32 I32_(int32_t x) { return x + I32(); } |
| SI constexpr U32 U32_(uint32_t x) { return x + U32(); } |
| #endif |
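
// e.g. F_(0.25f) splats 0.25f into every lane under both compilers; the arithmetic against
// a zero vector exists only to make GCC perform that splat.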
| |
| // Extremely helpful literals: |
| static constexpr F F0 = F_(0.0f), |
| F1 = F_(1.0f); |
| |
| #if !defined(JUMPER_IS_SCALAR) |
| SI F min(F a, float b) { return min(a, F_(b)); } |
| SI F min(float a, F b) { return min(F_(a), b); } |
| SI F max(F a, float b) { return max(a, F_(b)); } |
| SI F max(float a, F b) { return max(F_(a), b); } |
| |
| SI F mad(F f, F m, float a) { return mad(f, m, F_(a)); } |
| SI F mad(F f, float m, F a) { return mad(f, F_(m), a); } |
| SI F mad(F f, float m, float a) { return mad(f, F_(m), F_(a)); } |
| SI F mad(float f, F m, F a) { return mad(F_(f), m, a); } |
| SI F mad(float f, F m, float a) { return mad(F_(f), m, F_(a)); } |
| SI F mad(float f, float m, F a) { return mad(F_(f), F_(m), a); } |
| |
| SI F nmad(F f, F m, float a) { return nmad(f, m, F_(a)); } |
| SI F nmad(F f, float m, F a) { return nmad(f, F_(m), a); } |
| SI F nmad(F f, float m, float a) { return nmad(f, F_(m), F_(a)); } |
| SI F nmad(float f, F m, F a) { return nmad(F_(f), m, a); } |
| SI F nmad(float f, F m, float a) { return nmad(F_(f), m, F_(a)); } |
| SI F nmad(float f, float m, F a) { return nmad(F_(f), F_(m), a); } |
| #endif |
| |
// We need to be a little careful with casts.
| // (F)x means cast x to float in the portable path, but bit_cast x to float in the others. |
| // These named casts and bit_cast() are always what they seem to be. |
| #if defined(JUMPER_IS_SCALAR) |
| SI F cast (U32 v) { return (F)v; } |
| SI F cast64(U64 v) { return (F)v; } |
| SI U32 trunc_(F v) { return (U32)v; } |
| SI U32 expand(U16 v) { return (U32)v; } |
| SI U32 expand(U8 v) { return (U32)v; } |
| #else |
| SI F cast (U32 v) { return __builtin_convertvector((I32)v, F); } |
| SI F cast64(U64 v) { return __builtin_convertvector( v, F); } |
| SI U32 trunc_(F v) { return (U32)__builtin_convertvector( v, I32); } |
| SI U32 expand(U16 v) { return __builtin_convertvector( v, U32); } |
| SI U32 expand(U8 v) { return __builtin_convertvector( v, U32); } |
| #endif |
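
// For example, cast() turns a U32 lane holding 3 into an F lane holding 3.0f (a value
// conversion), while sk_bit_cast<F>() would leave the 32-bit pattern of each lane untouched.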
| |
| #if !defined(JUMPER_IS_SCALAR) |
| SI F if_then_else(I32 c, F t, float e) { return if_then_else(c, t , F_(e)); } |
| SI F if_then_else(I32 c, float t, F e) { return if_then_else(c, F_(t), e ); } |
| SI F if_then_else(I32 c, float t, float e) { return if_then_else(c, F_(t), F_(e)); } |
| #endif |
| |
| SI F fract(F v) { return v - floor_(v); } |
| |
| // See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html |
| SI F approx_log2(F x) { |
| // e - 127 is a fair approximation of log2(x) in its own right... |
| F e = cast(sk_bit_cast<U32>(x)) * (1.0f / (1<<23)); |
| |
| // ... but using the mantissa to refine its error is _much_ better. |
| F m = sk_bit_cast<F>((sk_bit_cast<U32>(x) & 0x007fffff) | 0x3f000000); |
| |
| return nmad(m, 1.498030302f, e - 124.225514990f) - 1.725879990f / (0.3520887068f + m); |
| } |
| |
| SI F approx_log(F x) { |
| const float ln2 = 0.69314718f; |
| return ln2 * approx_log2(x); |
| } |
| |
| SI F approx_pow2(F x) { |
| constexpr float kInfinityBits = 0x7f800000; |
| |
| F f = fract(x); |
| F approx = nmad(f, 1.490129070f, x + 121.274057500f); |
| approx += 27.728023300f / (4.84252568f - f); |
| approx *= 1.0f * (1<<23); |
| approx = min(max(approx, F0), F_(kInfinityBits)); // guard against underflow/overflow |
| |
| return sk_bit_cast<F>(round(approx)); |
| } |
| |
| SI F approx_exp(F x) { |
| const float log2_e = 1.4426950408889634074f; |
| return approx_pow2(log2_e * x); |
| } |
| |
| SI F approx_powf(F x, F y) { |
| return if_then_else((x == 0)|(x == 1), x |
| , approx_pow2(approx_log2(x) * y)); |
| } |
| #if !defined(JUMPER_IS_SCALAR) |
| SI F approx_powf(F x, float y) { return approx_powf(x, F_(y)); } |
| #endif |
| |
| SI F from_half(U16 h) { |
| #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) |
| return vcvt_f32_f16((float16x4_t)h); |
| |
| #elif defined(JUMPER_IS_SKX) |
| return _mm512_cvtph_ps((__m256i)h); |
| |
| #elif defined(JUMPER_IS_HSW) |
| return _mm256_cvtph_ps((__m128i)h); |
| |
| #else |
| // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias. |
| U32 sem = expand(h), |
| s = sem & 0x8000, |
| em = sem ^ s; |
| |
    // Convert to 1-8-23 float with 127 bias, flushing denorm halves (including zero) to zero.
| auto denorm = (I32)em < 0x0400; // I32 comparison is often quicker, and always safe here. |
| return if_then_else(denorm, F0 |
| , sk_bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) )); |
| #endif |
| } |
| |
| SI U16 to_half(F f) { |
| #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) |
| return (U16)vcvt_f16_f32(f); |
| |
| #elif defined(JUMPER_IS_SKX) |
| return (U16)_mm512_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION); |
| |
| #elif defined(JUMPER_IS_HSW) |
| return (U16)_mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION); |
| |
| #else |
| // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias. |
| U32 sem = sk_bit_cast<U32>(f), |
| s = sem & 0x80000000, |
| em = sem ^ s; |
| |
    // Convert to 1-5-10 half with 15 bias, flushing floats that would be denorm halves
    // (including zero) to zero.
| auto denorm = (I32)em < 0x38800000; // I32 comparison is often quicker, and always safe here. |
| return pack((U32)if_then_else(denorm, I32_(0) |
| , (I32)((s>>16) + (em>>13) - ((127-15)<<10)))); |
| #endif |
| } |
| |
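// When the pipeline runs a partial "tail" of fewer than N pixels, loads and stores must not
// touch pixels past the end of the row. patch_memory_contexts() redirects each affected
// MemoryCtx into a full-vector scratch buffer: scratchFakeBase is offset backwards so the
// stages' usual base + bytesPerPixel*(dy*stride + dx) addressing lands at the start of the
// scratch. restore_memory_contexts() swaps the real pixel pointer back and, for stores,
// copies the scratch contents out.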
| static void patch_memory_contexts(SkSpan<SkRasterPipeline_MemoryCtxPatch> memoryCtxPatches, |
| size_t dx, size_t dy, size_t tail) { |
| for (SkRasterPipeline_MemoryCtxPatch& patch : memoryCtxPatches) { |
| SkRasterPipeline_MemoryCtx* ctx = patch.info.context; |
| |
| const ptrdiff_t offset = patch.info.bytesPerPixel * (dy * ctx->stride + dx); |
| if (patch.info.load) { |
| void* ctxData = SkTAddOffset<void>(ctx->pixels, offset); |
| memcpy(patch.scratch, ctxData, patch.info.bytesPerPixel * tail); |
| } |
| |
| SkASSERT(patch.backup == nullptr); |
| void* scratchFakeBase = SkTAddOffset<void>(patch.scratch, -offset); |
| patch.backup = ctx->pixels; |
| ctx->pixels = scratchFakeBase; |
| } |
| } |
| |
| static void restore_memory_contexts(SkSpan<SkRasterPipeline_MemoryCtxPatch> memoryCtxPatches, |
| size_t dx, size_t dy, size_t tail) { |
| for (SkRasterPipeline_MemoryCtxPatch& patch : memoryCtxPatches) { |
| SkRasterPipeline_MemoryCtx* ctx = patch.info.context; |
| |
| SkASSERT(patch.backup != nullptr); |
| ctx->pixels = patch.backup; |
| patch.backup = nullptr; |
| |
| const ptrdiff_t offset = patch.info.bytesPerPixel * (dy * ctx->stride + dx); |
| if (patch.info.store) { |
| void* ctxData = SkTAddOffset<void>(ctx->pixels, offset); |
| memcpy(ctxData, patch.scratch, patch.info.bytesPerPixel * tail); |
| } |
| } |
| } |
| |
| #if defined(JUMPER_IS_SCALAR) || defined(JUMPER_IS_SSE2) |
| // In scalar and SSE2 mode, we always use precise math so we can have more predictable results. |
| // Chrome will use the SSE2 implementation when --disable-skia-runtime-opts is set. (b/40042946) |
| SI F rcp_fast(F v) { return rcp_precise(v); } |
| SI F rsqrt(F v) { return rcp_precise(sqrt_(v)); } |
| #else |
| SI F rcp_fast(F v) { return rcp_approx(v); } |
| SI F rsqrt(F v) { return rsqrt_approx(v); } |
| #endif |
| |
| // Our fundamental vector depth is our pixel stride. |
| static constexpr size_t N = sizeof(F) / sizeof(float); |
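// For example, N == 1 in scalar mode, 4 for NEON/SSE (128-bit vectors),
// 8 for AVX/HSW (256-bit), and 16 for SKX (512-bit).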
| |
| // We're finally going to get to what a Stage function looks like! |
| |
| // Any custom ABI to use for all (non-externally-facing) stage functions? |
| // Also decide here whether to use narrow (compromise) or wide (ideal) stages. |
| #if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON) |
| // This lets us pass vectors more efficiently on 32-bit ARM. |
| // We can still only pass 16 floats, so best as 4x {r,g,b,a}. |
| #define ABI __attribute__((pcs("aapcs-vfp"))) |
| #define JUMPER_NARROW_STAGES 1 |
| #elif defined(_MSC_VER) |
| // Even if not vectorized, this lets us pass {r,g,b,a} as registers, |
| // instead of {b,a} on the stack. Narrow stages work best for __vectorcall. |
| #define ABI __vectorcall |
| #define JUMPER_NARROW_STAGES 1 |
| #elif defined(__x86_64__) || defined(SK_CPU_ARM64) || defined(SK_CPU_LOONGARCH) |
| // These platforms are ideal for wider stages, and their default ABI is ideal. |
| #define ABI |
| #define JUMPER_NARROW_STAGES 0 |
| #else |
| // 32-bit or unknown... shunt them down the narrow path. |
| // Odds are these have few registers and are better off there. |
| #define ABI |
| #define JUMPER_NARROW_STAGES 1 |
| #endif |
| |
| #if JUMPER_NARROW_STAGES |
| struct Params { |
| size_t dx, dy; |
| std::byte* base; |
| F dr,dg,db,da; |
| }; |
| using Stage = void(ABI*)(Params*, SkRasterPipelineStage* program, F r, F g, F b, F a); |
| #else |
| using Stage = void(ABI*)(SkRasterPipelineStage* program, size_t dx, size_t dy, |
| std::byte* base, F,F,F,F, F,F,F,F); |
| #endif |
| |
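// start_pipeline() drives the whole program: it walks the requested rectangle row by row,
// running the stage chain on N pixels at a time, then handles any leftover "tail" of fewer
// than N pixels by patching the memory contexts (above) so partial batches stay in bounds.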
| static void start_pipeline(size_t dx, size_t dy, |
| size_t xlimit, size_t ylimit, |
| SkRasterPipelineStage* program, |
| SkSpan<SkRasterPipeline_MemoryCtxPatch> memoryCtxPatches, |
| uint8_t* tailPointer) { |
| uint8_t unreferencedTail; |
| if (!tailPointer) { |
| tailPointer = &unreferencedTail; |
| } |
| auto start = (Stage)program->fn; |
| const size_t x0 = dx; |
| std::byte* const base = nullptr; |
| for (; dy < ylimit; dy++) { |
| #if JUMPER_NARROW_STAGES |
| Params params = { x0,dy,base, F0,F0,F0,F0 }; |
| while (params.dx + N <= xlimit) { |
| start(¶ms,program, F0,F0,F0,F0); |
| params.dx += N; |
| } |
| if (size_t tail = xlimit - params.dx) { |
| *tailPointer = tail; |
| patch_memory_contexts(memoryCtxPatches, params.dx, dy, tail); |
| start(¶ms,program, F0,F0,F0,F0); |
| restore_memory_contexts(memoryCtxPatches, params.dx, dy, tail); |
| *tailPointer = 0xFF; |
| } |
| #else |
| dx = x0; |
| while (dx + N <= xlimit) { |
| start(program,dx,dy,base, F0,F0,F0,F0, F0,F0,F0,F0); |
| dx += N; |
| } |
| if (size_t tail = xlimit - dx) { |
| *tailPointer = tail; |
| patch_memory_contexts(memoryCtxPatches, dx, dy, tail); |
| start(program,dx,dy,base, F0,F0,F0,F0, F0,F0,F0,F0); |
| restore_memory_contexts(memoryCtxPatches, dx, dy, tail); |
| *tailPointer = 0xFF; |
| } |
| #endif |
| } |
| } |
| |
| #if SK_HAS_MUSTTAIL |
| #define JUMPER_MUSTTAIL [[clang::musttail]] |
| #else |
| #define JUMPER_MUSTTAIL |
| #endif |
| |
| #if JUMPER_NARROW_STAGES |
| #define DECLARE_STAGE(name, ARG, STAGE_RET, INC, OFFSET, MUSTTAIL) \ |
| SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, std::byte*& base, \ |
| F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \ |
| static void ABI name(Params* params, SkRasterPipelineStage* program, \ |
| F r, F g, F b, F a) { \ |
| OFFSET name##_k(Ctx{program}, params->dx,params->dy,params->base, \ |
| r,g,b,a, params->dr, params->dg, params->db, params->da); \ |
| INC; \ |
| auto fn = (Stage)program->fn; \ |
| MUSTTAIL return fn(params, program, r,g,b,a); \ |
| } \ |
| SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, std::byte*& base, \ |
| F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da) |
| #else |
| #define DECLARE_STAGE(name, ARG, STAGE_RET, INC, OFFSET, MUSTTAIL) \ |
| SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, std::byte*& base, \ |
| F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \ |
| static void ABI name(SkRasterPipelineStage* program, size_t dx, size_t dy, \ |
| std::byte* base, F r, F g, F b, F a, F dr, F dg, F db, F da) { \ |
| OFFSET name##_k(Ctx{program}, dx,dy,base, r,g,b,a, dr,dg,db,da); \ |
| INC; \ |
| auto fn = (Stage)program->fn; \ |
| MUSTTAIL return fn(program, dx,dy,base, r,g,b,a, dr,dg,db,da); \ |
| } \ |
| SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, std::byte*& base, \ |
| F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da) |
| #endif |
| |
| // A typical stage returns void, always increments the program counter by 1, and lets the optimizer |
| // decide whether or not tail-calling is appropriate. |
| #define STAGE(name, arg) \ |
| DECLARE_STAGE(name, arg, void, ++program, /*no offset*/, /*no musttail*/) |
| |
| // A tail stage returns void, always increments the program counter by 1, and uses tail-calling. |
| // Tail-calling is necessary in SkSL-generated programs, which can be thousands of ops long, and |
| // could overflow the stack (particularly in debug). |
| #define STAGE_TAIL(name, arg) \ |
| DECLARE_STAGE(name, arg, void, ++program, /*no offset*/, JUMPER_MUSTTAIL) |
| |
// A branch stage returns an integer, which is added directly to the program counter, and tail-calls.
| #define STAGE_BRANCH(name, arg) \ |
| DECLARE_STAGE(name, arg, int, /*no increment*/, program +=, JUMPER_MUSTTAIL) |
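// As a concrete example, a stage defined later in this file as
//     STAGE(store_src_a, float* ptr) { sk_unaligned_store(ptr, a); }
// expands (via DECLARE_STAGE) into a store_src_a_k() holding that body, plus an ABI wrapper
// that invokes it and then tail-calls the next stage in `program`.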
| |
| // just_return() is a simple no-op stage that only exists to end the chain, |
| // returning back up to start_pipeline(), and from there to the caller. |
| #if JUMPER_NARROW_STAGES |
| static void ABI just_return(Params*, SkRasterPipelineStage*, F,F,F,F) {} |
| #else |
| static void ABI just_return(SkRasterPipelineStage*, size_t,size_t, std::byte*, |
| F,F,F,F, F,F,F,F) {} |
| #endif |
| |
| // Note that in release builds, most stages consume no stack (thanks to tail call optimization). |
| // However: certain builds (especially with non-clang compilers) may fail to optimize tail |
| // calls, resulting in actual stack frames being generated. |
| // |
| // stack_checkpoint() and stack_rewind() are special stages that can be used to manage stack growth. |
| // If a pipeline contains a stack_checkpoint, followed by any number of stack_rewind (at any point), |
// the C++ stack will be reset to the state it was in when the stack_checkpoint was initially hit.
| // |
| // All instances of stack_rewind (as well as the one instance of stack_checkpoint near the start of |
| // a pipeline) share a single context (of type SkRasterPipeline_RewindCtx). That context holds the |
| // full state of the mutable registers that are normally passed to the next stage in the program. |
| // |
| // stack_rewind is the only stage other than just_return that actually returns (rather than jumping |
| // to the next stage in the program). Before it does so, it stashes all of the registers in the |
| // context. This includes the updated `program` pointer. Unlike stages that tail call exactly once, |
| // stack_checkpoint calls the next stage in the program repeatedly, as long as the `program` in the |
| // context is overwritten (i.e., as long as a stack_rewind was the reason the pipeline returned, |
| // rather than a just_return). |
| // |
| // Normally, just_return is the only stage that returns, and no other stage does anything after a |
| // subsequent (called) stage returns, so the stack just unwinds all the way to start_pipeline. |
| // With stack_checkpoint on the stack, any stack_rewind stages will return all the way up to the |
| // stack_checkpoint. That grabs the values that would have been passed to the next stage (from the |
| // context), and continues the linear execution of stages, but has reclaimed all of the stack frames |
| // pushed before the stack_rewind before doing so. |
| #if JUMPER_NARROW_STAGES |
| static void ABI stack_checkpoint(Params* params, SkRasterPipelineStage* program, |
| F r, F g, F b, F a) { |
| SkRasterPipeline_RewindCtx* ctx = Ctx{program}; |
| while (program) { |
| auto next = (Stage)(++program)->fn; |
| |
| ctx->stage = nullptr; |
| next(params, program, r, g, b, a); |
| program = ctx->stage; |
| |
| if (program) { |
| r = sk_unaligned_load<F>(ctx->r ); |
| g = sk_unaligned_load<F>(ctx->g ); |
| b = sk_unaligned_load<F>(ctx->b ); |
| a = sk_unaligned_load<F>(ctx->a ); |
| params->dr = sk_unaligned_load<F>(ctx->dr); |
| params->dg = sk_unaligned_load<F>(ctx->dg); |
| params->db = sk_unaligned_load<F>(ctx->db); |
| params->da = sk_unaligned_load<F>(ctx->da); |
| params->base = ctx->base; |
| } |
| } |
| } |
| static void ABI stack_rewind(Params* params, SkRasterPipelineStage* program, |
| F r, F g, F b, F a) { |
| SkRasterPipeline_RewindCtx* ctx = Ctx{program}; |
| sk_unaligned_store(ctx->r , r ); |
| sk_unaligned_store(ctx->g , g ); |
| sk_unaligned_store(ctx->b , b ); |
| sk_unaligned_store(ctx->a , a ); |
| sk_unaligned_store(ctx->dr, params->dr); |
| sk_unaligned_store(ctx->dg, params->dg); |
| sk_unaligned_store(ctx->db, params->db); |
| sk_unaligned_store(ctx->da, params->da); |
| ctx->base = params->base; |
| ctx->stage = program; |
| } |
| #else |
| static void ABI stack_checkpoint(SkRasterPipelineStage* program, |
| size_t dx, size_t dy, std::byte* base, |
| F r, F g, F b, F a, F dr, F dg, F db, F da) { |
| SkRasterPipeline_RewindCtx* ctx = Ctx{program}; |
| while (program) { |
| auto next = (Stage)(++program)->fn; |
| |
| ctx->stage = nullptr; |
| next(program, dx, dy, base, r, g, b, a, dr, dg, db, da); |
| program = ctx->stage; |
| |
| if (program) { |
| r = sk_unaligned_load<F>(ctx->r ); |
| g = sk_unaligned_load<F>(ctx->g ); |
| b = sk_unaligned_load<F>(ctx->b ); |
| a = sk_unaligned_load<F>(ctx->a ); |
| dr = sk_unaligned_load<F>(ctx->dr); |
| dg = sk_unaligned_load<F>(ctx->dg); |
| db = sk_unaligned_load<F>(ctx->db); |
| da = sk_unaligned_load<F>(ctx->da); |
| base = ctx->base; |
| } |
| } |
| } |
| static void ABI stack_rewind(SkRasterPipelineStage* program, |
| size_t dx, size_t dy, std::byte* base, |
| F r, F g, F b, F a, F dr, F dg, F db, F da) { |
| SkRasterPipeline_RewindCtx* ctx = Ctx{program}; |
| sk_unaligned_store(ctx->r , r ); |
| sk_unaligned_store(ctx->g , g ); |
| sk_unaligned_store(ctx->b , b ); |
| sk_unaligned_store(ctx->a , a ); |
| sk_unaligned_store(ctx->dr, dr); |
| sk_unaligned_store(ctx->dg, dg); |
| sk_unaligned_store(ctx->db, db); |
| sk_unaligned_store(ctx->da, da); |
| ctx->base = base; |
| ctx->stage = program; |
| } |
| #endif |
| // We could start defining normal Stages now. But first, some helper functions. |
| |
| template <typename V, typename T> |
| SI V load(const T* src) { |
| return sk_unaligned_load<V>(src); |
| } |
| |
| template <typename V, typename T> |
| SI void store(T* dst, V v) { |
| sk_unaligned_store(dst, v); |
| } |
| |
| SI F from_byte(U8 b) { |
| return cast(expand(b)) * (1/255.0f); |
| } |
| SI F from_short(U16 s) { |
| return cast(expand(s)) * (1/65535.0f); |
| } |
| SI void from_565(U16 _565, F* r, F* g, F* b) { |
| U32 wide = expand(_565); |
| *r = cast(wide & (31<<11)) * (1.0f / (31<<11)); |
| *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5)); |
| *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0)); |
| } |
| SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) { |
| U32 wide = expand(_4444); |
| *r = cast(wide & (15<<12)) * (1.0f / (15<<12)); |
| *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8)); |
| *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4)); |
| *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0)); |
| } |
| SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) { |
| *r = cast((_8888 ) & 0xff) * (1/255.0f); |
| *g = cast((_8888 >> 8) & 0xff) * (1/255.0f); |
| *b = cast((_8888 >> 16) & 0xff) * (1/255.0f); |
| *a = cast((_8888 >> 24) ) * (1/255.0f); |
| } |
| SI void from_88(U16 _88, F* r, F* g) { |
| U32 wide = expand(_88); |
| *r = cast((wide ) & 0xff) * (1/255.0f); |
| *g = cast((wide >> 8) & 0xff) * (1/255.0f); |
| } |
| SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) { |
| *r = cast((rgba ) & 0x3ff) * (1/1023.0f); |
| *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f); |
| *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f); |
| *a = cast((rgba >> 30) ) * (1/ 3.0f); |
| } |
| SI void from_1010102_xr(U32 rgba, F* r, F* g, F* b, F* a) { |
| static constexpr float min = -0.752941f; |
| static constexpr float max = 1.25098f; |
| static constexpr float range = max - min; |
| *r = cast((rgba ) & 0x3ff) * (1/1023.0f) * range + min; |
| *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f) * range + min; |
| *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f) * range + min; |
| *a = cast((rgba >> 30) ) * (1/ 3.0f); |
| } |
| SI void from_10101010_xr(U64 _10x6, F* r, F* g, F* b, F* a) { |
| *r = (cast64((_10x6 >> 6) & 0x3ff) - 384.f) / 510.f; |
| *g = (cast64((_10x6 >> 22) & 0x3ff) - 384.f) / 510.f; |
| *b = (cast64((_10x6 >> 38) & 0x3ff) - 384.f) / 510.f; |
| *a = (cast64((_10x6 >> 54) & 0x3ff) - 384.f) / 510.f; |
| } |
| SI void from_10x6(U64 _10x6, F* r, F* g, F* b, F* a) { |
| *r = cast64((_10x6 >> 6) & 0x3ff) * (1/1023.0f); |
| *g = cast64((_10x6 >> 22) & 0x3ff) * (1/1023.0f); |
| *b = cast64((_10x6 >> 38) & 0x3ff) * (1/1023.0f); |
| *a = cast64((_10x6 >> 54) & 0x3ff) * (1/1023.0f); |
| } |
| SI void from_1616(U32 _1616, F* r, F* g) { |
| *r = cast((_1616 ) & 0xffff) * (1/65535.0f); |
| *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f); |
| } |
| SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) { |
| *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f); |
| *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f); |
| *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f); |
| *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f); |
| } |
| |
| // Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory. |
| template <typename T> |
| SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) { |
| return (T*)ctx->pixels + dy*ctx->stride + dx; |
| } |
| |
| // clamp v to [0,limit). |
| SI F clamp(F v, F limit) { |
| F inclusive = sk_bit_cast<F>(sk_bit_cast<U32>(limit) - 1); // Exclusive -> inclusive. |
| return min(max(0.0f, v), inclusive); |
| } |
| |
// clamp v to (0,limit): at least the smallest positive normal float, and strictly below limit.
| SI F clamp_ex(F v, float limit) { |
| const F inclusiveZ = F_(std::numeric_limits<float>::min()), |
| inclusiveL = sk_bit_cast<F>( sk_bit_cast<U32>(F_(limit)) - 1 ); |
| return min(max(inclusiveZ, v), inclusiveL); |
| } |
| |
| // Polynomial approximation of degree 5 for sin(x * 2 * pi) in the range [-1/4, 1/4] |
| // Adapted from https://github.com/google/swiftshader/blob/master/docs/Sin-Cos-Optimization.pdf |
| SI F sin5q_(F x) { |
| // A * x + B * x^3 + C * x^5 |
| // Exact at x = 0, 1/12, 1/6, 1/4, and their negatives, |
| // which correspond to x * 2 * pi = 0, pi/6, pi/3, pi/2 |
| constexpr float A = 6.28230858f; |
| constexpr float B = -41.1693687f; |
| constexpr float C = 74.4388885f; |
| F x2 = x * x; |
| return x * mad(mad(x2, C, B), x2, A); |
| } |
| |
| SI F sin_(F x) { |
| constexpr float one_over_pi2 = 1 / (2 * SK_FloatPI); |
| x = mad(x, -one_over_pi2, 0.25f); |
| x = 0.25f - abs_(x - floor_(x + 0.5f)); |
| return sin5q_(x); |
| } |
| |
| SI F cos_(F x) { |
| constexpr float one_over_pi2 = 1 / (2 * SK_FloatPI); |
| x *= one_over_pi2; |
| x = 0.25f - abs_(x - floor_(x + 0.5f)); |
| return sin5q_(x); |
| } |
| |
| /* "GENERATING ACCURATE VALUES FOR THE TANGENT FUNCTION" |
| https://mae.ufl.edu/~uhk/ACCURATE-TANGENT.pdf |
| |
| approx = x + (1/3)x^3 + (2/15)x^5 + (17/315)x^7 + (62/2835)x^9 |
| |
| Some simplifications: |
| 1. tan(x) is periodic, -PI/2 < x < PI/2 |
| 2. tan(x) is odd, so tan(-x) = -tan(x) |
| 3. Our polynomial approximation is best near zero, so we use the following identity |
| tan(x) + tan(y) |
| tan(x + y) = ----------------- |
| 1 - tan(x)*tan(y) |
| tan(PI/4) = 1 |
| |
| So for x > PI/8, we do the following refactor: |
| x' = x - PI/4 |
| |
| 1 + tan(x') |
| tan(x) = ------------ |
| 1 - tan(x') |
| */ |
| SI F tan_(F x) { |
| constexpr float Pi = SK_FloatPI; |
| // periodic between -pi/2 ... pi/2 |
| // shift to 0...Pi, scale 1/Pi to get into 0...1, then fract, scale-up, shift-back |
| x = mad(fract(mad(x, 1/Pi, 0.5f)), Pi, -Pi/2); |
| |
| I32 neg = (x < 0.0f); |
| x = if_then_else(neg, -x, x); |
| |
| // minimize total error by shifting if x > pi/8 |
| I32 use_quotient = (x > (Pi/8)); |
| x = if_then_else(use_quotient, x - (Pi/4), x); |
| |
| // 9th order poly = 4th order(x^2) * x |
| const float c4 = 62 / 2835.0f; |
| const float c3 = 17 / 315.0f; |
| const float c2 = 2 / 15.0f; |
| const float c1 = 1 / 3.0f; |
| const float c0 = 1.0f; |
| F x2 = x * x; |
| x *= mad(x2, mad(x2, mad(x2, mad(x2, c4, c3), c2), c1), c0); |
| x = if_then_else(use_quotient, (1+x)/(1-x), x); |
| x = if_then_else(neg, -x, x); |
| return x; |
| } |
| |
| /* Use 4th order polynomial approximation from https://arachnoid.com/polysolve/ |
| with 129 values of x,atan(x) for x:[0...1] |
| This only works for 0 <= x <= 1 |
| */ |
| SI F approx_atan_unit(F x) { |
| // y = 0.14130025741326729 x⁴ |
| // - 0.34312835980675116 x³ |
| // - 0.016172900528248768 x² |
| // + 1.00376969762003850 x |
| // - 0.00014758242182738969 |
| const float c4 = 0.14130025741326729f; |
| const float c3 = -0.34312835980675116f; |
| const float c2 = -0.016172900528248768f; |
| const float c1 = 1.0037696976200385f; |
| const float c0 = -0.00014758242182738969f; |
| return mad(x, mad(x, mad(x, mad(x, c4, c3), c2), c1), c0); |
| } |
| |
| // Use identity atan(x) = pi/2 - atan(1/x) for x > 1 |
| SI F atan_(F x) { |
| I32 neg = (x < 0.0f); |
| x = if_then_else(neg, -x, x); |
| I32 flip = (x > 1.0f); |
| x = if_then_else(flip, 1/x, x); |
| x = approx_atan_unit(x); |
| x = if_then_else(flip, SK_FloatPI/2 - x, x); |
| x = if_then_else(neg, -x, x); |
| return x; |
| } |
| |
| // Handbook of Mathematical Functions, by Milton Abramowitz and Irene Stegun: |
| // https://books.google.com/books/content?id=ZboM5tOFWtsC&pg=PA81&img=1&zoom=3&hl=en&bul=1&sig=ACfU3U2M75tG_iGVOS92eQspr14LTq02Nw&ci=0%2C15%2C999%2C1279&edge=0 |
| // http://screen/8YGJxUGFQ49bVX6 |
| SI F asin_(F x) { |
| I32 neg = (x < 0.0f); |
| x = if_then_else(neg, -x, x); |
| const float c3 = -0.0187293f; |
| const float c2 = 0.0742610f; |
| const float c1 = -0.2121144f; |
| const float c0 = 1.5707288f; |
| F poly = mad(x, mad(x, mad(x, c3, c2), c1), c0); |
| x = nmad(sqrt_(1 - x), poly, SK_FloatPI/2); |
| x = if_then_else(neg, -x, x); |
| return x; |
| } |
| |
| SI F acos_(F x) { |
| return SK_FloatPI/2 - asin_(x); |
| } |
| |
/* Use identity atan(x) = pi/2 - atan(1/x) for x > 1.
   By swapping y,x to ensure the ratio is <= 1, we can safely call approx_atan_unit(),
   which avoids the second divide that calling atan_() would require.
*/
| SI F atan2_(F y0, F x0) { |
| I32 flip = (abs_(y0) > abs_(x0)); |
| F y = if_then_else(flip, x0, y0); |
| F x = if_then_else(flip, y0, x0); |
| F arg = y/x; |
| |
| I32 neg = (arg < 0.0f); |
| arg = if_then_else(neg, -arg, arg); |
| |
| F r = approx_atan_unit(arg); |
| r = if_then_else(flip, SK_FloatPI/2 - r, r); |
| r = if_then_else(neg, -r, r); |
| |
| // handle quadrant distinctions |
| r = if_then_else((y0 >= 0) & (x0 < 0), r + SK_FloatPI, r); |
| r = if_then_else((y0 < 0) & (x0 <= 0), r - SK_FloatPI, r); |
| // Note: we don't try to handle 0,0 or infinities |
| return r; |
| } |
| |
| // Used by gather_ stages to calculate the base pointer and a vector of indices to load. |
| template <typename T> |
| SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) { |
| // We use exclusive clamp so that our min value is > 0 because ULP subtraction using U32 would |
| // produce a NaN if applied to +0.f. |
| x = clamp_ex(x, ctx->width ); |
| y = clamp_ex(y, ctx->height); |
| x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger); |
| y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger); |
| *ptr = (const T*)ctx->pixels; |
| return trunc_(y)*ctx->stride + trunc_(x); |
| } |
| |
| // We often have a nominally [0,1] float value we need to scale and convert to an integer, |
| // whether for a table lookup or to pack back down into bytes for storage. |
| // |
| // In practice, especially when dealing with interesting color spaces, that notionally |
| // [0,1] float may be out of [0,1] range. Unorms cannot represent that, so we must clamp. |
| // |
| // You can adjust the expected input to [0,bias] by tweaking that parameter. |
| SI U32 to_unorm(F v, float scale, float bias = 1.0f) { |
| // Any time we use round() we probably want to use to_unorm(). |
| return round(min(max(0.0f, v), bias), F_(scale)); |
| } |
| |
| SI I32 cond_to_mask(I32 cond) { |
| #if defined(JUMPER_IS_SCALAR) |
| // In scalar mode, conditions are bools (0 or 1), but we want to store and operate on masks |
    // (e.g., using bitwise operations to select values).
| return if_then_else(cond, I32(~0), I32(0)); |
| #else |
| // In SIMD mode, our various instruction sets already represent conditions as masks. |
| return cond; |
| #endif |
| } |
| |
| #if defined(JUMPER_IS_SCALAR) |
| // In scalar mode, `data` only contains a single lane. |
| SI uint32_t select_lane(uint32_t data, int /*lane*/) { return data; } |
| SI int32_t select_lane( int32_t data, int /*lane*/) { return data; } |
| #else |
| // In SIMD mode, `data` contains a vector of lanes. |
| SI uint32_t select_lane(U32 data, int lane) { return data[lane]; } |
| SI int32_t select_lane(I32 data, int lane) { return data[lane]; } |
| #endif |
| |
| // Now finally, normal Stages! |
| |
| STAGE(seed_shader, NoCtx) { |
| static constexpr float iota[] = { |
| 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f, |
| 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f, |
| }; |
| static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp); |
| |
| // It's important for speed to explicitly cast(dx) and cast(dy), |
| // which has the effect of splatting them to vectors before converting to floats. |
| // On Intel this breaks a data dependency on previous loop iterations' registers. |
| r = cast(U32_(dx)) + sk_unaligned_load<F>(iota); |
| g = cast(U32_(dy)) + 0.5f; |
| b = F1; // This is w=1 for matrix multiplies by the device coords. |
| a = F0; |
| } |
| |
| STAGE(dither, const float* rate) { |
| // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors. |
| uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; |
| static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp); |
| |
| U32 X = U32_(dx) + sk_unaligned_load<U32>(iota), |
| Y = U32_(dy); |
| |
| // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering. |
| // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ]. |
| |
| // We only need X and X^Y from here on, so it's easier to just think of that as "Y". |
| Y ^= X; |
| |
| // We'll mix the bottom 3 bits of each of X and Y to make 6 bits, |
| // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda. |
| U32 M = (Y & 1) << 5 | (X & 1) << 4 |
| | (Y & 2) << 2 | (X & 2) << 1 |
| | (Y & 4) >> 1 | (X & 4) >> 2; |
| |
| // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon. |
| // We want to make sure our dither is less than 0.5 in either direction to keep exact values |
| // like 0 and 1 unchanged after rounding. |
| F dither = mad(cast(M), 2/128.0f, -63/128.0f); |
| |
| r = mad(dither, *rate, r); |
| g = mad(dither, *rate, g); |
| b = mad(dither, *rate, b); |
| |
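    // Dithering can push premultiplied values out of range, so clamp each channel to [0, a].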
| r = max(0.0f, min(r, a)); |
| g = max(0.0f, min(g, a)); |
| b = max(0.0f, min(b, a)); |
| } |
| |
| // load 4 floats from memory, and splat them into r,g,b,a |
| STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) { |
| r = F_(c->r); |
| g = F_(c->g); |
| b = F_(c->b); |
| a = F_(c->a); |
| } |
| STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) { |
| r = F_(c->r); |
| g = F_(c->g); |
| b = F_(c->b); |
| a = F_(c->a); |
| } |
| // load 4 floats from memory, and splat them into dr,dg,db,da |
| STAGE(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) { |
| dr = F_(c->r); |
| dg = F_(c->g); |
| db = F_(c->b); |
| da = F_(c->a); |
| } |
| |
| // splats opaque-black into r,g,b,a |
| STAGE(black_color, NoCtx) { |
| r = g = b = F0; |
| a = F1; |
| } |
| |
| STAGE(white_color, NoCtx) { |
| r = g = b = a = F1; |
| } |
| |
| // load registers r,g,b,a from context (mirrors store_src) |
| STAGE(load_src, const float* ptr) { |
| r = sk_unaligned_load<F>(ptr + 0*N); |
| g = sk_unaligned_load<F>(ptr + 1*N); |
| b = sk_unaligned_load<F>(ptr + 2*N); |
| a = sk_unaligned_load<F>(ptr + 3*N); |
| } |
| |
| // store registers r,g,b,a into context (mirrors load_src) |
| STAGE(store_src, float* ptr) { |
| sk_unaligned_store(ptr + 0*N, r); |
| sk_unaligned_store(ptr + 1*N, g); |
| sk_unaligned_store(ptr + 2*N, b); |
| sk_unaligned_store(ptr + 3*N, a); |
| } |
| // store registers r,g into context |
| STAGE(store_src_rg, float* ptr) { |
| sk_unaligned_store(ptr + 0*N, r); |
| sk_unaligned_store(ptr + 1*N, g); |
| } |
| // load registers r,g from context |
| STAGE(load_src_rg, float* ptr) { |
| r = sk_unaligned_load<F>(ptr + 0*N); |
| g = sk_unaligned_load<F>(ptr + 1*N); |
| } |
| // store register a into context |
| STAGE(store_src_a, float* ptr) { |
| sk_unaligned_store(ptr, a); |
| } |
| |
| // load registers dr,dg,db,da from context (mirrors store_dst) |
| STAGE(load_dst, const float* ptr) { |
| dr = sk_unaligned_load<F>(ptr + 0*N); |
| dg = sk_unaligned_load<F>(ptr + 1*N); |
| db = sk_unaligned_load<F>(ptr + 2*N); |
| da = sk_unaligned_load<F>(ptr + 3*N); |
| } |
| |
| // store registers dr,dg,db,da into context (mirrors load_dst) |
| STAGE(store_dst, float* ptr) { |
| sk_unaligned_store(ptr + 0*N, dr); |
| sk_unaligned_store(ptr + 1*N, dg); |
| sk_unaligned_store(ptr + 2*N, db); |
| sk_unaligned_store(ptr + 3*N, da); |
| } |
| |
| // Most blend modes apply the same logic to each channel. |
| #define BLEND_MODE(name) \ |
| SI F name##_channel(F s, F d, F sa, F da); \ |
| STAGE(name, NoCtx) { \ |
| r = name##_channel(r,dr,a,da); \ |
| g = name##_channel(g,dg,a,da); \ |
| b = name##_channel(b,db,a,da); \ |
| a = name##_channel(a,da,a,da); \ |
| } \ |
| SI F name##_channel(F s, F d, F sa, F da) |
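// For example, BLEND_MODE(srcin) below defines srcin_channel() returning s*da, plus a
// srcin stage that applies it to each of r,g,b,a paired with dr,dg,db,da.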
| |
| SI F inv(F x) { return 1.0f - x; } |
| SI F two(F x) { return x + x; } |
| |
| BLEND_MODE(clear) { return F0; } |
| BLEND_MODE(srcatop) { return mad(s, da, d*inv(sa)); } |
| BLEND_MODE(dstatop) { return mad(d, sa, s*inv(da)); } |
| BLEND_MODE(srcin) { return s * da; } |
| BLEND_MODE(dstin) { return d * sa; } |
| BLEND_MODE(srcout) { return s * inv(da); } |
| BLEND_MODE(dstout) { return d * inv(sa); } |
| BLEND_MODE(srcover) { return mad(d, inv(sa), s); } |
| BLEND_MODE(dstover) { return mad(s, inv(da), d); } |
| |
| BLEND_MODE(modulate) { return s*d; } |
| BLEND_MODE(multiply) { return mad(s, d, mad(s, inv(da), d*inv(sa))); } |
| BLEND_MODE(plus_) { return min(s + d, 1.0f); } // We can clamp to either 1 or sa. |
| BLEND_MODE(screen) { return nmad(s, d, s + d); } |
| BLEND_MODE(xor_) { return mad(s, inv(da), d*inv(sa)); } |
| #undef BLEND_MODE |
| |
| // Most other blend modes apply the same logic to colors, and srcover to alpha. |
| #define BLEND_MODE(name) \ |
| SI F name##_channel(F s, F d, F sa, F da); \ |
| STAGE(name, NoCtx) { \ |
| r = name##_channel(r,dr,a,da); \ |
| g = name##_channel(g,dg,a,da); \ |
| b = name##_channel(b,db,a,da); \ |
| a = mad(da, inv(a), a); \ |
| } \ |
| SI F name##_channel(F s, F d, F sa, F da) |
| |
| BLEND_MODE(darken) { return s + d - max(s*da, d*sa) ; } |
| BLEND_MODE(lighten) { return s + d - min(s*da, d*sa) ; } |
| BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); } |
| BLEND_MODE(exclusion) { return s + d - two(s*d); } |
| |
| BLEND_MODE(colorburn) { |
| return if_then_else(d == da, d + s*inv(da), |
| if_then_else(s == 0, /* s + */ d*inv(sa), |
| sa*(da - min(da, (da-d)*sa*rcp_fast(s))) + s*inv(da) + d*inv(sa))); |
| } |
| BLEND_MODE(colordodge) { |
| return if_then_else(d == 0, /* d + */ s*inv(da), |
| if_then_else(s == sa, s + d*inv(sa), |
| sa*min(da, (d*sa)*rcp_fast(sa - s)) + s*inv(da) + d*inv(sa))); |
| } |
| BLEND_MODE(hardlight) { |
| return s*inv(da) + d*inv(sa) |
| + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s))); |
| } |
| BLEND_MODE(overlay) { |
| return s*inv(da) + d*inv(sa) |
| + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s))); |
| } |
| |
| BLEND_MODE(softlight) { |
| F m = if_then_else(da > 0, d / da, 0.0f), |
| s2 = two(s), |
| m4 = two(two(m)); |
| |
| // The logic forks three ways: |
| // 1. dark src? |
| // 2. light src, dark dst? |
| // 3. light src, light dst? |
| F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1. |
| darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2. |
| liteDst = sqrt_(m) - m, |
| liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3? |
| return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)? |
| } |
| #undef BLEND_MODE |
| |
// We're basing our implementation of non-separable blend modes on
// https://www.w3.org/TR/compositing-1/#blendingnonseparable
// and
// https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf
// They're equivalent, but the ES spec's math has been simplified further.
| // |
| // Anything extra we add beyond that is to make the math work with premul inputs. |
| |
| SI F sat(F r, F g, F b) { return max(r, max(g,b)) - min(r, min(g,b)); } |
| SI F lum(F r, F g, F b) { return mad(r, 0.30f, mad(g, 0.59f, b*0.11f)); } |
| |
| SI void set_sat(F* r, F* g, F* b, F s) { |
| F mn = min(*r, min(*g,*b)), |
| mx = max(*r, max(*g,*b)), |
| sat = mx - mn; |
| |
| // Map min channel to 0, max channel to s, and scale the middle proportionally. |
| s = if_then_else(sat == 0.0f, 0.0f, s * rcp_fast(sat)); |
| *r = (*r - mn) * s; |
| *g = (*g - mn) * s; |
| *b = (*b - mn) * s; |
| } |
| SI void set_lum(F* r, F* g, F* b, F l) { |
| F diff = l - lum(*r, *g, *b); |
| *r += diff; |
| *g += diff; |
| *b += diff; |
| } |
|