Reland "Add pooling support on iOS."

This reverts commit f73091bb81fb3b0352f24a14479dba51069fdc8b.

Reason for revert: rolling forward after fixing the prior CL

Original change's description:
> Revert "Add pooling support on iOS."
>
> This reverts commit 38a93e622b8cb6888d97551ac165406694342c7b.
>
> Reason for revert: need to revert the first pool change
>
> Original change's description:
> > Add pooling support on iOS.
> >
> > This replaces the `thread_local` attribute with `pthread_setspecific`
> > and `pthread_getspecific`. I don't have easy access to iOS 8/9 for
> > testing purposes, but on Mac OS X, this implementation works and
> > benchmarks the same as the `thread_local` implementation.
> >
> > Change-Id: I86db88c24d59d946adb66141b32733ebf5261c76
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/328837
> > Reviewed-by: Brian Osman <brianosman@google.com>
> > Commit-Queue: Brian Osman <brianosman@google.com>
> > Auto-Submit: John Stiles <johnstiles@google.com>
>
> TBR=brianosman@google.com,adlai@google.com,johnstiles@google.com
>
> Change-Id: Ic06f9e32e524b2be601ee21a5da605fd19aaa64b
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329164
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

TBR=egdaniel@google.com,brianosman@google.com,adlai@google.com,johnstiles@google.com

Change-Id: I0e021e9304ee88d6a29739c287eb515abff8b8a4
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329173
Commit-Queue: John Stiles <johnstiles@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: John Stiles <johnstiles@google.com>
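
For reference, a minimal self-contained sketch of the pthread-key pattern
described in the original change: a lazily created pthread key stands in for a
`thread_local` pointer. The identifiers below are illustrative only, not the
actual Skia helpers in the diff that follows.

    // Stand-in for `thread_local`: one lazily created pthread key holds a
    // per-thread pointer that can be read back later on the same thread.
    #include <pthread.h>
    #include <cstdio>

    static pthread_key_t get_key() {
        static pthread_key_t sKey = [] {
            pthread_key_t key;
            if (pthread_key_create(&key, /*destructor=*/nullptr) != 0) {
                // Real code would abort here; this sketch just reports it.
                std::fprintf(stderr, "pthread_key_create failed\n");
            }
            return key;
        }();
        return sKey;
    }

    int main() {
        int value = 42;
        pthread_setspecific(get_key(), &value);     // store a per-thread pointer
        auto* fetched = static_cast<int*>(pthread_getspecific(get_key()));
        std::printf("%d\n", fetched ? *fetched : -1);  // prints 42 on this thread
        return 0;
    }
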
diff --git a/src/sksl/SkSLPool.cpp b/src/sksl/SkSLPool.cpp
index 2830fe2..ac9524d 100644
--- a/src/sksl/SkSLPool.cpp
+++ b/src/sksl/SkSLPool.cpp
@@ -13,26 +13,6 @@
 
 namespace SkSL {
 
-#if defined(SK_BUILD_FOR_IOS) && \
-        (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
-
-// iOS did not support for C++11 `thread_local` variables until iOS 9.
-// Pooling is not supported here; we allocate all nodes directly.
-struct PoolData {};
-
-Pool::~Pool() {}
-std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
-    auto pool = std::unique_ptr<Pool>(new Pool);
-    pool->fData = nullptr;
-    return pool;
-}
-void Pool::detachFromThread() {}
-void Pool::attachToThread() {}
-void* Pool::AllocIRNode() { return ::operator new(sizeof(IRNode)); }
-void Pool::FreeIRNode(void* node) { ::operator delete(node); }
-
-#else  // !defined(SK_BUILD_FOR_IOS)...
-
 namespace { struct IRNodeData {
     union {
         uint8_t fBuffer[sizeof(IRNode)];
@@ -53,15 +33,52 @@
     // Accessors.
     ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }
 
-    ptrdiff_t nodeIndex(IRNodeData* node) {
+    int nodeIndex(IRNodeData* node) {
         SkASSERT(node >= fNodes);
         SkASSERT(node < fNodesEnd);
-        return node - fNodes;
+        return SkToInt(node - fNodes);
     }
 };
 
+#if defined(SK_BUILD_FOR_IOS) && \
+        (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+
+#include <pthread.h>
+
+static pthread_key_t get_pthread_key() {
+    static pthread_key_t sKey = []{
+        pthread_key_t key;
+        int result = pthread_key_create(&key, /*destructor=*/nullptr);
+        if (result != 0) {
+            SK_ABORT("pthread_key_create failure: %d", result);
+        }
+        return key;
+    }();
+    return sKey;
+}
+
+static PoolData* get_thread_local_pool_data() {
+    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
+}
+
+static void set_thread_local_pool_data(PoolData* poolData) {
+    pthread_setspecific(get_pthread_key(), poolData);
+}
+
+#else
+
 static thread_local PoolData* sPoolData = nullptr;
 
+static PoolData* get_thread_local_pool_data() {
+    return sPoolData;
+}
+
+static void set_thread_local_pool_data(PoolData* poolData) {
+    sPoolData = poolData;
+}
+
+#endif
+
 static PoolData* create_pool_data(int nodesInPool) {
     // Create a PoolData structure with extra space at the end for additional IRNode data.
     int numExtraIRNodes = nodesInPool - 1;
@@ -80,9 +97,9 @@
 }
 
 Pool::~Pool() {
-    if (sPoolData == fData) {
+    if (get_thread_local_pool_data() == fData) {
         SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
-        sPoolData = nullptr;
+        set_thread_local_pool_data(nullptr);
     }
 
     // In debug mode, report any leaked nodes.
@@ -120,27 +137,28 @@
 }
 
 void Pool::detachFromThread() {
-    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)sPoolData);
-    SkASSERT(sPoolData != nullptr);
-    sPoolData = nullptr;
+    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
+    SkASSERT(get_thread_local_pool_data() != nullptr);
+    set_thread_local_pool_data(nullptr);
 }
 
 void Pool::attachToThread() {
     VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
-    SkASSERT(sPoolData == nullptr);
-    sPoolData = fData;
+    SkASSERT(get_thread_local_pool_data() == nullptr);
+    set_thread_local_pool_data(fData);
 }
 
 void* Pool::AllocIRNode() {
     // Is a pool attached?
-    if (sPoolData) {
+    PoolData* poolData = get_thread_local_pool_data();
+    if (poolData) {
         // Does the pool contain a free node?
-        IRNodeData* node = sPoolData->fFreeListHead;
+        IRNodeData* node = poolData->fFreeListHead;
         if (node) {
             // Yes. Take a node from the freelist.
-            sPoolData->fFreeListHead = node->fFreeListNext;
+            poolData->fFreeListHead = node->fFreeListNext;
             VLOG("ALLOC  Pool:0x%016llX Index:%04d         0x%016llX\n",
-                 (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
+                 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
             return node->fBuffer;
         }
     }
@@ -148,31 +166,30 @@
     // The pool is detached or full; allocate nodes using malloc.
     void* ptr = ::operator new(sizeof(IRNode));
     VLOG("ALLOC  Pool:0x%016llX Index:____ malloc  0x%016llX\n",
-         (uint64_t)sPoolData, (uint64_t)ptr);
+         (uint64_t)poolData, (uint64_t)ptr);
     return ptr;
 }
 
 void Pool::FreeIRNode(void* node_v) {
     // Is a pool attached?
-    if (sPoolData) {
+    PoolData* poolData = get_thread_local_pool_data();
+    if (poolData) {
         // Did this node come from our pool?
         auto* node = static_cast<IRNodeData*>(node_v);
-        if (node >= &sPoolData->fNodes[0] && node < sPoolData->fNodesEnd) {
+        if (node >= &poolData->fNodes[0] && node < poolData->fNodesEnd) {
             // Yes. Push it back onto the freelist.
             VLOG("FREE   Pool:0x%016llX Index:%04d         0x%016llX\n",
-                 (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
-            node->fFreeListNext = sPoolData->fFreeListHead;
-            sPoolData->fFreeListHead = node;
+                 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
+            node->fFreeListNext = poolData->fFreeListHead;
+            poolData->fFreeListHead = node;
             return;
         }
     }
 
     // No pool is attached or the node was malloced; it must be freed.
     VLOG("FREE   Pool:0x%016llX Index:____ free    0x%016llX\n",
-         (uint64_t)sPoolData, (uint64_t)node_v);
+         (uint64_t)poolData, (uint64_t)node_v);
     ::operator delete(node_v);
 }
 
-#endif  // !defined(SK_BUILD_FOR_IOS)...
-
 }  // namespace SkSL