Virtual PLSRenderContextImpl
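This change splits the backend-agnostic PLSRenderContext from its backend implementations by introducing a virtual PLSRenderContextImpl interface; the GL and D3D backends become impls owned by the context rather than subclasses of it. A minimal sketch of the resulting ownership model, mirroring the fiddle_context changes further down in this patch (the helper functions here are illustrative and not part of the change):

```cpp
#include <memory>
#include "rive/pls/pls_render_context.hpp"
#include "rive/pls/gl/pls_render_context_gl_impl.hpp"

using namespace rive;
using namespace rive::pls;

std::unique_ptr<PLSRenderContext> makeGLContext()
{
    // PLSRenderContextGLImpl::Make() builds the backend impl; PLSRenderContext
    // owns it and drives all buffer mapping and flushing through the
    // PLSRenderContextImpl virtuals.
    return std::make_unique<PLSRenderContext>(PLSRenderContextGLImpl::Make());
}

rcp<PLSRenderTargetGL> makeGLRenderTarget(PLSRenderContext* context,
                                          size_t width,
                                          size_t height)
{
    // Backend-specific entry points (e.g. render target creation) are reached
    // by downcasting impl(), as the fiddle contexts below now do.
    auto* glImpl = static_cast<PLSRenderContextGLImpl*>(context->impl());
    return glImpl->makeOffscreenRenderTarget(width, height);
}
```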
diff --git a/include/rive/pls/buffer_ring.hpp b/include/rive/pls/buffer_ring.hpp
index e099a2e..602cd90 100644
--- a/include/rive/pls/buffer_ring.hpp
+++ b/include/rive/pls/buffer_ring.hpp
@@ -27,6 +27,7 @@
 
     size_t capacity() const { return m_capacity; }
     size_t itemSizeInBytes() const { return m_itemSizeInBytes; }
+    size_t totalSizeInBytes() const { return m_capacity * m_itemSizeInBytes * kBufferRingSize; }
 
     // Maps the next buffer in the ring.
     void* mapBuffer()
@@ -168,117 +169,4 @@
 private:
     std::unique_ptr<char[]> m_cpuBuffers[kBufferRingSize];
 };
-
-// Wrapper for an abstract BufferRingImpl that supports mapping buffers, writing an array of items
-// of the same type, and submitting for rendering.
-//
-// Intended usage pattern:
-//
-//  * Call ensureMapped() to map the next buffer in the ring.
-//  * push() all items for rendering.
-//  * Call submit() to unmap and submit the currently-mapped buffer for rendering, in whatever way
-//    that is meaningful for the PLSRenderContext implementation.
-//
-template <typename T> class BufferRing
-{
-public:
-    BufferRing() = default;
-    BufferRing(std::unique_ptr<BufferRingImpl> impl) { reset(std::move(impl)); }
-    BufferRing(BufferRing&& other) : m_impl(std::move(other.m_impl)) {}
-
-    void reset(std::unique_ptr<BufferRingImpl> impl)
-    {
-        assert(!mapped());
-        assert(impl->itemSizeInBytes() == sizeof(T));
-        m_impl = std::move(impl);
-    }
-
-    size_t totalSizeInBytes() const
-    {
-        return m_impl ? kBufferRingSize * m_impl->capacity() * m_impl->itemSizeInBytes() : 0;
-    }
-
-    size_t capacity() const { return m_impl->capacity(); }
-
-    // Maps the next buffer in the ring, if one is not already mapped.
-    RIVE_ALWAYS_INLINE void ensureMapped()
-    {
-        if (!mapped())
-        {
-            m_mappedMemory = m_nextMappedItem = reinterpret_cast<T*>(m_impl->mapBuffer());
-            m_mappingEnd = m_mappedMemory + m_impl->capacity();
-        }
-    }
-
-    const BufferRingImpl* impl() const { return m_impl.get(); }
-    BufferRingImpl* impl() { return m_impl.get(); }
-
-    // Is a buffer not mapped, or, has nothing been pushed yet to the currently-mapped buffer?
-    size_t empty() const
-    {
-        assert(!m_mappedMemory == !m_nextMappedItem);
-        return m_mappedMemory == m_nextMappedItem;
-    }
-
-    // How many bytes have been written to the currently-mapped buffer?
-    // (Returns 0 if no buffer is mapped.)
-    size_t bytesWritten() const
-    {
-        assert(!m_mappedMemory == !m_mappingEnd);
-        return reinterpret_cast<uintptr_t>(m_nextMappedItem) -
-               reinterpret_cast<uintptr_t>(m_mappedMemory);
-    }
-
-    // Is a buffer currently mapped?
-    bool mapped() const
-    {
-        assert(!m_mappedMemory == !m_nextMappedItem && !m_mappedMemory == !m_mappingEnd);
-        return m_mappedMemory != nullptr;
-    }
-
-    // Is there room to push() itemCount items to the currently-mapped buffer?
-    bool hasRoomFor(size_t itemCount)
-    {
-        assert(mapped());
-        return m_nextMappedItem + itemCount <= m_mappingEnd;
-    }
-
-    // Append and write a new item to the currently-mapped buffer. In order to enforce the
-    // write-only requirement of a mapped buffer, this method does not return any pointers to the
-    // client.
-    template <typename... Args> RIVE_ALWAYS_INLINE void emplace_back(Args&&... args)
-    {
-        push() = {std::forward<Args>(args)...};
-    }
-    template <typename... Args> RIVE_ALWAYS_INLINE void set_back(Args&&... args)
-    {
-        push().set(std::forward<Args>(args)...);
-    }
-
-    // Called after all the data for a frame has been push()-ed to the mapped buffer. Unmaps and
-    // submits the currently-mapped buffer (if any) for GPU rendering, in whatever way that is
-    // meaningful for the PLSRenderContext implementation.
-    void submit()
-    {
-        if (mapped())
-        {
-            m_impl->unmapAndSubmitBuffer(bytesWritten());
-            m_mappingEnd = m_nextMappedItem = m_mappedMemory = nullptr;
-        }
-        assert(!mapped());
-    }
-
-private:
-    template <typename... Args> RIVE_ALWAYS_INLINE T& push()
-    {
-        assert(mapped());
-        assert(hasRoomFor(1));
-        return *m_nextMappedItem++;
-    }
-
-    std::unique_ptr<BufferRingImpl> m_impl;
-    T* m_mappedMemory = nullptr;
-    T* m_nextMappedItem = nullptr;
-    const T* m_mappingEnd = nullptr;
-};
 } // namespace rive::pls
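With the BufferRing<T> wrapper removed, its old ensureMapped()/push()/submit() pattern is superseded by the PLSRenderContextImpl map*/unmap* virtuals plus the WriteOnlyMappedMemory<T> cursor added to pls.hpp later in this diff. A hedged sketch of the new shape of that flow, using the gradient-span buffer as an example (the capacity and the surrounding frame logic are illustrative):

```cpp
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_render_context_impl.hpp"

using namespace rive::pls;

// Illustrative only: PLSRenderContext now maps a raw buffer through its impl,
// wraps it in a write-only cursor, and reports the bytes written back on unmap.
void writeComplexGradients(PLSRenderContextImpl* impl, size_t gradSpanCapacity)
{
    WriteOnlyMappedMemory<GradientSpan> gradSpanData(impl->mapGradSpanBuffer(),
                                                     gradSpanCapacity);
    // ... gradSpanData.emplace_back(...) once per complex gradient span ...
    impl->unmapGradSpanBuffer(gradSpanData.bytesWritten());
}
```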
diff --git a/include/rive/pls/d3d/pls_render_context_d3d.hpp b/include/rive/pls/d3d/pls_render_context_d3d_impl.hpp
similarity index 91%
rename from include/rive/pls/d3d/pls_render_context_d3d.hpp
rename to include/rive/pls/d3d/pls_render_context_d3d_impl.hpp
index b0c7b1c..ba0f0bf 100644
--- a/include/rive/pls/d3d/pls_render_context_d3d.hpp
+++ b/include/rive/pls/d3d/pls_render_context_d3d_impl.hpp
@@ -5,7 +5,7 @@
 #pragma once
 
 #include "rive/pls/d3d/d3d11.hpp"
-#include "rive/pls/pls_render_context.hpp"
+#include "rive/pls/pls_render_context_buffer_ring_impl.hpp"
 #include <map>
 
 namespace rive::pls
@@ -22,7 +22,7 @@
     ID3D11Texture2D* targetTexture() const { return m_targetTexture.Get(); }
 
 private:
-    friend class PLSRenderContextD3D;
+    friend class PLSRenderContextD3DImpl;
 
     PLSRenderTargetD3D(ID3D11Device*, size_t width, size_t height);
 
@@ -38,10 +38,10 @@
 };
 
 // D3D backend implementation of PLSRenderContext.
-class PLSRenderContextD3D : public PLSRenderContext
+class PLSRenderContextD3DImpl : public PLSRenderContextBufferRingImpl
 {
 public:
-    PLSRenderContextD3D(ComPtr<ID3D11Device>, ComPtr<ID3D11DeviceContext>, bool isIntel);
+    PLSRenderContextD3DImpl(ComPtr<ID3D11Device>, ComPtr<ID3D11DeviceContext>, bool isIntel);
 
     rcp<PLSRenderTargetD3D> makeRenderTarget(size_t width, size_t height);
 
@@ -64,7 +64,7 @@
     void allocateGradientTexture(size_t height) override;
     void allocateTessellationTexture(size_t height) override;
 
-    void onFlush(const FlushDescriptor&) override;
+    void flush(const PLSRenderContext::FlushDescriptor&) override;
 
     void setPipelineLayoutAndShaders(DrawType, const ShaderFeatures&);
 
diff --git a/include/rive/pls/gl/pls_render_context_gl.hpp b/include/rive/pls/gl/pls_render_context_gl_impl.hpp
similarity index 86%
rename from include/rive/pls/gl/pls_render_context_gl.hpp
rename to include/rive/pls/gl/pls_render_context_gl_impl.hpp
index 4a37f2b..7fb8294 100644
--- a/include/rive/pls/gl/pls_render_context_gl.hpp
+++ b/include/rive/pls/gl/pls_render_context_gl_impl.hpp
@@ -9,7 +9,7 @@
 #include "rive/pls/gl/gles3.hpp"
 #include "rive/pls/gl/pls_render_target_gl.hpp"
 #include "rive/pls/buffer_ring.hpp"
-#include "rive/pls/pls_render_context.hpp"
+#include "rive/pls/pls_render_context_buffer_ring_impl.hpp"
 #include <map>
 
 namespace rive::pls
@@ -19,17 +19,17 @@
 class PLSRenderTargetGL;
 
 // OpenGL backend implementation of PLSRenderContext.
-class PLSRenderContextGL : public PLSRenderContext
+class PLSRenderContextGLImpl : public PLSRenderContextBufferRingImpl
 {
 public:
-    static std::unique_ptr<PLSRenderContextGL> Make();
-    ~PLSRenderContextGL() override;
+    static std::unique_ptr<PLSRenderContextGLImpl> Make();
+    ~PLSRenderContextGLImpl() override;
 
     // Creates a PLSRenderTarget that draws directly into the given GL framebuffer.
     // Returns null if the framebuffer doesn't support pixel local storage.
     rcp<PLSRenderTargetGL> wrapGLRenderTarget(GLuint framebufferID, size_t width, size_t height)
     {
-        return m_plsImpl->wrapGLRenderTarget(framebufferID, width, height, m_platformFeatures);
+        return m_plsImpl->wrapGLRenderTarget(framebufferID, width, height, platformFeatures());
     }
 
     // Creates a PLSRenderTarget that draws to a new, offscreen GL framebuffer. This method is
@@ -37,7 +37,7 @@
     // results.
     rcp<PLSRenderTargetGL> makeOffscreenRenderTarget(size_t width, size_t height)
     {
-        return m_plsImpl->makeOffscreenRenderTarget(width, height, m_platformFeatures);
+        return m_plsImpl->makeOffscreenRenderTarget(width, height, platformFeatures());
     }
 
 private:
@@ -55,11 +55,9 @@
                                                                  size_t height,
                                                                  const PlatformFeatures&) = 0;
 
-        virtual void activatePixelLocalStorage(PLSRenderContextGL*,
-                                               const PLSRenderTargetGL*,
-                                               LoadAction,
-                                               bool needsClipBuffer) = 0;
-        virtual void deactivatePixelLocalStorage(PLSRenderContextGL*) = 0;
+        virtual void activatePixelLocalStorage(PLSRenderContextGLImpl*,
+                                               const PLSRenderContext::FlushDescriptor&) = 0;
+        virtual void deactivatePixelLocalStorage(PLSRenderContextGLImpl*) = 0;
 
         virtual const char* shaderDefineName() const = 0;
 
@@ -105,7 +103,7 @@
     public:
         DrawProgram(const DrawProgram&) = delete;
         DrawProgram& operator=(const DrawProgram&) = delete;
-        DrawProgram(PLSRenderContextGL*, DrawType, const ShaderFeatures&);
+        DrawProgram(PLSRenderContextGLImpl*, DrawType, const ShaderFeatures&);
         ~DrawProgram();
 
         GLuint id() const { return m_id; }
@@ -118,12 +116,7 @@
 
     class DrawShader;
 
-    PLSRenderContextGL(const PlatformFeatures&, GLExtensions, std::unique_ptr<PLSImpl>);
-
-    const PLSRenderTargetGL* renderTarget() const
-    {
-        return static_cast<const PLSRenderTargetGL*>(frameDescriptor().renderTarget.get());
-    }
+    PLSRenderContextGLImpl(const PlatformFeatures&, GLExtensions, std::unique_ptr<PLSImpl>);
 
     std::unique_ptr<BufferRingImpl> makeVertexBufferRing(size_t capacity,
                                                          size_t itemSizeInBytes) override;
@@ -143,7 +136,7 @@
     void allocateGradientTexture(size_t height) override;
     void allocateTessellationTexture(size_t height) override;
 
-    void onFlush(const FlushDescriptor&) override;
+    void flush(const PLSRenderContext::FlushDescriptor&) override;
 
     // GL state wrapping.
     void bindProgram(GLuint);
diff --git a/include/rive/pls/gl/pls_render_target_gl.hpp b/include/rive/pls/gl/pls_render_target_gl.hpp
index 17346b6..dc6664d 100644
--- a/include/rive/pls/gl/pls_render_target_gl.hpp
+++ b/include/rive/pls/gl/pls_render_target_gl.hpp
@@ -24,7 +24,7 @@
     GLuint sideFramebufferID() const { return m_sideFramebufferID; }
 
 private:
-    friend class PLSRenderContextGL;
+    friend class PLSRenderContextGLImpl;
 
     // Creates a render target that draws to an existing GL framebuffer. The caller must also call
     // allocateCoverageBackingTextures() and attach those textures to the framebuffer if needed.
diff --git a/include/rive/pls/pls.hpp b/include/rive/pls/pls.hpp
index 30c6ee1..863f88e 100644
--- a/include/rive/pls/pls.hpp
+++ b/include/rive/pls/pls.hpp
@@ -12,6 +12,11 @@
 #include "rive/shapes/paint/color.hpp"
 #include "rive/shapes/paint/stroke_join.hpp"
 
+namespace rive
+{
+class GrInnerFanTriangulator;
+}
+
 // This header defines constants and data structures for Rive's pixel local storage path rendering
 // algorithm.
 //
@@ -62,6 +67,7 @@
 // In order to support WebGL2, we implement the path data buffer as a texture.
 constexpr static size_t kPathTextureWidthInItems = 128;
 constexpr static size_t kPathTexelsPerItem = 3;
+constexpr static size_t kPathTextureWidthInTexels = kPathTextureWidthInItems * kPathTexelsPerItem;
 
 // Each contour has its own unique ID, which it uses to index a data record containing per-contour
 // information. This value is currently 16 bit.
@@ -72,6 +78,8 @@
 // In order to support WebGL2, we implement the contour data buffer as a texture.
 constexpr static size_t kContourTextureWidthInItems = 256;
 constexpr static size_t kContourTexelsPerItem = 1;
+constexpr static size_t kContourTextureWidthInTexels =
+    kContourTextureWidthInItems * kContourTexelsPerItem;
 
 // Tessellation is performed by rendering vertices into a data texture. These values define the
 // dimensions of the tessellation data texture.
@@ -389,6 +397,56 @@
 };
 static_assert(sizeof(TriangleVertex) == sizeof(float) * 3);
 
+template <typename T> class WriteOnlyMappedMemory
+{
+public:
+    WriteOnlyMappedMemory() { reset(); }
+    WriteOnlyMappedMemory(void* ptr, size_t count) { reset(ptr, count); }
+
+    void reset() { reset(nullptr, 0); }
+
+    void reset(void* ptr, size_t count)
+    {
+        m_mappedMemory = reinterpret_cast<T*>(ptr);
+        m_nextMappedItem = m_mappedMemory;
+        m_mappingEnd = m_mappedMemory + count;
+    }
+
+    operator bool() const { return m_mappedMemory; }
+
+    // How many bytes have been written to the buffer?
+    size_t bytesWritten() const
+    {
+        return reinterpret_cast<uintptr_t>(m_nextMappedItem) -
+               reinterpret_cast<uintptr_t>(m_mappedMemory);
+    }
+
+    // Is there room to push() itemCount items to the buffer?
+    bool hasRoomFor(size_t itemCount) { return m_nextMappedItem + itemCount <= m_mappingEnd; }
+
+    // Append and write a new item to the buffer. In order to enforce the write-only requirement of
+    // a mapped buffer, these methods do not return any pointers to the client.
+    template <typename... Args> RIVE_ALWAYS_INLINE void emplace_back(Args&&... args)
+    {
+        push() = {std::forward<Args>(args)...};
+    }
+    template <typename... Args> RIVE_ALWAYS_INLINE void set_back(Args&&... args)
+    {
+        push().set(std::forward<Args>(args)...);
+    }
+
+private:
+    template <typename... Args> RIVE_ALWAYS_INLINE T& push()
+    {
+        assert(hasRoomFor(1));
+        return *m_nextMappedItem++;
+    }
+
+    T* m_mappedMemory;
+    T* m_nextMappedItem;
+    const T* m_mappingEnd;
+};
+
 // Once all curves in a contour have been tessellated, we render the tessellated vertices in
 // "patches" (aka specific instanced geometry).
 //
@@ -482,6 +540,135 @@
 void GeneratePatchBufferData(PatchVertex[kPatchVertexBufferCount],
                              uint16_t indices[kPatchIndexBufferCount]);
 
+enum class DrawType : uint8_t
+{
+    midpointFanPatches, // Standard paths and/or strokes.
+    outerCurvePatches,  // Just the outer curves of a path; the interior will be triangulated.
+    interiorTriangulation
+};
+
+constexpr static uint32_t PatchSegmentSpan(DrawType drawType)
+{
+    switch (drawType)
+    {
+        case DrawType::midpointFanPatches:
+            return kMidpointFanPatchSegmentSpan;
+        case DrawType::outerCurvePatches:
+            return kOuterCurvePatchSegmentSpan;
+        default:
+            RIVE_UNREACHABLE();
+    }
+}
+
+constexpr static uint32_t PatchIndexCount(DrawType drawType)
+{
+    switch (drawType)
+    {
+        case DrawType::midpointFanPatches:
+            return kMidpointFanPatchIndexCount;
+        case DrawType::outerCurvePatches:
+            return kOuterCurvePatchIndexCount;
+        default:
+            RIVE_UNREACHABLE();
+    }
+}
+
+constexpr static uintptr_t PatchBaseIndex(DrawType drawType)
+{
+    switch (drawType)
+    {
+        case DrawType::midpointFanPatches:
+            return kMidpointFanPatchBaseIndex;
+        case DrawType::outerCurvePatches:
+            return kOuterCurvePatchBaseIndex;
+        default:
+            RIVE_UNREACHABLE();
+    }
+}
+
+// Specifies what to do with the render target at the beginning of a flush.
+enum class LoadAction : bool
+{
+    clear,
+    preserveRenderTarget
+};
+
+// Indicates how much blendMode support will be needed in the "uber" draw shader.
+enum class BlendTier : uint8_t
+{
+    srcOver,     // Every draw uses srcOver.
+    advanced,    // Draws use srcOver *and* advanced blend modes, excluding HSL modes.
+    advancedHSL, // Draws use srcOver *and* advanced blend modes *and* advanced HSL modes.
+};
+
+// Used by ShaderFeatures to generate keys and source code.
+enum class SourceType : bool
+{
+    vertexOnly,
+    wholeProgram
+};
+
+// Indicates which "uber shader" features to enable in the draw shader.
+struct ShaderFeatures
+{
+    enum PreprocessorDefines : uint32_t
+    {
+        ENABLE_ADVANCED_BLEND = 1 << 0,
+        ENABLE_PATH_CLIPPING = 1 << 1,
+        ENABLE_EVEN_ODD = 1 << 2,
+        ENABLE_HSL_BLEND_MODES = 1 << 3,
+    };
+
+    // Returns a bitmask of which preprocessor macros must be defined in order to support the
+    // current feature set.
+    uint32_t getPreprocessorDefines(SourceType) const;
+
+    struct
+    {
+        BlendTier blendTier = BlendTier::srcOver;
+        bool enablePathClipping = false;
+    } programFeatures;
+
+    struct
+    {
+        bool enableEvenOdd = false;
+    } fragmentFeatures;
+};
+
+inline static uint32_t ShaderUniqueKey(SourceType sourceType,
+                                       DrawType drawType,
+                                       const ShaderFeatures& shaderFeatures)
+{
+    return (shaderFeatures.getPreprocessorDefines(sourceType) << 1) |
+           (drawType == DrawType::interiorTriangulation);
+}
+
+// Linked list of draws to be issued by the PLSRenderContextImpl during flush().
+struct Draw
+{
+    Draw(DrawType drawType_, uint32_t baseVertexOrInstance_) :
+        drawType(drawType_), baseVertexOrInstance(baseVertexOrInstance_)
+    {}
+    const DrawType drawType;
+    uint32_t baseVertexOrInstance;
+    uint32_t vertexOrInstanceCount = 0; // Calculated during PLSRenderContext::flush().
+    ShaderFeatures shaderFeatures;
+    GrInnerFanTriangulator* triangulator = nullptr; // Used by "interiorTriangulation" draws.
+};
+
+// Simple gradients only have 2 texels, so we write them to mapped texture memory from the CPU
+// instead of rendering them.
+struct TwoTexelRamp
+{
+    void set(const ColorInt colors[2])
+    {
+        UnpackColorToRGBA8(colors[0], colorData);
+        UnpackColorToRGBA8(colors[1], colorData + 4);
+    }
+    uint8_t colorData[8];
+};
+static_assert(sizeof(TwoTexelRamp) == 8 * sizeof(uint8_t));
+
 // Returns the smallest number that can be added to 'value', such that 'value % alignment' == 0.
 template <uint32_t Alignment> RIVE_ALWAYS_INLINE uint32_t PaddingToAlignUp(uint32_t value)
 {
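ShaderFeatures, SourceType, and ShaderUniqueKey now live in pls.hpp so backend impls can key their shader caches without reaching into PLSRenderContext. A small usage sketch with arbitrary feature values, mirroring how PLSRenderContextD3DImpl::setPipelineLayoutAndShaders looks up its vertex and pixel shaders later in this diff:

```cpp
#include <cstdint>
#include <utility>
#include "rive/pls/pls.hpp"

using namespace rive::pls;

// Returns the cache keys a backend would use for the vertex shader and the
// fully-linked program of a midpoint-fan draw with these (arbitrary) features.
std::pair<uint32_t, uint32_t> exampleShaderKeys()
{
    ShaderFeatures features;
    features.programFeatures.blendTier = BlendTier::advanced;
    features.programFeatures.enablePathClipping = true;

    return {ShaderUniqueKey(SourceType::vertexOnly,
                            DrawType::midpointFanPatches,
                            features),
            ShaderUniqueKey(SourceType::wholeProgram,
                            DrawType::midpointFanPatches,
                            features)};
}
```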
diff --git a/include/rive/pls/pls_render_context.hpp b/include/rive/pls/pls_render_context.hpp
index 065b4cd..7c0e174 100644
--- a/include/rive/pls/pls_render_context.hpp
+++ b/include/rive/pls/pls_render_context.hpp
@@ -16,7 +16,6 @@
 
 namespace rive
 {
-class GrInnerFanTriangulator;
 class RawPath;
 } // namespace rive
 
@@ -26,6 +25,7 @@
 class PLSGradient;
 class PLSPaint;
 class PLSPath;
+class PLSRenderContextImpl;
 
 // Used as a key for complex gradients.
 class GradientContentKey
@@ -86,14 +86,11 @@
 class PLSRenderContext
 {
 public:
-    virtual ~PLSRenderContext();
+    PLSRenderContext(std::unique_ptr<PLSRenderContextImpl>);
+    ~PLSRenderContext();
 
-    // Specifies what to do with the render target at the beginning of a flush.
-    enum class LoadAction : bool
-    {
-        clear,
-        preserveRenderTarget
-    };
+    PLSRenderContextImpl* impl() const { return m_impl.get(); }
+    const PlatformFeatures& platformFeatures() const { return m_platformFeatures; }
 
     // Options for controlling how and where a frame is rendered.
     struct FrameDescriptor
@@ -293,11 +290,11 @@
     template <typename T> class PerFlushLinkedList
     {
     public:
-        void reset() { m_tail = m_head = nullptr; }
-
-        bool empty() const;
+        size_t count() const;
+        bool empty() const { return count() == 0; }
         T& tail() const;
         template <typename... Args> void emplace_back(PLSRenderContext* context, Args... args);
+        void reset();
 
         struct Node
         {
@@ -306,111 +303,35 @@
             Node* next = nullptr;
         };
 
-        class Iter
+        template <typename U> class Iter
         {
         public:
             Iter(Node* current) : m_current(current) {}
             bool operator!=(const Iter& other) const { return m_current != other.m_current; }
             void operator++() { m_current = m_current->next; }
-            T& operator*() { return m_current->data; }
+            U& operator*() { return m_current->data; }
 
         private:
             Node* m_current;
         };
-        Iter begin() { return {m_head}; }
-        Iter end() { return {nullptr}; }
+        Iter<T> begin() { return {m_head}; }
+        Iter<T> end() { return {nullptr}; }
+        Iter<const T> begin() const { return {m_head}; }
+        Iter<const T> end() const { return {nullptr}; }
 
     private:
         Node* m_head = nullptr;
         Node* m_tail = nullptr;
+        size_t m_count = 0;
     };
 
-protected:
-    PLSRenderContext(const PlatformFeatures&);
-
-    virtual std::unique_ptr<BufferRingImpl> makeVertexBufferRing(size_t capacity,
-                                                                 size_t itemSizeInBytes) = 0;
-
-    virtual std::unique_ptr<TexelBufferRing> makeTexelBufferRing(TexelBufferRing::Format,
-                                                                 size_t widthInItems,
-                                                                 size_t height,
-                                                                 size_t texelsPerItem,
-                                                                 int textureIdx,
-                                                                 TexelBufferRing::Filter) = 0;
-
-    virtual std::unique_ptr<BufferRingImpl> makePixelUnpackBufferRing(size_t capacity,
-                                                                      size_t itemSizeInBytes) = 0;
-
-    virtual std::unique_ptr<BufferRingImpl> makeUniformBufferRing(size_t sizeInBytes) = 0;
-
-    virtual void allocateGradientTexture(size_t height) = 0;
-    virtual void allocateTessellationTexture(size_t height) = 0;
-
-    const TexelBufferRing* pathBufferRing()
-    {
-        return static_cast<const TexelBufferRing*>(m_pathBuffer.impl());
-    }
-    const TexelBufferRing* contourBufferRing()
-    {
-        return static_cast<const TexelBufferRing*>(m_contourBuffer.impl());
-    }
-    const BufferRingImpl* simpleColorRampsBufferRing() const
-    {
-        return m_simpleColorRampsBuffer.impl();
-    }
-    const BufferRingImpl* gradSpanBufferRing() const { return m_gradSpanBuffer.impl(); }
-    const BufferRingImpl* tessSpanBufferRing() { return m_tessSpanBuffer.impl(); }
-    const BufferRingImpl* triangleBufferRing() { return m_triangleBuffer.impl(); }
-    const BufferRingImpl* uniformBufferRing() const { return m_uniformBuffer.impl(); }
-
-    virtual void onBeginFrame() {}
-
-    // Indicates how much blendMode support will be needed in the "uber" draw shader.
-    enum class BlendTier : uint8_t
-    {
-        srcOver,     // Every draw uses srcOver.
-        advanced,    // Draws use srcOver *and* advanced blend modes, excluding HSL modes.
-        advancedHSL, // Draws use srcOver *and* advanced blend modes *and* advanced HSL modes.
-    };
-
-    // Used by ShaderFeatures to generate keys and source code.
-    enum class SourceType : bool
-    {
-        vertexOnly,
-        wholeProgram
-    };
-
-    // Indicates which "uber shader" features to enable in the draw shader.
-    struct ShaderFeatures
-    {
-        enum PreprocessorDefines : uint32_t
-        {
-            ENABLE_ADVANCED_BLEND = 1 << 0,
-            ENABLE_PATH_CLIPPING = 1 << 1,
-            ENABLE_EVEN_ODD = 1 << 2,
-            ENABLE_HSL_BLEND_MODES = 1 << 3,
-        };
-
-        // Returns a bitmask of which preprocessor macros must be defined in order to support the
-        // current feature set.
-        uint32_t getPreprocessorDefines(SourceType) const;
-
-        struct
-        {
-            BlendTier blendTier = BlendTier::srcOver;
-            bool enablePathClipping = false;
-        } programFeatures;
-
-        struct
-        {
-            bool enableEvenOdd = false;
-        } fragmentFeatures;
-    };
-
+    // protected:
+public:
     struct FlushDescriptor
     {
-        FlushType flushType;
+        const PLSRenderTarget* renderTarget;
         LoadAction loadAction;
+        ColorInt clearColor = 0;
         size_t complexGradSpanCount;
         size_t tessVertexSpanCount;
         uint16_t simpleGradTexelsWidth;
@@ -419,89 +340,23 @@
         uint32_t complexGradRowsHeight;
         uint32_t tessDataHeight;
         bool needsClipBuffer;
+        bool hasTriangleVertices;
+        bool wireframe;
+        const PerFlushLinkedList<Draw>* drawList;
     };
 
-    virtual void onFlush(const FlushDescriptor&) = 0;
-
+private:
     const PlatformFeatures m_platformFeatures;
     const size_t m_maxPathID;
-
-    enum class DrawType : uint8_t
-    {
-        midpointFanPatches, // Standard paths and/or strokes.
-        outerCurvePatches,  // Just the outer curves of a path; the interior will be triangulated.
-        interiorTriangulation
-    };
-
-    constexpr static uint32_t PatchSegmentSpan(DrawType drawType)
-    {
-        switch (drawType)
-        {
-            case DrawType::midpointFanPatches:
-                return kMidpointFanPatchSegmentSpan;
-            case DrawType::outerCurvePatches:
-                return kOuterCurvePatchSegmentSpan;
-            default:
-                RIVE_UNREACHABLE();
-        }
-    }
-
-    constexpr static uint32_t PatchIndexCount(DrawType drawType)
-    {
-        switch (drawType)
-        {
-            case DrawType::midpointFanPatches:
-                return kMidpointFanPatchIndexCount;
-            case DrawType::outerCurvePatches:
-                return kOuterCurvePatchIndexCount;
-            default:
-                RIVE_UNREACHABLE();
-        }
-    }
-
-    constexpr static uintptr_t PatchBaseIndex(DrawType drawType)
-    {
-        switch (drawType)
-        {
-            case DrawType::midpointFanPatches:
-                return kMidpointFanPatchBaseIndex;
-            case DrawType::outerCurvePatches:
-                return kOuterCurvePatchBaseIndex;
-            default:
-                RIVE_UNREACHABLE();
-        }
-    }
-
-    static uint32_t ShaderUniqueKey(SourceType sourceType,
-                                    DrawType drawType,
-                                    const ShaderFeatures& shaderFeatures)
-    {
-        return (shaderFeatures.getPreprocessorDefines(sourceType) << 1) |
-               (drawType == DrawType::interiorTriangulation);
-    }
-
-    // Linked list of draws to be issued by the subclass during onFlush().
-    struct Draw
-    {
-        Draw(DrawType drawType_, uint32_t baseVertexOrInstance_) :
-            drawType(drawType_), baseVertexOrInstance(baseVertexOrInstance_)
-        {}
-        const DrawType drawType;
-        uint32_t baseVertexOrInstance;
-        uint32_t vertexOrInstanceCount = 0; // Calculated during PLSRenderContext::flush().
-        ShaderFeatures shaderFeatures;
-        GrInnerFanTriangulator* triangulator = nullptr; // Used by "interiorTriangulation" draws.
-    };
+    const std::unique_ptr<PLSRenderContextImpl> m_impl;
 
     PerFlushLinkedList<Draw> m_drawList;
-    size_t m_drawListCount = 0;
 
     // GrTriangulator provides an upper bound on the number of vertices it will emit. Triangulations
     // are not written out until the last minute, during flush(), and this variable provides an upper
     // bound on the number of vertices that will be written.
     size_t m_maxTriangleVertexCount = 0;
 
-private:
     static BlendTier BlendTierForBlendMode(PLSBlendMode);
 
     // Allocates a horizontal span of texels in the gradient texture and schedules either a texture
@@ -551,7 +406,7 @@
 
         // Resources allocated at flush time (after we already know exactly how big they need to
         // be).
-        size_t triangleVertexBufferSize;
+        size_t triangleVertexBufferCount;
         size_t gradientTextureHeight;
         size_t tessellationTextureHeight;
 
@@ -564,8 +419,8 @@
             maxComplexGradientSpans =
                 std::max(maxComplexGradientSpans, other.maxComplexGradientSpans);
             maxTessellationSpans = std::max(maxTessellationSpans, other.maxTessellationSpans);
-            triangleVertexBufferSize =
-                std::max(triangleVertexBufferSize, other.triangleVertexBufferSize);
+            triangleVertexBufferCount =
+                std::max(triangleVertexBufferCount, other.triangleVertexBufferCount);
             gradientTextureHeight = std::max(gradientTextureHeight, other.gradientTextureHeight);
             tessellationTextureHeight =
                 std::max(tessellationTextureHeight, other.tessellationTextureHeight);
@@ -589,9 +444,9 @@
             if (maxTessellationSpans > threshold.maxTessellationSpans)
                 scaled.maxTessellationSpans =
                     static_cast<double>(maxTessellationSpans) * scaleFactor;
-            if (triangleVertexBufferSize > threshold.triangleVertexBufferSize)
-                scaled.triangleVertexBufferSize =
-                    static_cast<double>(triangleVertexBufferSize) * scaleFactor;
+            if (triangleVertexBufferCount > threshold.triangleVertexBufferCount)
+                scaled.triangleVertexBufferCount =
+                    static_cast<double>(triangleVertexBufferCount) * scaleFactor;
             if (gradientTextureHeight > threshold.gradientTextureHeight)
                 scaled.gradientTextureHeight =
                     static_cast<double>(gradientTextureHeight) * scaleFactor;
@@ -614,7 +469,7 @@
         GPUResourceLimits resetFlushTimeLimits() const
         {
             GPUResourceLimits noFlushTimeLimits = *this;
-            noFlushTimeLimits.triangleVertexBufferSize = 0;
+            noFlushTimeLimits.triangleVertexBufferCount = 0;
             noFlushTimeLimits.gradientTextureHeight = 0;
             noFlushTimeLimits.tessellationTextureHeight = 0;
             return noFlushTimeLimits;
@@ -641,29 +496,18 @@
     // whether the buffer needs to be updated at the beginning of a flush.
     FlushUniforms m_cachedUniformData{0, 0, 0, 0, 0, m_platformFeatures};
 
-    // Simple gradients only have 2 texels, so we write them to mapped texture memory from the CPU
-    // instead of rendering them.
-    struct TwoTexelRamp
-    {
-        void set(const ColorInt colors[2])
-        {
-            UnpackColorToRGBA8(colors[0], colorData);
-            UnpackColorToRGBA8(colors[1], colorData + 4);
-        }
-        uint8_t colorData[8];
-    };
-
-    BufferRing<PathData> m_pathBuffer;
-    BufferRing<ContourData> m_contourBuffer;
-    BufferRing<TwoTexelRamp> m_simpleColorRampsBuffer; // Simple gradients get written by the CPU.
-    BufferRing<GradientSpan> m_gradSpanBuffer;         // Complex gradients get rendered by the GPU.
-    BufferRing<TessVertexSpan> m_tessSpanBuffer;
-    BufferRing<TriangleVertex> m_triangleBuffer;
-    BufferRing<FlushUniforms> m_uniformBuffer;
+    WriteOnlyMappedMemory<PathData> m_pathData;
+    WriteOnlyMappedMemory<ContourData> m_contourData;
+    // Simple gradients get written by the CPU.
+    WriteOnlyMappedMemory<TwoTexelRamp> m_simpleColorRampsData;
+    // Complex gradients get rendered by the GPU.
+    WriteOnlyMappedMemory<GradientSpan> m_gradSpanData;
+    WriteOnlyMappedMemory<TessVertexSpan> m_tessSpanData;
+    WriteOnlyMappedMemory<TriangleVertex> m_triangleVertexData;
 
     // How many rows of the gradient texture are dedicated to simple (two-texel) ramps?
     // This is also the y-coordinate at which the complex color ramps begin.
-    size_t m_reservedGradTextureRowsForSimpleRamps = 0;
+    size_t m_reservedSimpleGradientRowCount = 0;
 
     // Per-frame state.
     FrameDescriptor m_frameDescriptor;
@@ -702,10 +546,11 @@
     TrivialBlockAllocator m_trivialPerFlushAllocator{kPerFlushAllocatorInitialBlockSize};
 };
 
-template <typename T> bool PLSRenderContext::PerFlushLinkedList<T>::empty() const
+template <typename T> size_t PLSRenderContext::PerFlushLinkedList<T>::count() const
 {
     assert(!!m_head == !!m_tail);
-    return m_tail == nullptr;
+    assert(!!m_tail == !!m_count);
+    return m_count;
 }
 
 template <typename T> T& PLSRenderContext::PerFlushLinkedList<T>::tail() const
@@ -729,6 +574,13 @@
         m_tail->next = node;
     }
     m_tail = node;
+    ++m_count;
 }
 
+template <typename T> void PLSRenderContext::PerFlushLinkedList<T>::reset()
+{
+    m_tail = nullptr;
+    m_head = nullptr;
+    m_count = 0;
+}
 } // namespace rive::pls
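FlushDescriptor now carries everything a backend needs (render target, clear color, wireframe flag, and a pointer to the draw list), and PerFlushLinkedList gains const iteration so a backend can walk *desc.drawList without mutating it. A hedged sketch of the consuming side, modeled on the PLSRenderContextD3DImpl::flush() changes later in this diff (clearing and draw dispatch are backend-specific and only hinted at in comments):

```cpp
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_render_context.hpp"

using namespace rive::pls;

void flushSketch(const PLSRenderContext::FlushDescriptor& desc)
{
    if (desc.loadAction == LoadAction::clear)
    {
        float clearColor4f[4];
        UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
        // ... clear desc.renderTarget to clearColor4f ...
    }

    // The draw list arrives through the descriptor instead of a protected
    // member; the new const begin()/end() overloads make this loop legal.
    for (const Draw& draw : *desc.drawList)
    {
        if (draw.vertexOrInstanceCount == 0)
        {
            continue;
        }
        // ... select shaders for draw.drawType / draw.shaderFeatures and issue
        //     draw.vertexOrInstanceCount vertices or instances ...
    }
}
```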
diff --git a/include/rive/pls/pls_render_context_buffer_ring_impl.hpp b/include/rive/pls/pls_render_context_buffer_ring_impl.hpp
new file mode 100644
index 0000000..d92c162
--- /dev/null
+++ b/include/rive/pls/pls_render_context_buffer_ring_impl.hpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2023 Rive
+ */
+
+#pragma once
+
+#include "rive/pls/pls_render_context_impl.hpp"
+#include "rive/pls/buffer_ring.hpp"
+
+namespace rive::pls
+{
+class PLSRenderContextBufferRingImpl : public PLSRenderContextImpl
+{
+public:
+    PLSRenderContextBufferRingImpl(const PlatformFeatures& platformFeatures)
+    {
+        m_platformFeatures = platformFeatures;
+    }
+
+    void resizePathTexture(size_t width, size_t height) override;
+    void resizeContourTexture(size_t width, size_t height) override;
+    void resizeSimpleColorRampsBuffer(size_t sizeInBytes) override;
+    void resizeGradSpanBuffer(size_t sizeInBytes) override;
+    void resizeTessVertexSpanBuffer(size_t sizeInBytes) override;
+    void resizeTriangleVertexBuffer(size_t sizeInBytes) override;
+
+    void resizeGradientTexture(size_t height) override { allocateGradientTexture(height); }
+    void resizeTessellationTexture(size_t height) override { allocateTessellationTexture(height); }
+
+    void* mapPathTexture() override { return m_pathBuffer->mapBuffer(); }
+    void* mapContourTexture() override { return m_contourBuffer->mapBuffer(); }
+    void* mapSimpleColorRampsBuffer() override { return m_simpleColorRampsBuffer->mapBuffer(); }
+    void* mapGradSpanBuffer() override { return m_gradSpanBuffer->mapBuffer(); }
+    void* mapTessVertexSpanBuffer() override { return m_tessSpanBuffer->mapBuffer(); }
+    void* mapTriangleVertexBuffer() override { return m_triangleBuffer->mapBuffer(); }
+
+    void unmapPathTexture(size_t widthWritten, size_t heightWritten) override;
+    void unmapContourTexture(size_t widthWritten, size_t heightWritten) override;
+    void unmapSimpleColorRampsBuffer(size_t bytesWritten) override;
+    void unmapGradSpanBuffer(size_t bytesWritten) override;
+    void unmapTessVertexSpanBuffer(size_t bytesWritten) override;
+    void unmapTriangleVertexBuffer(size_t bytesWritten) override;
+
+    void updateFlushUniforms(const FlushUniforms*) override;
+
+protected:
+    const TexelBufferRing* pathBufferRing() { return m_pathBuffer.get(); }
+    const TexelBufferRing* contourBufferRing() { return m_contourBuffer.get(); }
+    const BufferRingImpl* simpleColorRampsBufferRing() const
+    {
+        return m_simpleColorRampsBuffer.get();
+    }
+    const BufferRingImpl* gradSpanBufferRing() const { return m_gradSpanBuffer.get(); }
+    const BufferRingImpl* tessSpanBufferRing() { return m_tessSpanBuffer.get(); }
+    const BufferRingImpl* triangleBufferRing() { return m_triangleBuffer.get(); }
+    const BufferRingImpl* uniformBufferRing() const { return m_uniformBuffer.get(); }
+
+    virtual std::unique_ptr<BufferRingImpl> makeVertexBufferRing(size_t capacity,
+                                                                 size_t itemSizeInBytes) = 0;
+
+    virtual std::unique_ptr<TexelBufferRing> makeTexelBufferRing(TexelBufferRing::Format,
+                                                                 size_t widthInItems,
+                                                                 size_t height,
+                                                                 size_t texelsPerItem,
+                                                                 int textureIdx,
+                                                                 TexelBufferRing::Filter) = 0;
+
+    virtual std::unique_ptr<BufferRingImpl> makePixelUnpackBufferRing(size_t capacity,
+                                                                      size_t itemSizeInBytes) = 0;
+
+    virtual std::unique_ptr<BufferRingImpl> makeUniformBufferRing(size_t sizeInBytes) = 0;
+
+    virtual void allocateGradientTexture(size_t height) = 0;
+    virtual void allocateTessellationTexture(size_t height) = 0;
+
+private:
+    std::unique_ptr<TexelBufferRing> m_pathBuffer;
+    std::unique_ptr<TexelBufferRing> m_contourBuffer;
+    std::unique_ptr<BufferRingImpl> m_simpleColorRampsBuffer;
+    std::unique_ptr<BufferRingImpl> m_gradSpanBuffer;
+    std::unique_ptr<BufferRingImpl> m_tessSpanBuffer;
+    std::unique_ptr<BufferRingImpl> m_triangleBuffer;
+    std::unique_ptr<BufferRingImpl> m_uniformBuffer;
+};
+} // namespace rive::pls
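PLSRenderContextBufferRingImpl adapts the new interface onto the existing BufferRingImpl/TexelBufferRing machinery. Its .cpp is not part of this diff, but given the header, each resize*/map*/unmap* presumably delegates to a ring created through the subclass's factory methods, roughly as below (the byte-to-capacity conversion is a guess; only mapGradSpanBuffer() is actually shown inline in the header above):

```cpp
#include "rive/pls/pls_render_context_buffer_ring_impl.hpp"

namespace rive::pls
{
// Hedged sketch of what the unseen pls_render_context_buffer_ring_impl.cpp
// plausibly does for one of the buffers.
void PLSRenderContextBufferRingImpl::resizeGradSpanBuffer(size_t sizeInBytes)
{
    m_gradSpanBuffer = makeVertexBufferRing(sizeInBytes / sizeof(GradientSpan),
                                            sizeof(GradientSpan));
}

void PLSRenderContextBufferRingImpl::unmapGradSpanBuffer(size_t bytesWritten)
{
    // unmapAndSubmitBuffer() both unmaps the mapped ring buffer and queues it
    // for GPU consumption, in whatever way the concrete ring defines.
    m_gradSpanBuffer->unmapAndSubmitBuffer(bytesWritten);
}
} // namespace rive::pls
```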
diff --git a/include/rive/pls/pls_render_context_impl.hpp b/include/rive/pls/pls_render_context_impl.hpp
new file mode 100644
index 0000000..7ef295f
--- /dev/null
+++ b/include/rive/pls/pls_render_context_impl.hpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2023 Rive
+ */
+
+#pragma once
+
+#include "rive/pls/pls_render_context.hpp"
+
+namespace rive::pls
+{
+class PLSRenderContextImpl
+{
+public:
+    virtual ~PLSRenderContextImpl() {}
+
+    const PlatformFeatures& platformFeatures() const { return m_platformFeatures; }
+
+    virtual void prepareToMapBuffers() {}
+
+    virtual void resizePathTexture(size_t width, size_t height) = 0;
+    virtual void resizeContourTexture(size_t width, size_t height) = 0;
+    virtual void resizeSimpleColorRampsBuffer(size_t sizeInBytes) = 0;
+    virtual void resizeGradSpanBuffer(size_t sizeInBytes) = 0;
+    virtual void resizeTessVertexSpanBuffer(size_t sizeInBytes) = 0;
+    virtual void resizeTriangleVertexBuffer(size_t sizeInBytes) = 0;
+    virtual void resizeGradientTexture(size_t height) = 0;
+    virtual void resizeTessellationTexture(size_t height) = 0;
+
+    virtual void* mapPathTexture() = 0;
+    virtual void* mapContourTexture() = 0;
+    virtual void* mapSimpleColorRampsBuffer() = 0;
+    virtual void* mapGradSpanBuffer() = 0;
+    virtual void* mapTessVertexSpanBuffer() = 0;
+    virtual void* mapTriangleVertexBuffer() = 0;
+
+    virtual void unmapPathTexture(size_t widthWritten, size_t heightWritten) = 0;
+    virtual void unmapContourTexture(size_t widthWritten, size_t heightWritten) = 0;
+    virtual void unmapSimpleColorRampsBuffer(size_t bytesWritten) = 0;
+    virtual void unmapGradSpanBuffer(size_t bytesWritten) = 0;
+    virtual void unmapTessVertexSpanBuffer(size_t bytesWritten) = 0;
+    virtual void unmapTriangleVertexBuffer(size_t bytesWritten) = 0;
+
+    virtual void updateFlushUniforms(const FlushUniforms*) = 0;
+
+    virtual void flush(const PLSRenderContext::FlushDescriptor&) = 0;
+
+protected:
+    PlatformFeatures m_platformFeatures;
+};
+} // namespace rive::pls
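PLSRenderContextImpl is the complete contract between the backend-agnostic context and a backend. The real driver lives in pls_render_context.cpp (not shown in this diff), but the per-flush sequence implied by the interface is roughly the following hedged outline ('uniforms' stands in for values the real context computes while building the frame):

```cpp
#include "rive/pls/pls.hpp"
#include "rive/pls/pls_render_context_impl.hpp"

using namespace rive::pls;

void driveFlush(PLSRenderContextImpl* impl,
                const FlushUniforms* uniforms,
                const PLSRenderContext::FlushDescriptor& desc)
{
    impl->prepareToMapBuffers();

    // ... map each per-flush buffer, fill it through WriteOnlyMappedMemory<T>,
    //     and unmap it with the byte/texel counts actually written (see the
    //     gradient-span sketch after the buffer_ring.hpp diff above) ...

    // Push uniforms, then hand the assembled flush to the backend.
    impl->updateFlushUniforms(uniforms);
    impl->flush(desc);
}
```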
diff --git a/obfuscator/pls_renames.h b/obfuscator/pls_renames.h
index 45283ed..d5c0ca3 100644
--- a/obfuscator/pls_renames.h
+++ b/obfuscator/pls_renames.h
@@ -57,7 +57,7 @@
 #define PLSPaint r_38
 #define PLSPath r_39
 #define PLSRenderContext r_3a
-#define PLSRenderContextGL r_3b
+#define PLSRenderContextGLImpl r_3b
 #define PLSRenderContextMetal r_3c
 #define PLSRenderTarget r_3d
 #define PLSRenderTargetGL r_3e
diff --git a/out/premake5_pls_renderer.lua b/out/premake5_pls_renderer.lua
index dd3e22b..f24dd44 100644
--- a/out/premake5_pls_renderer.lua
+++ b/out/premake5_pls_renderer.lua
@@ -121,7 +121,7 @@
     do
         files {"../renderer/gl/buffer_ring_gl.cpp",
                "../renderer/gl/gl_utils.cpp",
-               "../renderer/gl/pls_render_context_gl.cpp",
+               "../renderer/gl/pls_render_context_gl_impl.cpp",
                "../renderer/gl/pls_render_target_gl.cpp"}
     end
 
diff --git a/path_fiddle/fiddle_context_d3d.cpp b/path_fiddle/fiddle_context_d3d.cpp
index 10b6829..60498f8 100644
--- a/path_fiddle/fiddle_context_d3d.cpp
+++ b/path_fiddle/fiddle_context_d3d.cpp
@@ -2,7 +2,7 @@
 
 #include "rive/pls/pls_factory.hpp"
 #include "rive/pls/pls_renderer.hpp"
-#include "rive/pls/d3d/pls_render_context_d3d.hpp"
+#include "rive/pls/d3d/pls_render_context_d3d_impl.hpp"
 #include "rive/pls/d3d/d3d11.hpp"
 #include <array>
 #include <dxgi1_2.h>
@@ -25,7 +25,8 @@
         m_d3dFactory(std::move(d3dFactory)),
         m_gpu(std::move(gpu)),
         m_gpuContext(std::move(gpuContext)),
-        m_plsContext(new PLSRenderContextD3D(m_gpu, m_gpuContext, isIntel))
+        m_plsContext(new PLSRenderContext(
+            std::make_unique<PLSRenderContextD3DImpl>(m_gpu, m_gpuContext, isIntel)))
     {}
 
     float dpiScale() const override { return 1; }
@@ -51,7 +52,8 @@
                                                        NULL,
                                                        m_swapchain.GetAddressOf()));
 
-        m_renderTarget = m_plsContext->makeRenderTarget(width, height);
+        auto d3dContext = static_cast<PLSRenderContextD3DImpl*>(m_plsContext->impl());
+        m_renderTarget = d3dContext->makeRenderTarget(width, height);
     }
 
     void toggleZoomWindow() override {}
@@ -91,7 +93,7 @@
     ComPtr<ID3D11Device> m_gpu;
     ComPtr<ID3D11DeviceContext> m_gpuContext;
     ComPtr<IDXGISwapChain1> m_swapchain;
-    std::unique_ptr<PLSRenderContextD3D> m_plsContext;
+    std::unique_ptr<PLSRenderContext> m_plsContext;
     rcp<PLSRenderTargetD3D> m_renderTarget;
 };
 
diff --git a/path_fiddle/fiddle_context_gl.cpp b/path_fiddle/fiddle_context_gl.cpp
index 03ebb40..4da4033 100644
--- a/path_fiddle/fiddle_context_gl.cpp
+++ b/path_fiddle/fiddle_context_gl.cpp
@@ -3,7 +3,7 @@
 #include "rive/pls/gl/gles3.hpp"
 #include "rive/pls/pls_factory.hpp"
 #include "rive/pls/pls_renderer.hpp"
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 #include "rive/pls/gl/pls_render_target_gl.hpp"
 
 #ifdef RIVE_WASM
@@ -274,7 +274,8 @@
 
     void onSizeChanged(int width, int height) override
     {
-        m_renderTarget = m_plsContext->makeOffscreenRenderTarget(width, height);
+        auto glContext = static_cast<PLSRenderContextGLImpl*>(m_plsContext->impl());
+        m_renderTarget = glContext->makeOffscreenRenderTarget(width, height);
     }
 
     std::unique_ptr<Renderer> makeRenderer(int width, int height) override
@@ -305,7 +306,8 @@
     void shrinkGPUResourcesToFit() final { m_plsContext->shrinkGPUResourcesToFit(); }
 
 private:
-    std::unique_ptr<PLSRenderContextGL> m_plsContext = PLSRenderContextGL::Make();
+    std::unique_ptr<PLSRenderContext> m_plsContext =
+        std::make_unique<PLSRenderContext>(PLSRenderContextGLImpl::Make());
     rcp<PLSRenderTargetGL> m_renderTarget;
 };
 
diff --git a/path_fiddle/fiddle_context_metal.mm b/path_fiddle/fiddle_context_metal.mm
index e77ca29..01c107d 100644
--- a/path_fiddle/fiddle_context_metal.mm
+++ b/path_fiddle/fiddle_context_metal.mm
@@ -2,7 +2,7 @@
 
 #include "rive/pls/pls_factory.hpp"
 #include "rive/pls/pls_renderer.hpp"
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 #include "rive/pls/gl/pls_render_target_gl.hpp"
 #include "rive/pls/metal/pls_render_context_metal.h"
 #include "rive/pls/metal/pls_render_target_metal.h"
diff --git a/renderer/d3d/pls_render_context_d3d.cpp b/renderer/d3d/pls_render_context_d3d_impl.cpp
similarity index 95%
rename from renderer/d3d/pls_render_context_d3d.cpp
rename to renderer/d3d/pls_render_context_d3d_impl.cpp
index 7f1ef56..4cbee67 100644
--- a/renderer/d3d/pls_render_context_d3d.cpp
+++ b/renderer/d3d/pls_render_context_d3d_impl.cpp
@@ -2,7 +2,7 @@
  * Copyright 2023 Rive
  */
 
-#include "rive/pls/d3d/pls_render_context_d3d.hpp"
+#include "rive/pls/d3d/pls_render_context_d3d_impl.hpp"
 
 #include <D3DCompiler.h>
 #include <sstream>
@@ -151,10 +151,10 @@
     return platformFeatures;
 }
 
-PLSRenderContextD3D::PLSRenderContextD3D(ComPtr<ID3D11Device> gpu,
-                                         ComPtr<ID3D11DeviceContext> gpuContext,
-                                         bool isIntel) :
-    PLSRenderContext(platform_features_d3d()),
+PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr<ID3D11Device> gpu,
+                                                 ComPtr<ID3D11DeviceContext> gpuContext,
+                                                 bool isIntel) :
+    PLSRenderContextBufferRingImpl(platform_features_d3d()),
     m_isIntel(isIntel),
     m_gpu(gpu),
     m_gpuContext(gpuContext)
@@ -345,7 +345,6 @@
         desc.Usage = D3D11_USAGE_DEFAULT;
         desc.BindFlags = bindFlags;
         desc.CPUAccessFlags = 0;
-        desc.StructureByteStride = itemSizeInBytes;
 
         for (size_t i = 0; i < kBufferRingSize; ++i)
         {
@@ -383,8 +382,9 @@
     ComPtr<ID3D11Buffer> m_buffers[kBufferRingSize];
 };
 
-std::unique_ptr<BufferRingImpl> PLSRenderContextD3D::makeVertexBufferRing(size_t capacity,
-                                                                          size_t itemSizeInBytes)
+std::unique_ptr<BufferRingImpl> PLSRenderContextD3DImpl::makeVertexBufferRing(
+    size_t capacity,
+    size_t itemSizeInBytes)
 {
     return std::make_unique<BufferRingD3D>(m_gpu.Get(),
                                            m_gpuContext,
@@ -393,7 +393,7 @@
                                            D3D11_BIND_VERTEX_BUFFER);
 }
 
-std::unique_ptr<BufferRingImpl> PLSRenderContextD3D::makePixelUnpackBufferRing(
+std::unique_ptr<BufferRingImpl> PLSRenderContextD3DImpl::makePixelUnpackBufferRing(
     size_t capacity,
     size_t itemSizeInBytes)
 {
@@ -401,7 +401,8 @@
     return std::make_unique<CPUOnlyBufferRing>(capacity, itemSizeInBytes);
 }
 
-std::unique_ptr<BufferRingImpl> PLSRenderContextD3D::makeUniformBufferRing(size_t itemSizeInBytes)
+std::unique_ptr<BufferRingImpl> PLSRenderContextD3DImpl::makeUniformBufferRing(
+    size_t itemSizeInBytes)
 {
     return std::make_unique<BufferRingD3D>(m_gpu.Get(),
                                            m_gpuContext,
@@ -456,7 +457,7 @@
     ComPtr<ID3D11ShaderResourceView> m_srvs[kBufferRingSize];
 };
 
-std::unique_ptr<TexelBufferRing> PLSRenderContextD3D::makeTexelBufferRing(
+std::unique_ptr<TexelBufferRing> PLSRenderContextD3DImpl::makeTexelBufferRing(
     TexelBufferRing::Format format,
     size_t widthInItems,
     size_t height,
@@ -510,12 +511,12 @@
     m_targetUAV = make_simple_2d_uav(gpu, m_targetTexture.Get(), DXGI_FORMAT_R8G8B8A8_UNORM);
 }
 
-rcp<PLSRenderTargetD3D> PLSRenderContextD3D::makeRenderTarget(size_t width, size_t height)
+rcp<PLSRenderTargetD3D> PLSRenderContextD3DImpl::makeRenderTarget(size_t width, size_t height)
 {
     return rcp(new PLSRenderTargetD3D(m_gpu.Get(), width, height));
 }
 
-void PLSRenderContextD3D::allocateGradientTexture(size_t height)
+void PLSRenderContextD3DImpl::allocateGradientTexture(size_t height)
 {
     m_gradTexture = make_simple_2d_texture(m_gpu.Get(),
                                            DXGI_FORMAT_R8G8B8A8_UNORM,
@@ -528,7 +529,7 @@
         make_simple_2d_rtv(m_gpu.Get(), m_gradTexture.Get(), DXGI_FORMAT_R8G8B8A8_UNORM);
 }
 
-void PLSRenderContextD3D::allocateTessellationTexture(size_t height)
+void PLSRenderContextD3DImpl::allocateTessellationTexture(size_t height)
 {
     m_tessTexture = make_simple_2d_texture(m_gpu.Get(),
                                            DXGI_FORMAT_R32G32B32A32_UINT,
@@ -541,8 +542,8 @@
         make_simple_2d_rtv(m_gpu.Get(), m_tessTexture.Get(), DXGI_FORMAT_R32G32B32A32_UINT);
 }
 
-void PLSRenderContextD3D::setPipelineLayoutAndShaders(DrawType drawType,
-                                                      const ShaderFeatures& shaderFeatures)
+void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType,
+                                                          const ShaderFeatures& shaderFeatures)
 {
     uint32_t vertexShaderKey = ShaderUniqueKey(SourceType::vertexOnly, drawType, shaderFeatures);
     auto vertexEntry = m_drawVertexShaders.find(vertexShaderKey);
@@ -670,16 +671,15 @@
     return static_cast<const TexelBufferD3D*>(texelBufferRing)->submittedSRV();
 }
 
-void PLSRenderContextD3D::onFlush(const FlushDescriptor& desc)
+void PLSRenderContextD3DImpl::flush(const PLSRenderContext::FlushDescriptor& desc)
 {
-    auto renderTarget =
-        static_cast<const PLSRenderTargetD3D*>(frameDescriptor().renderTarget.get());
+    auto renderTarget = static_cast<const PLSRenderTargetD3D*>(desc.renderTarget);
 
     constexpr static UINT kZero[4]{};
     if (desc.loadAction == LoadAction::clear)
     {
         float clearColor4f[4];
-        UnpackColorToRGBA32F(frameDescriptor().clearColor, clearColor4f);
+        UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
         m_gpuContext->ClearUnorderedAccessViewFloat(renderTarget->m_targetUAV.Get(), clearColor4f);
     }
     m_gpuContext->ClearUnorderedAccessViewUint(renderTarget->m_coverageUAV.Get(), kZero);
@@ -803,7 +803,11 @@
     static_assert(kTriangleVertexDataSlot == 1);
     UINT vertexStrides[] = {sizeof(PatchVertex), sizeof(TriangleVertex)};
     UINT vertexOffsets[] = {0, 0};
-    m_gpuContext->IASetVertexBuffers(0, 2, vertexBuffers, vertexStrides, vertexOffsets);
+    m_gpuContext->IASetVertexBuffers(0,
+                                     desc.hasTriangleVertices ? 2 : 1,
+                                     vertexBuffers,
+                                     vertexStrides,
+                                     vertexOffsets);
 
     ID3D11ShaderResourceView* vertexTextureViews[] = {m_tessTextureSRV.Get(),
                                                       submitted_srv(pathBufferRing()),
@@ -820,14 +824,14 @@
                                0,
                                1};
     m_gpuContext->RSSetViewports(1, &viewport);
-    if (frameDescriptor().wireframe)
+    if (desc.wireframe)
     {
         m_gpuContext->RSSetState(m_debugWireframeState.Get());
     }
 
     m_gpuContext->PSSetShaderResources(kGradTextureIdx, 1, m_gradTextureSRV.GetAddressOf());
 
-    for (const Draw& draw : m_drawList)
+    for (const Draw& draw : *desc.drawList)
     {
         if (draw.vertexOrInstanceCount == 0)
         {
@@ -857,7 +861,7 @@
         }
     }
 
-    if (frameDescriptor().wireframe)
+    if (desc.wireframe)
     {
         m_gpuContext->RSSetState(m_rasterState.Get());
     }
diff --git a/renderer/gl/pls_impl_ext_native.cpp b/renderer/gl/pls_impl_ext_native.cpp
index 94c11d4..6bfde49 100644
--- a/renderer/gl/pls_impl_ext_native.cpp
+++ b/renderer/gl/pls_impl_ext_native.cpp
@@ -2,7 +2,7 @@
  * Copyright 2023 Rive
  */
 
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 
 #include "gl/gl_utils.hpp"
 #include "rive/math/simd.hpp"
@@ -86,7 +86,7 @@
     GLint m_clearColorUniLocation = -1;
 };
 
-class PLSRenderContextGL::PLSImplEXTNative : public PLSRenderContextGL::PLSImpl
+class PLSRenderContextGLImpl::PLSImplEXTNative : public PLSRenderContextGLImpl::PLSImpl
 {
 public:
     PLSImplEXTNative(const GLExtensions& extensions) : m_extensions(extensions)
@@ -120,30 +120,29 @@
         return rcp(new PLSRenderTargetGL(width, height, platformFeatures));
     }
 
-    void activatePixelLocalStorage(PLSRenderContextGL* context,
-                                   const PLSRenderTargetGL* renderTarget,
-                                   LoadAction loadAction,
-                                   bool needsClipBuffer) override
+    void activatePixelLocalStorage(PLSRenderContextGLImpl* context,
+                                   const PLSRenderContext::FlushDescriptor& desc) override
     {
         assert(context->m_extensions.EXT_shader_pixel_local_storage);
         assert(context->m_extensions.EXT_shader_framebuffer_fetch ||
                context->m_extensions.ARM_shader_framebuffer_fetch);
 
+        auto renderTarget = static_cast<const PLSRenderTargetGL*>(desc.renderTarget);
         glBindFramebuffer(GL_FRAMEBUFFER, renderTarget->drawFramebufferID());
         glEnable(GL_SHADER_PIXEL_LOCAL_STORAGE_EXT);
 
         uint32_t ops = loadstoreops::kClearCoverage;
         float clearColor4f[4];
-        if (loadAction == LoadAction::clear)
+        if (desc.loadAction == LoadAction::clear)
         {
-            UnpackColorToRGBA32F(context->frameDescriptor().clearColor, clearColor4f);
+            UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
             ops |= loadstoreops::kClearColor;
         }
         else
         {
             ops |= loadstoreops::kLoadColor;
         }
-        if (needsClipBuffer)
+        if (desc.needsClipBuffer)
         {
             ops |= loadstoreops::kClearClip;
         }
@@ -171,7 +170,7 @@
         }
     }
 
-    void deactivatePixelLocalStorage(PLSRenderContextGL* context) override
+    void deactivatePixelLocalStorage(PLSRenderContextGLImpl* context) override
     {
         // Issue a fullscreen draw that transfers the color information in pixel local storage to
         // the main framebuffer.
@@ -195,7 +194,7 @@
     GLuint m_plsLoadStoreVAO = 0;
 };
 
-std::unique_ptr<PLSRenderContextGL::PLSImpl> PLSRenderContextGL::MakePLSImplEXTNative(
+std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::MakePLSImplEXTNative(
     const GLExtensions& extensions)
 {
     return std::make_unique<PLSImplEXTNative>(extensions);
diff --git a/renderer/gl/pls_impl_framebuffer_fetch.cpp b/renderer/gl/pls_impl_framebuffer_fetch.cpp
index 7454cd6..79a65f0 100644
--- a/renderer/gl/pls_impl_framebuffer_fetch.cpp
+++ b/renderer/gl/pls_impl_framebuffer_fetch.cpp
@@ -2,7 +2,7 @@
  * Copyright 2023 Rive
  */
 
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 
 #include "rive/pls/gl/pls_render_target_gl.hpp"
 
@@ -15,7 +15,7 @@
                                               GL_COLOR_ATTACHMENT2,
                                               GL_COLOR_ATTACHMENT3};
 
-class PLSRenderContextGL::PLSImplFramebufferFetch : public PLSRenderContextGL::PLSImpl
+class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public PLSRenderContextGLImpl::PLSImpl
 {
 public:
     PLSImplFramebufferFetch(const GLExtensions& extensions) : m_extensions(extensions) {}
@@ -47,13 +47,12 @@
         return renderTarget;
     }
 
-    void activatePixelLocalStorage(PLSRenderContextGL* context,
-                                   const PLSRenderTargetGL* renderTarget,
-                                   LoadAction loadAction,
-                                   bool needsClipBuffer) override
+    void activatePixelLocalStorage(PLSRenderContextGLImpl* context,
+                                   const PLSRenderContext::FlushDescriptor& desc) override
     {
         assert(context->m_extensions.EXT_shader_framebuffer_fetch);
 
+        auto renderTarget = static_cast<const PLSRenderTargetGL*>(desc.renderTarget);
         glBindFramebuffer(GL_FRAMEBUFFER, renderTarget->drawFramebufferID());
 
         // Enable multiple render targets, with a draw buffer for each PLS plane.
@@ -63,26 +62,26 @@
         // exception of the color buffer after an intermediate flush.
         static_assert(kFramebufferPlaneIdx == 0);
         glInvalidateFramebuffer(GL_FRAMEBUFFER,
-                                loadAction == LoadAction::clear ? 4 : 3,
-                                loadAction == LoadAction::clear ? kPLSDrawBuffers
-                                                                : kPLSDrawBuffers + 1);
+                                desc.loadAction == LoadAction::clear ? 4 : 3,
+                                desc.loadAction == LoadAction::clear ? kPLSDrawBuffers
+                                                                     : kPLSDrawBuffers + 1);
 
         // Clear the PLS planes.
         constexpr static uint32_t kZero[4]{};
-        if (loadAction == LoadAction::clear)
+        if (desc.loadAction == LoadAction::clear)
         {
             float clearColor4f[4];
-            UnpackColorToRGBA32F(context->frameDescriptor().clearColor, clearColor4f);
+            UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
             glClearBufferfv(GL_COLOR, kFramebufferPlaneIdx, clearColor4f);
         }
         glClearBufferuiv(GL_COLOR, kCoveragePlaneIdx, kZero);
-        if (needsClipBuffer)
+        if (desc.needsClipBuffer)
         {
             glClearBufferuiv(GL_COLOR, kClipPlaneIdx, kZero);
         }
     }
 
-    void deactivatePixelLocalStorage(PLSRenderContextGL*) override
+    void deactivatePixelLocalStorage(PLSRenderContextGLImpl*) override
     {
         // Instruct the driver not to flush PLS contents from tiled memory, with the exception of
         // the color buffer.
@@ -124,8 +123,8 @@
     const GLExtensions m_extensions;
 };
 
-std::unique_ptr<PLSRenderContextGL::PLSImpl> PLSRenderContextGL::MakePLSImplFramebufferFetch(
-    const GLExtensions& extensions)
+std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::
+    MakePLSImplFramebufferFetch(const GLExtensions& extensions)
 {
     return std::make_unique<PLSImplFramebufferFetch>(extensions);
 }
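
For readers following the GL backend changes: the PLSImpl subclasses in this patch now receive all flush state through a single PLSRenderContext::FlushDescriptor instead of separate renderTarget/loadAction/needsClipBuffer parameters. The sketch below shows roughly the interface shape implied by the overrides in these hunks; the actual declaration lives in pls_render_context_gl_impl.hpp, and anything not visible in the patch (additional members, default implementations) is an assumption.

    // Approximate shape of PLSRenderContextGLImpl::PLSImpl, inferred from the
    // overrides in this patch -- not the actual declaration.
    class PLSRenderContextGLImpl::PLSImpl
    {
    public:
        virtual ~PLSImpl() = default;

        // (wrapGLRenderTarget and any offscreen-target factory are omitted here;
        // their full signatures are not shown in this patch.)

        // Backends read renderTarget, loadAction, clearColor, and needsClipBuffer
        // directly from the descriptor now.
        virtual void activatePixelLocalStorage(PLSRenderContextGLImpl*,
                                               const PLSRenderContext::FlushDescriptor&) = 0;
        virtual void deactivatePixelLocalStorage(PLSRenderContextGLImpl*) = 0;

        virtual const char* shaderDefineName() const = 0;
        virtual void onBarrier() {} // only the RW-texture path issues a real barrier
    };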
diff --git a/renderer/gl/pls_impl_rw_texture.cpp b/renderer/gl/pls_impl_rw_texture.cpp
index 383d240..353f510 100644
--- a/renderer/gl/pls_impl_rw_texture.cpp
+++ b/renderer/gl/pls_impl_rw_texture.cpp
@@ -2,7 +2,7 @@
  * Copyright 2023 Rive
  */
 
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 
 #include "../out/obj/generated/glsl.exports.h"
 
@@ -13,7 +13,7 @@
                                               GL_COLOR_ATTACHMENT2,
                                               GL_COLOR_ATTACHMENT3};
 
-class PLSRenderContextGL::PLSImplRWTexture : public PLSRenderContextGL::PLSImpl
+class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl::PLSImpl
 {
     rcp<PLSRenderTargetGL> wrapGLRenderTarget(GLuint framebufferID,
                                               size_t width,
@@ -39,22 +39,22 @@
         return renderTarget;
     }
 
-    void activatePixelLocalStorage(PLSRenderContextGL* context,
-                                   const PLSRenderTargetGL* renderTarget,
-                                   LoadAction loadAction,
-                                   bool needsClipBuffer) override
+    void activatePixelLocalStorage(PLSRenderContextGLImpl*,
+                                   const PLSRenderContext::FlushDescriptor& desc) override
     {
+        auto renderTarget = static_cast<const PLSRenderTargetGL*>(desc.renderTarget);
+
         // Clear the necessary textures.
         constexpr static GLuint kZero[4]{};
         glBindFramebuffer(GL_FRAMEBUFFER, renderTarget->sideFramebufferID());
-        if (loadAction == LoadAction::clear)
+        if (desc.loadAction == LoadAction::clear)
         {
             float clearColor4f[4];
-            UnpackColorToRGBA32F(context->frameDescriptor().clearColor, clearColor4f);
+            UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
             glClearBufferfv(GL_COLOR, kFramebufferPlaneIdx, clearColor4f);
         }
         glClearBufferuiv(GL_COLOR, kCoveragePlaneIdx, kZero);
-        if (needsClipBuffer)
+        if (desc.needsClipBuffer)
         {
             glClearBufferuiv(GL_COLOR, kClipPlaneIdx, kZero);
         }
@@ -81,7 +81,7 @@
                            0,
                            GL_READ_WRITE,
                            GL_RGBA8);
-        if (needsClipBuffer)
+        if (desc.needsClipBuffer)
         {
             glBindImageTexture(kClipPlaneIdx,
                                renderTarget->m_clipTextureID,
@@ -96,7 +96,7 @@
         glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
     }
 
-    void deactivatePixelLocalStorage(PLSRenderContextGL*) override
+    void deactivatePixelLocalStorage(PLSRenderContextGLImpl*) override
     {
         glMemoryBarrier(GL_ALL_BARRIER_BITS);
     }
@@ -106,7 +106,7 @@
     void onBarrier() override { return glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); }
 };
 
-std::unique_ptr<PLSRenderContextGL::PLSImpl> PLSRenderContextGL::MakePLSImplRWTexture()
+std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::MakePLSImplRWTexture()
 {
     return std::make_unique<PLSImplRWTexture>();
 }
diff --git a/renderer/gl/pls_impl_webgl.cpp b/renderer/gl/pls_impl_webgl.cpp
index 7a91781..512c79d 100644
--- a/renderer/gl/pls_impl_webgl.cpp
+++ b/renderer/gl/pls_impl_webgl.cpp
@@ -2,7 +2,7 @@
  * Copyright 2022 Rive
  */
 
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 
 #include <stdio.h>
 
@@ -15,7 +15,7 @@
 
 namespace rive::pls
 {
-class PLSRenderContextGL::PLSImplWebGL : public PLSRenderContextGL::PLSImpl
+class PLSRenderContextGLImpl::PLSImplWebGL : public PLSRenderContextGLImpl::PLSImpl
 {
     rcp<PLSRenderTargetGL> wrapGLRenderTarget(GLuint framebufferID,
                                               size_t width,
@@ -53,30 +53,29 @@
         return renderTarget;
     }
 
-    void activatePixelLocalStorage(PLSRenderContextGL* context,
-                                   const PLSRenderTargetGL* renderTarget,
-                                   LoadAction loadAction,
-                                   bool needsClipBuffer) override
+    void activatePixelLocalStorage(PLSRenderContextGLImpl*,
+                                   const PLSRenderContext::FlushDescriptor& desc) override
     {
+        auto renderTarget = static_cast<const PLSRenderTargetGL*>(desc.renderTarget);
         glBindFramebuffer(GL_FRAMEBUFFER, renderTarget->drawFramebufferID());
 
-        if (loadAction == LoadAction::clear)
+        if (desc.loadAction == LoadAction::clear)
         {
             float clearColor4f[4];
-            UnpackColorToRGBA32F(context->frameDescriptor().clearColor, clearColor4f);
+            UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
             glFramebufferPixelLocalClearValuefvWEBGL(kFramebufferPlaneIdx, clearColor4f);
         }
 
-        GLenum loadOps[4] = {(GLenum)(loadAction == LoadAction::clear ? GL_LOAD_OP_CLEAR_WEBGL
-                                                                      : GL_LOAD_OP_LOAD_WEBGL),
+        GLenum loadOps[4] = {(GLenum)(desc.loadAction == LoadAction::clear ? GL_LOAD_OP_CLEAR_WEBGL
+                                                                           : GL_LOAD_OP_LOAD_WEBGL),
                              GL_LOAD_OP_ZERO_WEBGL,
                              GL_DONT_CARE,
-                             (GLenum)(needsClipBuffer ? GL_LOAD_OP_ZERO_WEBGL : GL_DONT_CARE)};
+                             (GLenum)(desc.needsClipBuffer ? GL_LOAD_OP_ZERO_WEBGL : GL_DONT_CARE)};
 
         glBeginPixelLocalStorageWEBGL(4, loadOps);
     }
 
-    void deactivatePixelLocalStorage(PLSRenderContextGL*) override
+    void deactivatePixelLocalStorage(PLSRenderContextGLImpl*) override
     {
         constexpr static GLenum kStoreOps[4] = {GL_STORE_OP_STORE_WEBGL,
                                                 GL_DONT_CARE,
@@ -88,7 +87,7 @@
     const char* shaderDefineName() const override { return GLSL_PLS_IMPL_WEBGL; }
 };
 
-std::unique_ptr<PLSRenderContextGL::PLSImpl> PLSRenderContextGL::MakePLSImplWebGL()
+std::unique_ptr<PLSRenderContextGLImpl::PLSImpl> PLSRenderContextGLImpl::MakePLSImplWebGL()
 {
     return std::make_unique<PLSImplWebGL>();
 }
diff --git a/renderer/gl/pls_render_context_gl.cpp b/renderer/gl/pls_render_context_gl_impl.cpp
similarity index 88%
rename from renderer/gl/pls_render_context_gl.cpp
rename to renderer/gl/pls_render_context_gl_impl.cpp
index 48445dd..70c0e01 100644
--- a/renderer/gl/pls_render_context_gl.cpp
+++ b/renderer/gl/pls_render_context_gl_impl.cpp
@@ -2,7 +2,7 @@
  * Copyright 2022 Rive
  */
 
-#include "rive/pls/gl/pls_render_context_gl.hpp"
+#include "rive/pls/gl/pls_render_context_gl_impl.hpp"
 
 #include "buffer_ring_gl.hpp"
 #include "gl_utils.hpp"
@@ -32,10 +32,12 @@
 });
 #endif
 
-PLSRenderContextGL::PLSRenderContextGL(const PlatformFeatures& platformFeatures,
-                                       GLExtensions extensions,
-                                       std::unique_ptr<PLSImpl> plsImpl) :
-    PLSRenderContext(platformFeatures), m_extensions(extensions), m_plsImpl(std::move(plsImpl))
+PLSRenderContextGLImpl::PLSRenderContextGLImpl(const PlatformFeatures& platformFeatures,
+                                               GLExtensions extensions,
+                                               std::unique_ptr<PLSImpl> plsImpl) :
+    PLSRenderContextBufferRingImpl(platformFeatures),
+    m_extensions(extensions),
+    m_plsImpl(std::move(plsImpl))
 
 {
     m_shaderVersionString[kShaderVersionStringBuffSize - 1] = '\0';
@@ -175,7 +177,7 @@
 #endif
 }
 
-PLSRenderContextGL::~PLSRenderContextGL()
+PLSRenderContextGLImpl::~PLSRenderContextGLImpl()
 {
     glDeleteProgram(m_colorRampProgram);
     glDeleteVertexArrays(1, &m_colorRampVAO);
@@ -192,13 +194,13 @@
     glDeleteBuffers(1, &m_patchIndicesBuffer);
 }
 
-std::unique_ptr<BufferRingImpl> PLSRenderContextGL::makeVertexBufferRing(size_t capacity,
-                                                                         size_t itemSizeInBytes)
+std::unique_ptr<BufferRingImpl> PLSRenderContextGLImpl::makeVertexBufferRing(size_t capacity,
+                                                                             size_t itemSizeInBytes)
 {
     return std::make_unique<BufferGL>(GL_ARRAY_BUFFER, capacity, itemSizeInBytes);
 }
 
-std::unique_ptr<TexelBufferRing> PLSRenderContextGL::makeTexelBufferRing(
+std::unique_ptr<TexelBufferRing> PLSRenderContextGLImpl::makeTexelBufferRing(
     TexelBufferRing::Format format,
     size_t widthInItems,
     size_t height,
@@ -214,19 +216,19 @@
                                            filter);
 }
 
-std::unique_ptr<BufferRingImpl> PLSRenderContextGL::makePixelUnpackBufferRing(
+std::unique_ptr<BufferRingImpl> PLSRenderContextGLImpl::makePixelUnpackBufferRing(
     size_t capacity,
     size_t itemSizeInBytes)
 {
     return std::make_unique<BufferGL>(GL_PIXEL_UNPACK_BUFFER, capacity, itemSizeInBytes);
 }
 
-std::unique_ptr<BufferRingImpl> PLSRenderContextGL::makeUniformBufferRing(size_t sizeInBytes)
+std::unique_ptr<BufferRingImpl> PLSRenderContextGLImpl::makeUniformBufferRing(size_t sizeInBytes)
 {
     return std::make_unique<BufferGL>(GL_UNIFORM_BUFFER, 1, sizeInBytes);
 }
 
-void PLSRenderContextGL::allocateGradientTexture(size_t height)
+void PLSRenderContextGLImpl::allocateGradientTexture(size_t height)
 {
     glDeleteTextures(1, &m_gradientTexture);
 
@@ -247,7 +249,7 @@
                            0);
 }
 
-void PLSRenderContextGL::allocateTessellationTexture(size_t height)
+void PLSRenderContextGLImpl::allocateTessellationTexture(size_t height)
 {
     glDeleteTextures(1, &m_tessVertexTexture);
 
@@ -270,13 +272,13 @@
 
 // Wraps a compiled GL shader of draw.glsl, either vertex or fragment, with a specific set of
 // features enabled via #define. The set of features to enable is dictated by ShaderFeatures.
-class PLSRenderContextGL::DrawShader
+class PLSRenderContextGLImpl::DrawShader
 {
 public:
     DrawShader(const DrawShader&) = delete;
     DrawShader& operator=(const DrawShader&) = delete;
 
-    DrawShader(PLSRenderContextGL* context,
+    DrawShader(PLSRenderContextGLImpl* context,
                GLenum shaderType,
                DrawType drawType,
                const ShaderFeatures& shaderFeatures)
@@ -317,7 +319,7 @@
                 sources.push_back(glsl::advanced_blend);
             }
         }
-        if (context->m_platformFeatures.avoidFlatVaryings)
+        if (context->platformFeatures().avoidFlatVaryings)
         {
             sources.push_back("#define " GLSL_OPTIONALLY_FLAT "\n");
         }
@@ -343,9 +345,9 @@
     GLuint m_id;
 };
 
-PLSRenderContextGL::DrawProgram::DrawProgram(PLSRenderContextGL* context,
-                                             DrawType drawType,
-                                             const ShaderFeatures& shaderFeatures)
+PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* context,
+                                                 DrawType drawType,
+                                                 const ShaderFeatures& shaderFeatures)
 {
     m_id = glCreateProgram();
 
@@ -378,14 +380,14 @@
     }
 }
 
-PLSRenderContextGL::DrawProgram::~DrawProgram() { glDeleteProgram(m_id); }
+PLSRenderContextGLImpl::DrawProgram::~DrawProgram() { glDeleteProgram(m_id); }
 
 static GLuint gl_buffer_id(const BufferRingImpl* bufferRing)
 {
     return static_cast<const BufferGL*>(bufferRing)->submittedBufferID();
 }
 
-void PLSRenderContextGL::onFlush(const FlushDescriptor& desc)
+void PLSRenderContextGLImpl::flush(const PLSRenderContext::FlushDescriptor& desc)
 {
     // All programs use the same set of per-flush uniforms.
     glBindBufferBase(GL_UNIFORM_BUFFER, 0, gl_buffer_id(uniformBufferRing()));
@@ -450,7 +452,7 @@
 
     // Compile the draw programs before activating pixel local storage.
     // (ANGLE_shader_pixel_local_storage doesn't allow shader compilation while active.)
-    for (const Draw& draw : m_drawList)
+    for (const Draw& draw : *desc.drawList)
     {
         // Compile the draw program before activating pixel local storage.
         // Cache specific compilations of draw.glsl by ShaderFeatures.
@@ -460,30 +462,28 @@
     }
 
     // Bind the currently-submitted buffer in the triangleBufferRing to its vertex array.
-    if (m_maxTriangleVertexCount > 0)
+    if (desc.hasTriangleVertices)
     {
         bindVAO(m_interiorTrianglesVAO);
         glBindBuffer(GL_ARRAY_BUFFER, gl_buffer_id(triangleBufferRing()));
         glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
     }
 
-    glViewport(0, 0, renderTarget()->width(), renderTarget()->height());
+    auto renderTarget = static_cast<const PLSRenderTargetGL*>(desc.renderTarget);
+    glViewport(0, 0, renderTarget->width(), renderTarget->height());
 
 #ifdef RIVE_DESKTOP_GL
-    if (m_extensions.ANGLE_polygon_mode && frameDescriptor().wireframe)
+    if (m_extensions.ANGLE_polygon_mode && desc.wireframe)
     {
         glPolygonModeANGLE(GL_FRONT_AND_BACK, GL_LINE_ANGLE);
         glLineWidth(2);
     }
 #endif
 
-    m_plsImpl->activatePixelLocalStorage(this,
-                                         renderTarget(),
-                                         desc.loadAction,
-                                         desc.needsClipBuffer);
+    m_plsImpl->activatePixelLocalStorage(this, desc);
 
     // Execute the DrawList.
-    for (const Draw& draw : m_drawList)
+    for (const Draw& draw : *desc.drawList)
     {
         if (draw.vertexOrInstanceCount == 0)
         {
@@ -538,14 +538,14 @@
     m_plsImpl->deactivatePixelLocalStorage(this);
 
 #ifdef RIVE_DESKTOP_GL
-    if (m_extensions.ANGLE_polygon_mode && frameDescriptor().wireframe)
+    if (m_extensions.ANGLE_polygon_mode && desc.wireframe)
     {
         glPolygonModeANGLE(GL_FRONT_AND_BACK, GL_FILL_ANGLE);
     }
 #endif
 }
 
-void PLSRenderContextGL::bindProgram(GLuint programID)
+void PLSRenderContextGLImpl::bindProgram(GLuint programID)
 {
     if (programID != m_boundProgramID)
     {
@@ -554,7 +554,7 @@
     }
 }
 
-void PLSRenderContextGL::bindVAO(GLuint vao)
+void PLSRenderContextGLImpl::bindVAO(GLuint vao)
 {
     if (vao != m_boundVAO)
     {
@@ -563,7 +563,7 @@
     }
 }
 
-std::unique_ptr<PLSRenderContextGL> PLSRenderContextGL::Make()
+std::unique_ptr<PLSRenderContextGLImpl> PLSRenderContextGLImpl::Make()
 {
     GLExtensions extensions{};
     GLint extensionCount;
@@ -665,30 +665,32 @@
     if (extensions.EXT_shader_pixel_local_storage &&
         (extensions.ARM_shader_framebuffer_fetch || extensions.EXT_shader_framebuffer_fetch))
     {
-        return std::unique_ptr<PLSRenderContextGL>(
-            new PLSRenderContextGL(platformFeatures, extensions, MakePLSImplEXTNative(extensions)));
+        return std::unique_ptr<PLSRenderContextGLImpl>(
+            new PLSRenderContextGLImpl(platformFeatures,
+                                       extensions,
+                                       MakePLSImplEXTNative(extensions)));
     }
 
     if (extensions.EXT_shader_framebuffer_fetch)
     {
-        return std::unique_ptr<PLSRenderContextGL>(
-            new PLSRenderContextGL(platformFeatures,
-                                   extensions,
-                                   MakePLSImplFramebufferFetch(extensions)));
+        return std::unique_ptr<PLSRenderContextGLImpl>(
+            new PLSRenderContextGLImpl(platformFeatures,
+                                       extensions,
+                                       MakePLSImplFramebufferFetch(extensions)));
     }
 #endif
 
 #ifdef RIVE_DESKTOP_GL
     if (extensions.ANGLE_shader_pixel_local_storage_coherent)
     {
-        return std::unique_ptr<PLSRenderContextGL>(
-            new PLSRenderContextGL(platformFeatures, extensions, MakePLSImplWebGL()));
+        return std::unique_ptr<PLSRenderContextGLImpl>(
+            new PLSRenderContextGLImpl(platformFeatures, extensions, MakePLSImplWebGL()));
     }
 
     if (extensions.ARB_fragment_shader_interlock || extensions.INTEL_fragment_shader_ordering)
     {
-        return std::unique_ptr<PLSRenderContextGL>(
-            new PLSRenderContextGL(platformFeatures, extensions, MakePLSImplRWTexture()));
+        return std::unique_ptr<PLSRenderContextGLImpl>(
+            new PLSRenderContextGLImpl(platformFeatures, extensions, MakePLSImplRWTexture()));
     }
 #endif
 
@@ -697,8 +699,8 @@
             emscripten_webgl_get_current_context()) &&
         emscripten_webgl_shader_pixel_local_storage_is_coherent())
     {
-        return std::unique_ptr<PLSRenderContextGL>(
-            new PLSRenderContextGL(platformFeatures, extensions, MakePLSImplWebGL()));
+        return std::unique_ptr<PLSRenderContextGLImpl>(
+            new PLSRenderContextGLImpl(platformFeatures, extensions, MakePLSImplWebGL()));
     }
 #endif
 
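
Across the pls_impl_*.cpp hunks and flush() above, the GL backend pulls everything it needs out of PLSRenderContext::FlushDescriptor. For orientation, here are the fields this patch actually references, sketched as a plain struct; the real declaration may use different types, ordering, and additional state.

    // Fields of PLSRenderContext::FlushDescriptor referenced by the GL (and Metal)
    // hunks in this patch; types here are illustrative assumptions.
    struct FlushDescriptor
    {
        FlushType flushType;                 // intermediate vs. final flush
        LoadAction loadAction;               // clear or preserve the color buffer
        ColorInt clearColor;                 // unpacked via UnpackColorToRGBA32F()
        bool needsClipBuffer;                // clear/bind the clip plane?
        bool wireframe;                      // drives glPolygonModeANGLE on desktop GL
        bool hasTriangleVertices;            // bind the interior-triangles VAO?
        const PLSRenderTarget* renderTarget; // downcast to PLSRenderTargetGL* by the impl
        const DrawList* drawList;            // list of Draw records; exact container
                                             // type is not shown in this patch
    };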
diff --git a/renderer/gr_inner_fan_triangulator.hpp b/renderer/gr_inner_fan_triangulator.hpp
index 32b90fb..1924485 100644
--- a/renderer/gr_inner_fan_triangulator.hpp
+++ b/renderer/gr_inner_fan_triangulator.hpp
@@ -48,7 +48,7 @@
     void setPathID(uint16_t pathID) { m_pathID = pathID; }
     uint16_t pathID() const { return m_pathID; }
 
-    size_t polysToTriangles(pls::BufferRing<pls::TriangleVertex>* bufferRing) const
+    size_t polysToTriangles(pls::WriteOnlyMappedMemory<pls::TriangleVertex>* bufferRing) const
 
     {
         if (m_polys == nullptr)
diff --git a/renderer/gr_triangulator.cpp b/renderer/gr_triangulator.cpp
index e1ef9e9..59ceb33 100644
--- a/renderer/gr_triangulator.cpp
+++ b/renderer/gr_triangulator.cpp
@@ -114,11 +114,11 @@
 static inline void emit_vertex(Vertex* v,
                                int winding,
                                uint16_t pathID,
-                               pls::BufferRing<pls::TriangleVertex>* bufferRing)
+                               pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory)
 {
     // GrTriangulator and pls unfortunately have opposite winding senses.
     int16_t plsWeight = -winding;
-    bufferRing->emplace_back(v->fPoint, plsWeight, pathID);
+    mappedMemory->emplace_back(v->fPoint, plsWeight, pathID);
 }
 
 static void emit_triangle(Vertex* v0,
@@ -126,22 +126,22 @@
                           Vertex* v2,
                           int winding,
                           uint16_t pathID,
-                          pls::BufferRing<pls::TriangleVertex>* bufferRing)
+                          pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory)
 {
     TESS_LOG("emit_triangle %g (%g, %g) %d\n", v0->fID, v0->fPoint.x, v0->fPoint.y, v0->fAlpha);
     TESS_LOG("              %g (%g, %g) %d\n", v1->fID, v1->fPoint.x, v1->fPoint.y, v1->fAlpha);
     TESS_LOG("              %g (%g, %g) %d\n", v2->fID, v2->fPoint.x, v2->fPoint.y, v2->fAlpha);
 #if TESSELLATOR_WIREFRAME
-    emit_vertex(v0, winding, pathID, bufferRing);
-    emit_vertex(v1, winding, pathID, bufferRing);
-    emit_vertex(v1, winding, pathID, bufferRing);
-    emit_vertex(v2, winding, pathID, bufferRing);
-    emit_vertex(v2, winding, pathID, bufferRing);
-    emit_vertex(v0, winding, pathID, bufferRing);
+    emit_vertex(v0, winding, pathID, mappedMemory);
+    emit_vertex(v1, winding, pathID, mappedMemory);
+    emit_vertex(v1, winding, pathID, mappedMemory);
+    emit_vertex(v2, winding, pathID, mappedMemory);
+    emit_vertex(v2, winding, pathID, mappedMemory);
+    emit_vertex(v0, winding, pathID, mappedMemory);
 #else
-    emit_vertex(v0, winding, pathID, bufferRing);
-    emit_vertex(v1, winding, pathID, bufferRing);
-    emit_vertex(v2, winding, pathID, bufferRing);
+    emit_vertex(v0, winding, pathID, mappedMemory);
+    emit_vertex(v1, winding, pathID, mappedMemory);
+    emit_vertex(v2, winding, pathID, mappedMemory);
 #endif
 }
 
@@ -407,10 +407,11 @@
     }
 }
 
-void GrTriangulator::emitMonotonePoly(const MonotonePoly* monotonePoly,
-                                      uint16_t pathID,
-                                      bool reverseTriangles,
-                                      pls::BufferRing<pls::TriangleVertex>* bufferRing) const
+void GrTriangulator::emitMonotonePoly(
+    const MonotonePoly* monotonePoly,
+    uint16_t pathID,
+    bool reverseTriangles,
+    pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
 {
     assert(monotonePoly->fWinding != 0);
     Edge* e = monotonePoly->fFirstEdge;
@@ -447,7 +448,7 @@
                                 monotonePoly->fWinding,
                                 pathID,
                                 reverseTriangles,
-                                bufferRing);
+                                mappedMemory);
         }
         double ax = static_cast<double>(curr->fPoint.x) - prev->fPoint.x;
         double ay = static_cast<double>(curr->fPoint.y) - prev->fPoint.y;
@@ -461,7 +462,7 @@
                          monotonePoly->fWinding,
                          pathID,
                          reverseTriangles,
-                         bufferRing);
+                         mappedMemory);
             v->fPrev->fNext = v->fNext;
             v->fNext->fPrev = v->fPrev;
             count--;
@@ -481,19 +482,20 @@
     }
 }
 
-void GrTriangulator::emitTriangle(Vertex* prev,
-                                  Vertex* curr,
-                                  Vertex* next,
-                                  int winding,
-                                  uint16_t pathID,
-                                  bool reverseTriangles,
-                                  pls::BufferRing<pls::TriangleVertex>* bufferRing) const
+void GrTriangulator::emitTriangle(
+    Vertex* prev,
+    Vertex* curr,
+    Vertex* next,
+    int winding,
+    uint16_t pathID,
+    bool reverseTriangles,
+    pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
 {
     if (reverseTriangles)
     {
         std::swap(prev, next);
     }
-    return emit_triangle(prev, curr, next, winding, pathID, bufferRing);
+    return emit_triangle(prev, curr, next, winding, pathID, mappedMemory);
 }
 
 GrTriangulator::Poly::Poly(Vertex* v, int winding) :
@@ -577,7 +579,7 @@
 void GrTriangulator::emitPoly(const Poly* poly,
                               uint16_t pathID,
                               bool reverseTriangles,
-                              pls::BufferRing<pls::TriangleVertex>* bufferRing) const
+                              pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
 {
     if (poly->fCount < 3)
     {
@@ -586,7 +588,7 @@
     TESS_LOG("emit() %d, size %d\n", poly->fID, poly->fCount);
     for (MonotonePoly* m = poly->fHead; m != nullptr; m = m->fNext)
     {
-        emitMonotonePoly(m, pathID, reverseTriangles, bufferRing);
+        emitMonotonePoly(m, pathID, reverseTriangles, mappedMemory);
     }
 }
 
@@ -2084,17 +2086,18 @@
 }
 
 // Stage 6: Triangulate the monotone polygons into a vertex buffer.
-void GrTriangulator::polysToTriangles(const Poly* polys,
-                                      FillRule overrideFillType,
-                                      uint16_t pathID,
-                                      bool reverseTriangles,
-                                      pls::BufferRing<pls::TriangleVertex>* bufferRing) const
+void GrTriangulator::polysToTriangles(
+    const Poly* polys,
+    FillRule overrideFillType,
+    uint16_t pathID,
+    bool reverseTriangles,
+    pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
 {
     for (const Poly* poly = polys; poly; poly = poly->fNext)
     {
         if (apply_fill_type(overrideFillType, poly))
         {
-            emitPoly(poly, pathID, reverseTriangles, bufferRing);
+            emitPoly(poly, pathID, reverseTriangles, mappedMemory);
         }
     }
 }
@@ -2178,11 +2181,12 @@
     return CountPoints(polys, fFillRule);
 }
 
-size_t GrTriangulator::polysToTriangles(const Poly* polys,
-                                        uint64_t maxVertexCount,
-                                        uint16_t pathID,
-                                        bool reverseTriangles,
-                                        pls::BufferRing<pls::TriangleVertex>* bufferRing) const
+size_t GrTriangulator::polysToTriangles(
+    const Poly* polys,
+    uint64_t maxVertexCount,
+    uint16_t pathID,
+    bool reverseTriangles,
+    pls::WriteOnlyMappedMemory<pls::TriangleVertex>* mappedMemory) const
 {
     if (0 == maxVertexCount || maxVertexCount > std::numeric_limits<int32_t>::max())
     {
@@ -2199,9 +2203,9 @@
     }
 #endif
 
-    size_t start = bufferRing->bytesWritten();
-    polysToTriangles(polys, fFillRule, pathID, reverseTriangles, bufferRing);
-    size_t actualCount = (bufferRing->bytesWritten() - start) / vertexStride;
+    size_t start = mappedMemory->bytesWritten();
+    polysToTriangles(polys, fFillRule, pathID, reverseTriangles, mappedMemory);
+    size_t actualCount = (mappedMemory->bytesWritten() - start) / vertexStride;
     assert(actualCount <= maxVertexCount * vertexStride);
     return actualCount;
 }
diff --git a/renderer/gr_triangulator.hpp b/renderer/gr_triangulator.hpp
index 45a4ea3..694cfe6 100644
--- a/renderer/gr_triangulator.hpp
+++ b/renderer/gr_triangulator.hpp
@@ -18,7 +18,6 @@
 #include "rive/math/vec2d.hpp"
 #include "rive/math/aabb.hpp"
 #include "rive/pls/pls.hpp"
-#include "rive/pls/buffer_ring.hpp"
 #include "rive/pls/trivial_block_allocator.hpp"
 
 namespace rive
@@ -99,7 +98,7 @@
                           FillRule overrideFillRule,
                           uint16_t pathID,
                           bool reverseTriangles,
-                          pls::BufferRing<pls::TriangleVertex>*) const;
+                          pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
 
     // The vertex sorting in step (3) is a merge sort, since it plays well with the linked list
     // of vertices (and the necessity of inserting new vertices on intersection).
@@ -148,18 +147,18 @@
     void emitMonotonePoly(const MonotonePoly*,
                           uint16_t pathID,
                           bool reverseTriangles,
-                          pls::BufferRing<pls::TriangleVertex>*) const;
+                          pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
     void emitTriangle(Vertex* prev,
                       Vertex* curr,
                       Vertex* next,
                       int winding,
                       uint16_t pathID,
                       bool reverseTriangles,
-                      pls::BufferRing<pls::TriangleVertex>*) const;
+                      pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
     void emitPoly(const Poly*,
                   uint16_t pathID,
                   bool reverseTriangles,
-                  pls::BufferRing<pls::TriangleVertex>*) const;
+                  pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
 
     Poly* makePoly(Poly** head, Vertex* v, int winding) const;
     void appendPointToContour(const Vec2D& p, VertexList* contour) const;
@@ -247,7 +246,7 @@
                             uint64_t maxVertexCount,
                             uint16_t pathID,
                             bool reverseTriangles,
-                            pls::BufferRing<pls::TriangleVertex>*) const;
+                            pls::WriteOnlyMappedMemory<pls::TriangleVertex>*) const;
 
     AABB fPathBounds;
     FillRule fFillRule;
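
The triangulator changes above swap pls::BufferRing<TriangleVertex> for pls::WriteOnlyMappedMemory<TriangleVertex>. The class itself is not shown in this patch, but its call sites (here and in pls_render_context.cpp below) imply roughly the following usage; treat this as an inferred sketch, not the actual API.

    // Inferred usage of pls::WriteOnlyMappedMemory<T>, based on the call sites in
    // this patch (reset / emplace_back / set_back / hasRoomFor / bytesWritten /
    // operator bool).
    pls::WriteOnlyMappedMemory<pls::TriangleVertex> vertices;
    vertices.reset(impl->mapTriangleVertexBuffer(), vertexCapacity); // wrap mapped GPU memory
    if (vertices.hasRoomFor(3))
    {
        // Write-only appends; no pointers into the mapped buffer are handed back.
        vertices.emplace_back(p0, /*weight=*/1, pathID);
        vertices.emplace_back(p1, /*weight=*/1, pathID);
        vertices.emplace_back(p2, /*weight=*/1, pathID);
    }
    size_t vertexCount = vertices.bytesWritten() / sizeof(pls::TriangleVertex);
    impl->unmapTriangleVertexBuffer(vertices.bytesWritten()); // report bytes actually written
    vertices.reset(); // back to the unmapped state; operator bool() is false again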
diff --git a/renderer/metal/pls_render_context_metal.mm b/renderer/metal/pls_render_context_metal.mm
index cbf5479..240d7ea 100644
--- a/renderer/metal/pls_render_context_metal.mm
+++ b/renderer/metal/pls_render_context_metal.mm
@@ -507,13 +507,6 @@
       thisFlushLock.unlock();
     }];
 
-    if (desc.flushType == FlushType::intermediate)
-    {
-        // The frame isn't complete yet. The caller will begin preparing a new flush immediately
-        // after this method returns, so lock buffers for the next flush now.
-        lockNextBufferRingIndex();
-    }
-
     [commandBuffer commit];
 }
 } // namespace rive::pls
diff --git a/renderer/pls_render_context.cpp b/renderer/pls_render_context.cpp
index c7b6456..f93675e 100644
--- a/renderer/pls_render_context.cpp
+++ b/renderer/pls_render_context.cpp
@@ -8,6 +8,7 @@
 #include "pls_path.hpp"
 #include "pls_paint.hpp"
 #include "rive/math/math_types.hpp"
+#include "rive/pls/pls_render_context_impl.hpp"
 
 #include <string_view>
 
@@ -29,9 +30,9 @@
 
 constexpr size_t kMinTessTextureHeight = 32;
 constexpr size_t kMaxTessTextureHeight = 2048; // GL_MAX_TEXTURE_SIZE spec minimum.
-constexpr size_t kMaxTessellationVertices = kMaxTessTextureHeight * kTessTextureWidth;
+constexpr size_t kMaxTessellationVertexCount = kMaxTessTextureHeight * kTessTextureWidth;
 
-uint32_t PLSRenderContext::ShaderFeatures::getPreprocessorDefines(SourceType sourceType) const
+uint32_t ShaderFeatures::getPreprocessorDefines(SourceType sourceType) const
 {
     uint32_t defines = 0;
     if (programFeatures.blendTier != BlendTier::srcOver)
@@ -56,7 +57,7 @@
     return defines;
 }
 
-PLSRenderContext::BlendTier PLSRenderContext::BlendTierForBlendMode(PLSBlendMode blendMode)
+BlendTier PLSRenderContext::BlendTierForBlendMode(PLSBlendMode blendMode)
 {
     switch (blendMode)
     {
@@ -120,9 +121,10 @@
     return x ^ y;
 }
 
-PLSRenderContext::PLSRenderContext(const PlatformFeatures& platformFeatures) :
-    m_platformFeatures(platformFeatures),
-    m_maxPathID(MaxPathID(m_platformFeatures.pathIDGranularity))
+PLSRenderContext::PLSRenderContext(std::unique_ptr<PLSRenderContextImpl> impl) :
+    m_platformFeatures(impl->platformFeatures()),
+    m_maxPathID(MaxPathID(m_platformFeatures.pathIDGranularity)),
+    m_impl(std::move(impl))
 {}
 
 PLSRenderContext::~PLSRenderContext()
@@ -218,13 +220,6 @@
 #define COUNT_RESOURCE_SIZE(SIZE_IN_BYTES)
 #endif
 
-    // One-time allocation of the uniform buffer ring.
-    if (m_uniformBuffer.impl() == nullptr)
-    {
-        m_uniformBuffer.reset(makeUniformBufferRing(sizeof(FlushUniforms)));
-    }
-    COUNT_RESOURCE_SIZE(m_uniformBuffer.totalSizeInBytes());
-
     // Path data texture ring.
     constexpr size_t kMinPathIDCount = kPathTextureWidthInItems * 32; // 32 texels tall.
     size_t targetMaxPathID = resource_texture_height<kPathTextureWidthInItems>(targets.maxPathID) *
@@ -236,20 +231,15 @@
         resource_texture_height<kPathTextureWidthInItems>(m_currentResourceLimits.maxPathID);
     if (shouldReallocate(targetPathTextureHeight, currentPathTextureHeight))
     {
-        assert(!m_pathBuffer.mapped());
-        m_pathBuffer.reset(makeTexelBufferRing(TexelBufferRing::Format::rgba32ui,
-                                               kPathTextureWidthInItems,
-                                               targetPathTextureHeight,
-                                               kPathTexelsPerItem,
-                                               kPathTextureIdx,
-                                               TexelBufferRing::Filter::nearest));
+        assert(!m_pathData);
+        m_impl->resizePathTexture(kPathTextureWidthInTexels, targetPathTextureHeight);
         LOG_CHANGED_SIZE("path texture height",
                          currentPathTextureHeight,
                          targetPathTextureHeight,
-                         m_pathBuffer.totalSizeInBytes());
+                         m_pathBuffer->totalSizeInBytes());
         m_currentResourceLimits.maxPathID = targetMaxPathID;
     }
-    COUNT_RESOURCE_SIZE(m_pathBuffer.totalSizeInBytes());
+    COUNT_RESOURCE_SIZE(m_pathBuffer->totalSizeInBytes());
 
     // Contour data texture ring.
     constexpr size_t kMinContourIDCount = kContourTextureWidthInItems * 32; // 32 texels tall.
@@ -263,109 +253,101 @@
         resource_texture_height<kContourTextureWidthInItems>(m_currentResourceLimits.maxContourID);
     if (shouldReallocate(targetContourTextureHeight, currentContourTextureHeight))
     {
-        assert(!m_contourBuffer.mapped());
-        m_contourBuffer.reset(makeTexelBufferRing(TexelBufferRing::Format::rgba32ui,
-                                                  kContourTextureWidthInItems,
-                                                  targetContourTextureHeight,
-                                                  kContourTexelsPerItem,
-                                                  pls::kContourTextureIdx,
-                                                  TexelBufferRing::Filter::nearest));
+        assert(!m_contourData);
+        m_impl->resizeContourTexture(kContourTextureWidthInTexels, targetContourTextureHeight);
         LOG_CHANGED_SIZE("contour texture height",
                          currentContourTextureHeight,
                          targetContourTextureHeight,
-                         m_contourBuffer.totalSizeInBytes());
+                         m_contourBuffer->totalSizeInBytes());
         m_currentResourceLimits.maxContourID = targetMaxContourID;
     }
-    COUNT_RESOURCE_SIZE(m_contourBuffer.totalSizeInBytes());
+    COUNT_RESOURCE_SIZE(m_contourBuffer->totalSizeInBytes());
 
     // Simple gradient color ramp pixel unpack buffer ring.
-    size_t targetSimpleGradientRows =
+    size_t targetSimpleGradientRowCount =
         resource_texture_height<kGradTextureWidthInSimpleRamps>(targets.maxSimpleGradients);
-    targetSimpleGradientRows =
-        std::clamp(targetSimpleGradientRows, kMinSimpleColorRampRows, kMaxSimpleColorRampRows);
+    targetSimpleGradientRowCount =
+        std::clamp(targetSimpleGradientRowCount, kMinSimpleColorRampRows, kMaxSimpleColorRampRows);
     assert(m_currentResourceLimits.maxSimpleGradients % kGradTextureWidthInSimpleRamps == 0);
-    assert(m_reservedGradTextureRowsForSimpleRamps ==
+    assert(m_reservedSimpleGradientRowCount ==
            resource_texture_height<kGradTextureWidthInSimpleRamps>(
                m_currentResourceLimits.maxSimpleGradients));
-    if (shouldReallocate(targetSimpleGradientRows, m_reservedGradTextureRowsForSimpleRamps))
+    if (shouldReallocate(targetSimpleGradientRowCount, m_reservedSimpleGradientRowCount))
     {
-        assert(!m_simpleColorRampsBuffer.mapped());
-        m_simpleColorRampsBuffer.reset(
-            makePixelUnpackBufferRing(targetSimpleGradientRows * kGradTextureWidthInSimpleRamps,
-                                      sizeof(TwoTexelRamp)));
+        assert(!m_simpleColorRampsData);
+        m_impl->resizeSimpleColorRampsBuffer(targetSimpleGradientRowCount *
+                                             kGradTextureWidthInSimpleRamps * sizeof(TwoTexelRamp));
         LOG_CHANGED_SIZE("maxSimpleGradients",
-                         m_reservedGradTextureRowsForSimpleRamps * kGradTextureWidthInSimpleRamps,
-                         targetSimpleGradientRows * kGradTextureWidthInSimpleRamps,
-                         m_simpleColorRampsBuffer.totalSizeInBytes());
+                         m_reservedSimpleGradientRowCount * kGradTextureWidthInSimpleRamps,
+                         targetSimpleGradientRowCount * kGradTextureWidthInSimpleRamps,
+                         m_simpleColorRampsBuffer->totalSizeInBytes());
         m_currentResourceLimits.maxSimpleGradients =
-            targetSimpleGradientRows * kGradTextureWidthInSimpleRamps;
-        m_reservedGradTextureRowsForSimpleRamps = targetSimpleGradientRows;
+            targetSimpleGradientRowCount * kGradTextureWidthInSimpleRamps;
+        m_reservedSimpleGradientRowCount = targetSimpleGradientRowCount;
     }
-    COUNT_RESOURCE_SIZE(m_simpleColorRampsBuffer.totalSizeInBytes());
+    COUNT_RESOURCE_SIZE(m_simpleColorRampsBuffer->totalSizeInBytes());
 
     // Instance buffer ring for rendering complex gradients.
     constexpr size_t kMinComplexGradientSpans = kMinComplexGradients * 32;
     constexpr size_t kMaxComplexGradientSpans = kMaxComplexGradients * 64;
-    size_t targetComplexGradientSpans = std::clamp(targets.maxComplexGradientSpans,
-                                                   kMinComplexGradientSpans,
-                                                   kMaxComplexGradientSpans);
-    if (shouldReallocate(targetComplexGradientSpans,
+    size_t targetComplexGradientSpanCount = std::clamp(targets.maxComplexGradientSpans,
+                                                       kMinComplexGradientSpans,
+                                                       kMaxComplexGradientSpans);
+    if (shouldReallocate(targetComplexGradientSpanCount,
                          m_currentResourceLimits.maxComplexGradientSpans))
     {
-        assert(!m_gradSpanBuffer.mapped());
-        m_gradSpanBuffer.reset(
-            makeVertexBufferRing(targetComplexGradientSpans, sizeof(GradientSpan)));
+        assert(!m_gradSpanData);
+        m_impl->resizeGradSpanBuffer(targetComplexGradientSpanCount * sizeof(GradientSpan));
         LOG_CHANGED_SIZE("maxComplexGradientSpans",
                          m_currentResourceLimits.maxComplexGradientSpans,
-                         targetComplexGradientSpans,
-                         m_gradSpanBuffer.totalSizeInBytes());
-        m_currentResourceLimits.maxComplexGradientSpans = targetComplexGradientSpans;
+                         targetComplexGradientSpanCount,
+                         m_gradSpanBuffer->totalSizeInBytes());
+        m_currentResourceLimits.maxComplexGradientSpans = targetComplexGradientSpanCount;
     }
-    COUNT_RESOURCE_SIZE(m_gradSpanBuffer.totalSizeInBytes());
+    COUNT_RESOURCE_SIZE(m_gradSpanBuffer->totalSizeInBytes());
 
     // Instance buffer ring for rendering path tessellation vertices.
     constexpr size_t kMinTessellationSpans = kMinTessTextureHeight * kTessTextureWidth / 4;
     const size_t maxTessellationSpans = kMaxTessTextureHeight * kTessTextureWidth / 8; // ~100MiB
-    size_t targetTessellationSpans =
+    size_t targetTessellationSpanCount =
         std::clamp(targets.maxTessellationSpans, kMinTessellationSpans, maxTessellationSpans);
-    if (shouldReallocate(targetTessellationSpans, m_currentResourceLimits.maxTessellationSpans))
+    if (shouldReallocate(targetTessellationSpanCount, m_currentResourceLimits.maxTessellationSpans))
     {
-        assert(!m_tessSpanBuffer.mapped());
-        m_tessSpanBuffer.reset(
-            makeVertexBufferRing(targetTessellationSpans, sizeof(TessVertexSpan)));
+        assert(!m_tessSpanData);
+        m_impl->resizeTessVertexSpanBuffer(targetTessellationSpanCount * sizeof(TessVertexSpan));
         LOG_CHANGED_SIZE("maxTessellationSpans",
                          m_currentResourceLimits.maxTessellationSpans,
-                         targetTessellationSpans,
-                         m_tessSpanBuffer.totalSizeInBytes());
-        m_currentResourceLimits.maxTessellationSpans = targetTessellationSpans;
+                         targetTessellationSpanCount,
+                         m_tessSpanBuffer->totalSizeInBytes());
+        m_currentResourceLimits.maxTessellationSpans = targetTessellationSpanCount;
     }
-    COUNT_RESOURCE_SIZE(m_tessSpanBuffer.totalSizeInBytes());
+    COUNT_RESOURCE_SIZE(m_tessSpanBuffer->totalSizeInBytes());
 
     // Instance buffer ring for literal triangles fed directly by the CPU.
-    constexpr size_t kMinTriangleVertices = 3072 * 3; // 324 KiB
+    constexpr size_t kMinTriangleVertexCount = 3072 * 3; // 324 KiB
     // Triangle vertices don't have a maximum limit; we let the other components be the limiting
     // factor and allocate whatever buffer size we need at flush time.
-    size_t targetTriangleVertices =
-        std::max(targets.triangleVertexBufferSize, kMinTriangleVertices);
-    if (shouldReallocate(targetTriangleVertices, m_currentResourceLimits.triangleVertexBufferSize))
+    size_t targetTriangleVertexCount =
+        std::max(targets.triangleVertexBufferCount, kMinTriangleVertexCount);
+    if (shouldReallocate(targetTriangleVertexCount,
+                         m_currentResourceLimits.triangleVertexBufferCount))
     {
-        assert(!m_triangleBuffer.mapped());
-        m_triangleBuffer.reset(
-            makeVertexBufferRing(targetTriangleVertices, sizeof(TriangleVertex)));
-        LOG_CHANGED_SIZE("triangleVertexBufferSize",
-                         m_currentResourceLimits.triangleVertexBufferSize,
-                         targetTriangleVertices,
-                         m_triangleBuffer.totalSizeInBytes());
-        m_currentResourceLimits.triangleVertexBufferSize = targetTriangleVertices;
+        assert(!m_triangleVertexData);
+        m_impl->resizeTriangleVertexBuffer(targetTriangleVertexCount * sizeof(TriangleVertex));
+        LOG_CHANGED_SIZE("triangleVertexBufferCount",
+                         m_currentResourceLimits.triangleVertexBufferCount,
+                         targetTriangleVertexCount,
+                         m_triangleBuffer->totalSizeInBytes());
+        m_currentResourceLimits.triangleVertexBufferCount = targetTriangleVertexCount;
     }
-    COUNT_RESOURCE_SIZE(m_triangleBuffer.totalSizeInBytes());
+    COUNT_RESOURCE_SIZE(m_triangleBuffer->totalSizeInBytes());
 
     // Gradient color ramp texture.
     size_t targetGradTextureHeight =
         std::clamp(targets.gradientTextureHeight, kMinGradTextureHeight, kMaxGradTextureHeight);
     if (shouldReallocate(targetGradTextureHeight, m_currentResourceLimits.gradientTextureHeight))
     {
-        allocateGradientTexture(targetGradTextureHeight);
+        m_impl->resizeGradientTexture(targetGradTextureHeight);
         LOG_CHANGED_SIZE("gradientTextureHeight",
                          m_currentResourceLimits.gradientTextureHeight,
                          targetGradTextureHeight,
@@ -381,7 +363,7 @@
     if (shouldReallocate(targetTessTextureHeight,
                          m_currentResourceLimits.tessellationTextureHeight))
     {
-        allocateTessellationTexture(targetTessTextureHeight);
+        m_impl->resizeTessellationTexture(targetTessTextureHeight);
         LOG_CHANGED_SIZE("tessellationTextureHeight",
                          m_currentResourceLimits.tessellationTextureHeight,
                          targetTessTextureHeight,
@@ -402,7 +384,7 @@
     growExceededGPUResources(m_maxRecentResourceUsage.resetFlushTimeLimits(), kGPUResourcePadding);
     m_frameDescriptor = std::move(frameDescriptor);
     m_isFirstFlushOfFrame = true;
-    onBeginFrame();
+    m_impl->prepareToMapBuffers();
     RIVE_DEBUG_CODE(m_didBeginFrame = true);
 }
 
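
Taken together, the hunks in this file replace the old ensureMapped()/submit() buffer-ring flow with an explicit map/write/unmap cycle against the impl. A condensed sketch of that lifecycle, using only method names that appear in this patch (resource growth and error paths omitted):

    // Condensed per-frame / per-flush lifecycle implied by this patch.
    m_impl->prepareToMapBuffers();   // once per frame, from beginFrame()

    // Mapped lazily, the first time a path is reserved in this flush:
    m_pathData.reset(m_impl->mapPathTexture(), m_currentResourceLimits.maxPathID);
    m_contourData.reset(m_impl->mapContourTexture(), m_currentResourceLimits.maxContourID);
    m_tessSpanData.reset(m_impl->mapTessVertexSpanBuffer(),
                         m_currentResourceLimits.maxTessellationSpans);

    // ... pushPath()/pushContour()/pushTessellationSpans() write through the wrappers ...

    // At flush time, each non-empty mapping is handed back with its written size:
    m_impl->unmapTessVertexSpanBuffer(m_tessSpanData.bytesWritten());
    m_tessSpanData.reset();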
@@ -461,9 +443,9 @@
             newLimits.maxTessellationSpans = maxTessellationSpans;
             needsRealloc = true;
         }
-        assert(!m_pathBuffer.mapped());
-        assert(!m_contourBuffer.mapped());
-        assert(!m_tessSpanBuffer.mapped());
+        assert(!m_pathData);
+        assert(!m_contourData);
+        assert(!m_tessSpanData);
         if (needsRealloc)
         {
             // The very first draw of the flush overwhelmed our GPU resources. Since we haven't
@@ -472,22 +454,28 @@
         }
     }
 
-    m_pathBuffer.ensureMapped();
-    m_contourBuffer.ensureMapped();
-    m_tessSpanBuffer.ensureMapped();
+    if (!m_pathData)
+    {
+        assert(!m_contourData);
+        assert(!m_tessSpanData);
+        m_pathData.reset(m_impl->mapPathTexture(), m_currentResourceLimits.maxPathID);
+        m_contourData.reset(m_impl->mapContourTexture(), m_currentResourceLimits.maxContourID);
+        m_tessSpanData.reset(m_impl->mapTessVertexSpanBuffer(),
+                             m_currentResourceLimits.maxTessellationSpans);
+    }
 
     // Does the path fit in our current buffers?
     if (m_currentPathID + pathCount <= m_currentResourceLimits.maxPathID &&
         m_currentContourID + contourCount <= m_currentResourceLimits.maxContourID &&
-        m_tessSpanBuffer.hasRoomFor(maxTessellationSpans) &&
-        m_tessVertexCount + maxTessVertexCountWithInternalPadding <= kMaxTessellationVertices)
+        m_tessSpanData.hasRoomFor(maxTessellationSpans) &&
+        m_tessVertexCount + maxTessVertexCountWithInternalPadding <= kMaxTessellationVertexCount)
     {
-        assert(m_pathBuffer.hasRoomFor(pathCount));
-        assert(m_contourBuffer.hasRoomFor(contourCount));
+        assert(m_pathData.hasRoomFor(pathCount));
+        assert(m_contourData.hasRoomFor(contourCount));
         RIVE_DEBUG_CODE(m_expectedTessVertexCountAtNextReserve =
                             m_tessVertexCount +
                             tessVertexCounter.totalVertexCountIncludingReflectionsAndPadding());
-        assert(m_expectedTessVertexCountAtNextReserve <= kMaxTessellationVertices);
+        assert(m_expectedTessVertexCountAtNextReserve <= kMaxTessellationVertexCount);
         return true;
     }
 
@@ -540,8 +528,12 @@
                 return false;
             }
             rampTexelsIdx = m_simpleGradients.size() * 2;
-            m_simpleColorRampsBuffer.ensureMapped();
-            m_simpleColorRampsBuffer.set_back(colors);
+            if (!m_simpleColorRampsData)
+            {
+                m_simpleColorRampsData.reset(m_impl->mapSimpleColorRampsBuffer(),
+                                             m_currentResourceLimits.maxSimpleGradients);
+            }
+            m_simpleColorRampsData.set_back(colors);
             m_simpleGradients.insert({simpleKey, rampTexelsIdx});
         }
         row = rampTexelsIdx / kGradTextureWidth;
@@ -572,7 +564,7 @@
             // the gradient span buffer hasn't been mapped yet, we have a unique opportunity to grow
             // it if needed.
             size_t spanCount = stopCount + 1;
-            if (!m_gradSpanBuffer.mapped())
+            if (!m_gradSpanData)
             {
                 if (spanCount > m_currentResourceLimits.maxComplexGradientSpans)
                 {
@@ -583,10 +575,11 @@
                     newLimits.maxComplexGradientSpans = spanCount;
                     growExceededGPUResources(newLimits, kGPUResourceIntermediateGrowthFactor);
                 }
-                m_gradSpanBuffer.ensureMapped();
+                m_gradSpanData.reset(m_impl->mapGradSpanBuffer(),
+                                     m_currentResourceLimits.maxComplexGradientSpans);
             }
 
-            if (!m_gradSpanBuffer.hasRoomFor(spanCount))
+            if (!m_gradSpanData.hasRoomFor(spanCount))
             {
                 // We ran out of instances for rendering complex color ramps. The caller needs to
                 // flush and try again.
@@ -596,7 +589,7 @@
             // Push "GradientSpan" instances that will render each section of the color ramp.
             ColorInt lastColor = colors[0];
             uint32_t lastXFixed = 0;
-            // The viewport will start at m_reservedGradTextureRowsForSimpleRamps when rendering
+            // The viewport will start at m_reservedSimpleGradientRowCount when rendering
             // color ramps.
             uint32_t y = static_cast<uint32_t>(m_complexGradients.size());
             // "stop * w + .5" converts a stop position to an x-coordinate in the gradient texture.
@@ -609,13 +602,13 @@
                 float x = stops[i] * w + .5f;
                 uint32_t xFixed = static_cast<uint32_t>(x * (65536.f / kGradTextureWidth));
                 assert(lastXFixed <= xFixed && xFixed < 65536);
-                m_gradSpanBuffer.set_back(lastXFixed, xFixed, y, lastColor, colors[i]);
+                m_gradSpanData.set_back(lastXFixed, xFixed, y, lastColor, colors[i]);
                 lastColor = colors[i];
                 lastXFixed = xFixed;
             }
-            m_gradSpanBuffer.set_back(lastXFixed, 65535u, y, lastColor, lastColor);
+            m_gradSpanData.set_back(lastXFixed, 65535u, y, lastColor, lastColor);
 
-            row = m_reservedGradTextureRowsForSimpleRamps + m_complexGradients.size();
+            row = m_reservedSimpleGradientRowCount + m_complexGradients.size();
             m_complexGradients.emplace(std::move(key), row);
         }
     }
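
As a quick sanity check on the fixed-point math in the loop above: a stop in [0,1] is first mapped to a texel-center x coordinate (stop * w + .5), then rescaled so that 65536 spans the full gradient texture width. The numbers below are purely illustrative; kGradTextureWidth and w are defined elsewhere in the source.

    // Illustrative values only (actual constants are defined outside this hunk):
    //   assume kGradTextureWidth == 256 and w == 254
    //   stop   = 0.5
    //   x      = 0.5 * 254 + 0.5       = 127.5   // texel-center coordinate
    //   xFixed = 127.5 * (65536 / 256) = 32640   // 16-bit fixed point across the texture
    // The trailing set_back(lastXFixed, 65535u, ...) then pads the ramp out to the
    // right edge of the texture with the final color.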
@@ -640,11 +633,11 @@
 
     m_currentPathIsStroked = strokeRadius != 0;
     m_currentPathNeedsMirroredContours = !m_currentPathIsStroked;
-    m_pathBuffer.set_back(matrix, strokeRadius, fillRule, paintType, clipID, blendMode, paintData);
+    m_pathData.set_back(matrix, strokeRadius, fillRule, paintType, clipID, blendMode, paintData);
 
     ++m_currentPathID;
     assert(0 < m_currentPathID && m_currentPathID <= m_maxPathID);
-    assert(m_currentPathID == m_pathBuffer.bytesWritten() / sizeof(PathData));
+    assert(m_currentPathID == m_pathData.bytesWritten() / sizeof(PathData));
 
     auto drawType = patchType == PatchType::midpointFan ? DrawType::midpointFanPatches
                                                         : DrawType::outerCurvePatches;
@@ -686,13 +679,13 @@
     RIVE_DEBUG_CODE(m_expectedTessVertexCountAtEndOfPath =
                         m_tessVertexCount + tessVertexCountWithoutPadding);
     assert(m_expectedTessVertexCountAtEndOfPath <= m_expectedTessVertexCountAtNextReserve);
-    assert(m_expectedTessVertexCountAtEndOfPath <= kMaxTessellationVertices);
+    assert(m_expectedTessVertexCountAtEndOfPath <= kMaxTessellationVertexCount);
 }
 
 void PLSRenderContext::pushContour(Vec2D midpoint, bool closed, uint32_t paddingVertexCount)
 {
     assert(m_didBeginFrame);
-    assert(!m_pathBuffer.empty());
+    assert(m_pathData.bytesWritten() > 0);
     assert(m_currentPathIsStroked || closed);
     assert(m_currentPathID != 0); // pathID can't be zero.
 
@@ -700,12 +693,10 @@
     {
         midpoint.x = closed ? 1 : 0;
     }
-    m_contourBuffer.emplace_back(midpoint,
-                                 m_currentPathID,
-                                 static_cast<uint32_t>(m_tessVertexCount));
+    m_contourData.emplace_back(midpoint, m_currentPathID, static_cast<uint32_t>(m_tessVertexCount));
     ++m_currentContourID;
     assert(0 < m_currentContourID && m_currentContourID <= kMaxContourID);
-    assert(m_currentContourID == m_contourBuffer.bytesWritten() / sizeof(ContourData));
+    assert(m_currentContourID == m_contourData.bytesWritten() / sizeof(ContourData));
 
     // The first curve of the contour will be pre-padded with 'paddingVertexCount' tessellation
     // vertices, colocated at T=0. The caller must use this argument to align the end of the contour on
@@ -765,7 +756,7 @@
     constexpr static uint32_t kInvalidContourID = 0;
     assert(m_tessVertexCount == m_expectedTessVertexCountAtEndOfPath);
     RIVE_DEBUG_CODE(m_expectedTessVertexCountAtEndOfPath = m_tessVertexCount + count;)
-    assert(m_expectedTessVertexCountAtEndOfPath <= kMaxTessellationVertices);
+    assert(m_expectedTessVertexCountAtEndOfPath <= kMaxTessellationVertexCount);
     pushTessellationSpans(kEmptyCubic, {0, 0}, count, 0, 0, 1, kInvalidContourID);
     assert(m_tessVertexCount == m_expectedTessVertexCountAtEndOfPath);
 }
@@ -783,15 +774,15 @@
     int32_t x1 = x0 + totalVertexCount;
     for (;;)
     {
-        m_tessSpanBuffer.set_back(pts,
-                                  joinTangent,
-                                  static_cast<float>(y),
-                                  x0,
-                                  x1,
-                                  parametricSegmentCount,
-                                  polarSegmentCount,
-                                  joinSegmentCount,
-                                  contourIDWithFlags);
+        m_tessSpanData.set_back(pts,
+                                joinTangent,
+                                static_cast<float>(y),
+                                x0,
+                                x1,
+                                parametricSegmentCount,
+                                polarSegmentCount,
+                                joinSegmentCount,
+                                contourIDWithFlags);
         if (x1 > static_cast<int32_t>(kTessTextureWidth))
         {
             // The span was too long to fit on the current line. Wrap and draw it again, this
@@ -829,18 +820,18 @@
 
     for (;;)
     {
-        m_tessSpanBuffer.set_back(pts,
-                                  joinTangent,
-                                  static_cast<float>(y),
-                                  x0,
-                                  x1,
-                                  static_cast<float>(reflectionY),
-                                  reflectionX0,
-                                  reflectionX1,
-                                  parametricSegmentCount,
-                                  polarSegmentCount,
-                                  joinSegmentCount,
-                                  contourIDWithFlags);
+        m_tessSpanData.set_back(pts,
+                                joinTangent,
+                                static_cast<float>(y),
+                                x0,
+                                x1,
+                                static_cast<float>(reflectionY),
+                                reflectionX0,
+                                reflectionX1,
+                                parametricSegmentCount,
+                                polarSegmentCount,
+                                joinSegmentCount,
+                                contourIDWithFlags);
         if (x1 > static_cast<int32_t>(kTessTextureWidth) || reflectionX1 < 0)
         {
             // Either the span or its reflection was too long to fit on the current line. Wrap and
@@ -891,7 +882,6 @@
     if (m_drawList.empty() || m_drawList.tail().drawType != drawType)
     {
         m_drawList.emplace_back(this, drawType, baseVertex);
-        ++m_drawListCount;
     }
     ShaderFeatures* shaderFeatures = &m_drawList.tail().shaderFeatures;
     if (blendMode > PLSBlendMode::srcOver)
@@ -934,7 +924,7 @@
     // The final vertex of the final patch of each contour crosses over into the next contour. (This
     // is how we wrap around back to the beginning.) Therefore, the final contour of the flush needs
     // an out-of-contour vertex to cross into as well, so we emit a padding vertex here at the end.
-    if (!m_tessSpanBuffer.empty())
+    if (m_tessSpanData.bytesWritten() > 0)
     {
         pushPaddingVertices(1);
     }
@@ -943,14 +933,12 @@
     // when we know exactly how large they need to be.
     GPUResourceLimits newLimitsForFlushTimeResources{};
     bool needsFlushTimeRealloc = false;
-    assert(m_triangleBuffer.capacity() == m_currentResourceLimits.triangleVertexBufferSize);
-    if (m_currentResourceLimits.triangleVertexBufferSize < m_maxTriangleVertexCount)
+    if (m_currentResourceLimits.triangleVertexBufferCount < m_maxTriangleVertexCount)
     {
-        newLimitsForFlushTimeResources.triangleVertexBufferSize = m_maxTriangleVertexCount;
+        newLimitsForFlushTimeResources.triangleVertexBufferCount = m_maxTriangleVertexCount;
         needsFlushTimeRealloc = true;
     }
-    size_t requiredGradTextureHeight =
-        m_reservedGradTextureRowsForSimpleRamps + m_complexGradients.size();
+    size_t requiredGradTextureHeight = m_reservedSimpleGradientRowCount + m_complexGradients.size();
     if (m_currentResourceLimits.gradientTextureHeight < requiredGradTextureHeight)
     {
         newLimitsForFlushTimeResources.gradientTextureHeight = requiredGradTextureHeight;
@@ -969,11 +957,13 @@
     }
     if (m_maxTriangleVertexCount > 0)
     {
-        m_triangleBuffer.ensureMapped();
-        assert(m_triangleBuffer.hasRoomFor(m_maxTriangleVertexCount));
+        assert(!m_triangleVertexData);
+        m_triangleVertexData.reset(m_impl->mapTriangleVertexBuffer(),
+                                   m_currentResourceLimits.triangleVertexBufferCount);
+        assert(m_triangleVertexData.hasRoomFor(m_maxTriangleVertexCount));
     }
     assert(m_complexGradients.size() <=
-           m_currentResourceLimits.gradientTextureHeight - m_reservedGradTextureRowsForSimpleRamps);
+           m_currentResourceLimits.gradientTextureHeight - m_reservedSimpleGradientRowCount);
     assert(m_tessVertexCount <=
            m_currentResourceLimits.tessellationTextureHeight * kTessTextureWidth);
 
@@ -995,7 +985,7 @@
                 size_t actualVertexCount = maxVertexCount;
                 if (maxVertexCount > 0)
                 {
-                    actualVertexCount = draw.triangulator->polysToTriangles(&m_triangleBuffer);
+                    actualVertexCount = draw.triangulator->polysToTriangles(&m_triangleVertexData);
                 }
                 assert(actualVertexCount <= maxVertexCount);
                 draw.baseVertexOrInstance = writtenTriangleVertexCount;
@@ -1007,21 +997,51 @@
         needsClipBuffer = needsClipBuffer || draw.shaderFeatures.programFeatures.enablePathClipping;
         RIVE_DEBUG_CODE(++drawIdx;)
     }
-    assert(drawIdx == m_drawListCount);
+    assert(drawIdx == m_drawList.count());
 
     // Determine how much to draw.
-    size_t simpleColorRampCount = m_simpleColorRampsBuffer.bytesWritten() / sizeof(TwoTexelRamp);
-    size_t gradSpanCount = m_gradSpanBuffer.bytesWritten() / sizeof(GradientSpan);
-    size_t tessVertexSpanCount = m_tessSpanBuffer.bytesWritten() / sizeof(TessVertexSpan);
+    size_t simpleColorRampCount = m_simpleColorRampsData.bytesWritten() / sizeof(TwoTexelRamp);
+    size_t gradSpanCount = m_gradSpanData.bytesWritten() / sizeof(GradientSpan);
+    size_t tessVertexSpanCount = m_tessSpanData.bytesWritten() / sizeof(TessVertexSpan);
     size_t tessDataHeight = resource_texture_height<kTessTextureWidth>(m_tessVertexCount);
 
-    // Upload all non-empty buffers before flushing.
-    m_pathBuffer.submit();
-    m_contourBuffer.submit();
-    m_simpleColorRampsBuffer.submit();
-    m_gradSpanBuffer.submit();
-    m_tessSpanBuffer.submit();
-    m_triangleBuffer.submit();
+    // Unmap all non-empty buffers before flushing.
+    if (m_pathData)
+    {
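+        // Path data is stored as rgba32ui texels (4 channels x 4 bytes = 16 bytes per texel), so
+        // convert the byte count to texels before computing the updated texture region.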
+        size_t texelsWritten = m_pathData.bytesWritten() / (sizeof(uint32_t) * 4);
+        size_t widthWritten = std::min(texelsWritten, kPathTextureWidthInTexels);
+        size_t heightWritten = resource_texture_height<kPathTextureWidthInTexels>(texelsWritten);
+        m_impl->unmapPathTexture(widthWritten, heightWritten);
+        m_pathData.reset();
+    }
+    if (m_contourData)
+    {
+        size_t texelsWritten = m_contourData.bytesWritten() / (sizeof(uint32_t) * 4);
+        size_t widthWritten = std::min(texelsWritten, kContourTextureWidthInTexels);
+        size_t heightWritten = resource_texture_height<kContourTextureWidthInTexels>(texelsWritten);
+        m_impl->unmapContourTexture(widthWritten, heightWritten);
+        m_contourData.reset();
+    }
+    if (m_simpleColorRampsData)
+    {
+        m_impl->unmapSimpleColorRampsBuffer(m_simpleColorRampsData.bytesWritten());
+        m_simpleColorRampsData.reset();
+    }
+    if (m_gradSpanData)
+    {
+        m_impl->unmapGradSpanBuffer(m_gradSpanData.bytesWritten());
+        m_gradSpanData.reset();
+    }
+    if (m_tessSpanData)
+    {
+        m_impl->unmapTessVertexSpanBuffer(m_tessSpanData.bytesWritten());
+        m_tessSpanData.reset();
+    }
+    if (m_triangleVertexData)
+    {
+        m_impl->unmapTriangleVertexBuffer(m_triangleVertexData.bytesWritten());
+        m_triangleVertexData.reset();
+    }
 
     // Update the uniform buffer for drawing if needed.
     FlushUniforms uniformData(m_complexGradients.size(),
@@ -1032,33 +1052,35 @@
                               m_platformFeatures);
     if (!bits_equal(&m_cachedUniformData, &uniformData))
     {
-        m_uniformBuffer.ensureMapped();
-        m_uniformBuffer.emplace_back(uniformData);
-        m_uniformBuffer.submit();
+        m_impl->updateFlushUniforms(&uniformData);
         m_cachedUniformData = uniformData;
     }
 
     FlushDescriptor flushDesc;
-    flushDesc.flushType = flushType;
+    flushDesc.renderTarget = frameDescriptor().renderTarget.get();
     flushDesc.loadAction =
         m_isFirstFlushOfFrame ? frameDescriptor().loadAction : LoadAction::preserveRenderTarget;
+    flushDesc.clearColor = frameDescriptor().clearColor;
     flushDesc.complexGradSpanCount = gradSpanCount;
     flushDesc.tessVertexSpanCount = tessVertexSpanCount;
     flushDesc.simpleGradTexelsWidth = std::min(simpleColorRampCount * 2, kGradTextureWidth);
     flushDesc.simpleGradTexelsHeight =
         resource_texture_height<kGradTextureWidthInSimpleRamps>(simpleColorRampCount);
-    flushDesc.complexGradRowsTop = m_reservedGradTextureRowsForSimpleRamps;
+    flushDesc.complexGradRowsTop = m_reservedSimpleGradientRowCount;
     flushDesc.complexGradRowsHeight = m_complexGradients.size();
     flushDesc.tessDataHeight = tessDataHeight;
     flushDesc.needsClipBuffer = needsClipBuffer;
-    onFlush(flushDesc);
+    flushDesc.hasTriangleVertices = m_maxTriangleVertexCount > 0;
+    flushDesc.wireframe = frameDescriptor().wireframe;
+    flushDesc.drawList = &m_drawList;
+    m_impl->flush(flushDesc);
 
     m_currentFrameResourceUsage.maxPathID += m_currentPathID;
     m_currentFrameResourceUsage.maxContourID += m_currentContourID;
     m_currentFrameResourceUsage.maxSimpleGradients += m_simpleGradients.size();
     m_currentFrameResourceUsage.maxComplexGradientSpans += gradSpanCount;
     m_currentFrameResourceUsage.maxTessellationSpans += tessVertexSpanCount;
-    m_currentFrameResourceUsage.triangleVertexBufferSize += m_maxTriangleVertexCount;
+    m_currentFrameResourceUsage.triangleVertexBufferCount += m_maxTriangleVertexCount;
     m_currentFrameResourceUsage.gradientTextureHeight +=
         resource_texture_height<kGradTextureWidthInSimpleRamps>(m_simpleGradients.size()) +
         m_complexGradients.size();
@@ -1105,9 +1127,15 @@
     m_isFirstFlushOfFrame = false;
 
     m_drawList.reset();
-    m_drawListCount = 0;
 
     // Delete all objects that were allocated for this flush using the TrivialBlockAllocator.
     m_trivialPerFlushAllocator.reset();
+
+    if (flushType == FlushType::intermediate)
+    {
+        // The frame isn't complete yet. The caller will begin preparing a new flush immediately
+        // after this method returns, so lock buffers for the next flush now.
+        m_impl->prepareToMapBuffers();
+    }
 }
 } // namespace rive::pls
diff --git a/renderer/pls_render_context_buffer_ring_impl.cpp b/renderer/pls_render_context_buffer_ring_impl.cpp
new file mode 100644
index 0000000..64cfee4
--- /dev/null
+++ b/renderer/pls_render_context_buffer_ring_impl.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2022 Rive
+ */
+
+#include "rive/pls/pls_render_context_buffer_ring_impl.hpp"
+
+namespace rive::pls
+{
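+// The resize*() overrides rebuild the corresponding buffer ring at the requested size. Texel
+// buffers are sized in texels; the vertex and pixel-unpack buffers are sized in bytes.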
+void PLSRenderContextBufferRingImpl::resizePathTexture(size_t width, size_t height)
+{
+    m_pathBuffer = makeTexelBufferRing(TexelBufferRing::Format::rgba32ui,
+                                       width / kPathTexelsPerItem,
+                                       height,
+                                       kPathTexelsPerItem,
+                                       kPathTextureIdx,
+                                       TexelBufferRing::Filter::nearest);
+}
+
+void PLSRenderContextBufferRingImpl::resizeContourTexture(size_t width, size_t height)
+{
+    m_contourBuffer = makeTexelBufferRing(TexelBufferRing::Format::rgba32ui,
+                                          width / kContourTexelsPerItem,
+                                          height,
+                                          kContourTexelsPerItem,
+                                          pls::kContourTextureIdx,
+                                          TexelBufferRing::Filter::nearest);
+}
+
+void PLSRenderContextBufferRingImpl::resizeSimpleColorRampsBuffer(size_t sizeInBytes)
+{
+    m_simpleColorRampsBuffer =
+        makePixelUnpackBufferRing(sizeInBytes / sizeof(TwoTexelRamp), sizeof(TwoTexelRamp));
+}
+
+void PLSRenderContextBufferRingImpl::resizeGradSpanBuffer(size_t sizeInBytes)
+{
+    m_gradSpanBuffer =
+        makeVertexBufferRing(sizeInBytes / sizeof(GradientSpan), sizeof(GradientSpan));
+}
+
+void PLSRenderContextBufferRingImpl::resizeTessVertexSpanBuffer(size_t sizeInBytes)
+{
+    m_tessSpanBuffer =
+        makeVertexBufferRing(sizeInBytes / sizeof(TessVertexSpan), sizeof(TessVertexSpan));
+}
+
+void PLSRenderContextBufferRingImpl::resizeTriangleVertexBuffer(size_t sizeInBytes)
+{
+    m_triangleBuffer =
+        makeVertexBufferRing(sizeInBytes / sizeof(TriangleVertex), sizeof(TriangleVertex));
+}
+
+void PLSRenderContextBufferRingImpl::unmapPathTexture(size_t widthWritten, size_t heightWritten)
+{
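+    // widthWritten and heightWritten are in rgba32ui texels; each texel is 4 channels x 4 bytes.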
+    m_pathBuffer->unmapAndSubmitBuffer(heightWritten * widthWritten * 4 * 4);
+}
+
+void PLSRenderContextBufferRingImpl::unmapContourTexture(size_t widthWritten, size_t heightWritten)
+{
+    m_contourBuffer->unmapAndSubmitBuffer(heightWritten * widthWritten * 4 * 4);
+}
+
+void PLSRenderContextBufferRingImpl::unmapSimpleColorRampsBuffer(size_t bytesWritten)
+{
+    m_simpleColorRampsBuffer->unmapAndSubmitBuffer(bytesWritten);
+}
+
+void PLSRenderContextBufferRingImpl::unmapGradSpanBuffer(size_t bytesWritten)
+{
+    m_gradSpanBuffer->unmapAndSubmitBuffer(bytesWritten);
+}
+
+void PLSRenderContextBufferRingImpl::unmapTessVertexSpanBuffer(size_t bytesWritten)
+{
+    m_tessSpanBuffer->unmapAndSubmitBuffer(bytesWritten);
+}
+
+void PLSRenderContextBufferRingImpl::unmapTriangleVertexBuffer(size_t bytesWritten)
+{
+    m_triangleBuffer->unmapAndSubmitBuffer(bytesWritten);
+}
+
+void PLSRenderContextBufferRingImpl::updateFlushUniforms(const FlushUniforms* uniformData)
+{
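+    // Lazily create the uniform buffer ring the first time the flush uniforms change; each
+    // buffer in the ring holds a single FlushUniforms struct.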
+    if (m_uniformBuffer == nullptr)
+    {
+        m_uniformBuffer = makeUniformBufferRing(sizeof(FlushUniforms));
+    }
+    memcpy(m_uniformBuffer->mapBuffer(), uniformData, sizeof(FlushUniforms));
+    m_uniformBuffer->unmapAndSubmitBuffer(sizeof(FlushUniforms));
+}
+} // namespace rive::pls