Revert "Merge GrOpList and GrRTOpList and rename to GrOpsTask."

This reverts commit 2a5954140b49d18e5161a30a4ae2c7ac28bc1993.

Reason for revert: breaking everything

Original change's description:
> Merge GrOpList and GrRTOpList and rename to GrOpsTask.
> 
> Change-Id: I8f4f2218a30fd0541a8f79f7bb9850f9500cd243
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236343
> Commit-Queue: Greg Daniel <egdaniel@google.com>
> Reviewed-by: Brian Salomon <bsalomon@google.com>

TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com

Change-Id: I27840ea0343e8e6b388556afb7bd2e76386d611d
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236349
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
diff --git a/gn/gpu.gni b/gn/gpu.gni
index bb4210d..b37ab4f 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -121,8 +121,8 @@
   "$_src/gpu/GrNonAtomicRef.h",
   "$_src/gpu/GrOpFlushState.cpp",
   "$_src/gpu/GrOpFlushState.h",
-  "$_src/gpu/GrOpsTask.cpp",
-  "$_src/gpu/GrOpsTask.h",
+  "$_src/gpu/GrOpList.cpp",
+  "$_src/gpu/GrOpList.h",
   "$_src/gpu/GrPaint.cpp",
   "$_src/gpu/GrPaint.h",
   "$_src/gpu/GrPathRendererChain.cpp",
@@ -167,6 +167,8 @@
   "$_src/gpu/GrRenderTargetContext.cpp",
   "$_src/gpu/GrRenderTargetContext.h",
   "$_src/gpu/GrRenderTargetContextPriv.h",
+  "$_src/gpu/GrRenderTargetOpList.cpp",
+  "$_src/gpu/GrRenderTargetOpList.h",
   "$_src/gpu/GrResourceAllocator.cpp",
   "$_src/gpu/GrResourceAllocator.h",
   "$_src/gpu/GrResourceCache.cpp",
@@ -574,7 +576,7 @@
   "$_src/gpu/ccpr/GrCCPathProcessor.h",
   "$_src/gpu/ccpr/GrCCPerFlushResources.cpp",
   "$_src/gpu/ccpr/GrCCPerFlushResources.h",
-  "$_src/gpu/ccpr/GrCCPerOpsTaskPaths.h",
+  "$_src/gpu/ccpr/GrCCPerOpListPaths.h",
   "$_src/gpu/ccpr/GrCCQuadraticShader.cpp",
   "$_src/gpu/ccpr/GrCCQuadraticShader.h",
   "$_src/gpu/ccpr/GrCCStrokeGeometry.cpp",
diff --git a/include/gpu/GrGpuResource.h b/include/gpu/GrGpuResource.h
index 3e4c6c6..ce74d74 100644
--- a/include/gpu/GrGpuResource.h
+++ b/include/gpu/GrGpuResource.h
@@ -24,7 +24,7 @@
  *
  * Gpu resources can have three types of refs:
  *   1) Normal ref (+ by ref(), - by unref()): These are used by code that is issuing draw calls
- *      that read and write the resource via GrOpsTask and by any object that must own a
+ *      that read and write the resource via GrOpList and by any object that must own a
  *      GrGpuResource and is itself owned (directly or indirectly) by Skia-client code.
  *   2) Pending read (+ by addPendingRead(), - by completedRead()): GrContext has scheduled a read
  *      of the resource by the GPU as a result of a skia API call but hasn't executed it yet.
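
The comment above distinguishes three ref types on a GPU resource. As a rough illustration of that lifetime model (hypothetical class and method names, not Skia's actual implementation), a resource only becomes purgeable once all three counts reach zero:

    #include <cassert>

    // Hypothetical sketch of the three-ref lifetime model described above.
    class SketchGpuResource {
    public:
        void ref()   { ++fRefCnt; }
        void unref() { assert(fRefCnt > 0); --fRefCnt; this->maybePurge(); }

        void addPendingRead() { ++fPendingReads; }
        void completedRead()  { assert(fPendingReads > 0); --fPendingReads; this->maybePurge(); }

        void addPendingWrite() { ++fPendingWrites; }
        void completedWrite()  { assert(fPendingWrites > 0); --fPendingWrites; this->maybePurge(); }

    private:
        void maybePurge() {
            // Only when no ref of any type remains may the resource be recycled.
            if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
                // return the resource to the cache, or free it
            }
        }

        int fRefCnt = 0;
        int fPendingReads = 0;
        int fPendingWrites = 0;
    };
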
diff --git a/include/private/GrRecordingContext.h b/include/private/GrRecordingContext.h
index 1f44bee..447d87f 100644
--- a/include/private/GrRecordingContext.h
+++ b/include/private/GrRecordingContext.h
@@ -40,7 +40,7 @@
 
     GrRecordingContext(GrBackendApi, const GrContextOptions&, uint32_t contextID);
     bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>) override;
-    void setupDrawingManager(bool sortOpsTasks, bool reduceOpsTaskSplitting);
+    void setupDrawingManager(bool sortOpLists, bool reduceOpListSplitting);
 
     void abandonContext() override;
 
diff --git a/include/private/GrTypesPriv.h b/include/private/GrTypesPriv.h
index 99da2eb..1c5b2b7 100644
--- a/include/private/GrTypesPriv.h
+++ b/include/private/GrTypesPriv.h
@@ -219,7 +219,7 @@
 };
 
 /**
- * This enum is used to specify the load operation to be used when an opsTask/GrGpuCommandBuffer
+ * This enum is used to specify the load operation to be used when an opList/GrGpuCommandBuffer
  * begins execution.
  */
 enum class GrLoadOp {
@@ -229,7 +229,7 @@
 };
 
 /**
- * This enum is used to specify the store operation to be used when an opsTask/GrGpuCommandBuffer
+ * This enum is used to specify the store operation to be used when an opList/GrGpuCommandBuffer
  * ends execution.
  */
 enum class GrStoreOp {
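
For context on the two enums touched here: the load op runs when an opList/command buffer begins execution and the store op when it ends. A minimal sketch (illustrative names, not Skia API) of how a render pass might choose them:

    // Sketch only; names are illustrative.
    enum class LoadOp  { kLoad, kClear, kDiscard };
    enum class StoreOp { kStore, kDiscard };

    struct ColorPassOps {
        LoadOp  fLoad;
        StoreOp fStore;
    };

    ColorPassOps choosePassOps(bool everyPixelOverwritten, bool resultReadLater) {
        ColorPassOps ops;
        // If the pass overwrites every pixel, the old contents need not be loaded.
        ops.fLoad  = everyPixelOverwritten ? LoadOp::kDiscard : LoadOp::kLoad;
        // If nothing reads the target afterwards, the result need not be stored.
        ops.fStore = resultReadLater ? StoreOp::kStore : StoreOp::kDiscard;
        return ops;
    }
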
diff --git a/include/private/SkDeferredDisplayList.h b/include/private/SkDeferredDisplayList.h
index e8fc35b..9ae29d0 100644
--- a/include/private/SkDeferredDisplayList.h
+++ b/include/private/SkDeferredDisplayList.h
@@ -19,7 +19,7 @@
 #include <map>
 class GrRenderTask;
 class GrRenderTargetProxy;
-struct GrCCPerOpsTaskPaths;
+struct GrCCPerOpListPaths;
 #endif
 
 /*
@@ -67,7 +67,7 @@
 
 #if SK_SUPPORT_GPU
     // This needs to match the same type in GrCoverageCountingPathRenderer.h
-    using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
+    using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
     SkTArray<sk_sp<GrRenderTask>>   fRenderTasks;
     PendingPathsMap                 fPendingPaths;  // This is the path data from CCPR.
diff --git a/src/core/SkDeferredDisplayList.cpp b/src/core/SkDeferredDisplayList.cpp
index c89cd99..7d2b971 100644
--- a/src/core/SkDeferredDisplayList.cpp
+++ b/src/core/SkDeferredDisplayList.cpp
@@ -13,7 +13,7 @@
 
 #if SK_SUPPORT_GPU
 #include "src/gpu/GrRenderTask.h"
-#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
+#include "src/gpu/ccpr/GrCCPerOpListPaths.h"
 #endif
 
 SkDeferredDisplayList::SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
diff --git a/src/core/SkTTopoSort.h b/src/core/SkTTopoSort.h
index bc1b07e..9df95ee 100644
--- a/src/core/SkTTopoSort.h
+++ b/src/core/SkTTopoSort.h
@@ -77,7 +77,7 @@
 //
 // TODO: potentially add a version that takes a seed node and just outputs that
 // node and all the nodes on which it depends. This could be used to partially
-// flush a GrRenderTask DAG.
+// flush a GrOpList DAG.
 template <typename T, typename Traits = T>
 bool SkTTopoSort(SkTArray<sk_sp<T>>* graph) {
     SkTArray<sk_sp<T>> result;
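
SkTTopoSort orders the DAG so that every node appears after the nodes it depends on. A compact sketch of the same idea using std containers (this is not SkTTopoSort's actual traversal, which also detects cycles and reuses the input array):

    #include <functional>
    #include <vector>

    struct Node {
        std::vector<Node*> fDependencies;  // nodes this node depends on
        bool fVisited = false;
    };

    // Post-order DFS: dependencies are emitted before their dependents.
    // Assumes an acyclic graph.
    void topoSort(const std::vector<Node*>& graph, std::vector<Node*>* sorted) {
        std::function<void(Node*)> visit = [&](Node* node) {
            if (node->fVisited) {
                return;
            }
            node->fVisited = true;
            for (Node* dep : node->fDependencies) {
                visit(dep);
            }
            sorted->push_back(node);
        };
        for (Node* node : graph) {
            visit(node);
        }
    }
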
diff --git a/src/gpu/GrAuditTrail.cpp b/src/gpu/GrAuditTrail.cpp
index 9c72acf..609fe84 100644
--- a/src/gpu/GrAuditTrail.cpp
+++ b/src/gpu/GrAuditTrail.cpp
@@ -18,7 +18,7 @@
     auditOp->fName = op->name();
     auditOp->fBounds = op->bounds();
     auditOp->fClientID = kGrAuditTrailInvalidID;
-    auditOp->fOpsTaskID = kGrAuditTrailInvalidID;
+    auditOp->fOpListID = kGrAuditTrailInvalidID;
     auditOp->fChildID = kGrAuditTrailInvalidID;
 
     // consume the current stack trace if any
@@ -40,15 +40,15 @@
     }
 
     // Our algorithm doesn't bother to reorder inside of an OpNode so the ChildID will start at 0
-    auditOp->fOpsTaskID = fOpsTask.count();
+    auditOp->fOpListID = fOpList.count();
     auditOp->fChildID = 0;
 
     // We use the op pointer as a key to find the OpNode we are 'glomming' ops onto
-    fIDLookup.set(op->uniqueID(), auditOp->fOpsTaskID);
+    fIDLookup.set(op->uniqueID(), auditOp->fOpListID);
     OpNode* opNode = new OpNode(proxyID);
     opNode->fBounds = op->bounds();
     opNode->fChildren.push_back(auditOp);
-    fOpsTask.emplace_back(opNode);
+    fOpList.emplace_back(opNode);
 }
 
 void GrAuditTrail::opsCombined(const GrOp* consumer, const GrOp* consumed) {
@@ -56,22 +56,22 @@
     int* indexPtr = fIDLookup.find(consumer->uniqueID());
     SkASSERT(indexPtr);
     int index = *indexPtr;
-    SkASSERT(index < fOpsTask.count() && fOpsTask[index]);
-    OpNode& consumerOp = *fOpsTask[index];
+    SkASSERT(index < fOpList.count() && fOpList[index]);
+    OpNode& consumerOp = *fOpList[index];
 
     // Look up the op which will be glommed
     int* consumedPtr = fIDLookup.find(consumed->uniqueID());
     SkASSERT(consumedPtr);
     int consumedIndex = *consumedPtr;
-    SkASSERT(consumedIndex < fOpsTask.count() && fOpsTask[consumedIndex]);
-    OpNode& consumedOp = *fOpsTask[consumedIndex];
+    SkASSERT(consumedIndex < fOpList.count() && fOpList[consumedIndex]);
+    OpNode& consumedOp = *fOpList[consumedIndex];
 
     // steal all of consumed's ops
     for (int i = 0; i < consumedOp.fChildren.count(); i++) {
         Op* childOp = consumedOp.fChildren[i];
 
         // set the ids for the child op
-        childOp->fOpsTaskID = index;
+        childOp->fOpListID = index;
         childOp->fChildID = consumerOp.fChildren.count();
         consumerOp.fChildren.push_back(childOp);
     }
@@ -79,15 +79,15 @@
     // Update the bounds for the combineWith node
     consumerOp.fBounds = consumer->bounds();
 
-    // remove the old node from our opsTask and clear the combinee's lookup
+    // remove the old node from our opList and clear the combinee's lookup
     // NOTE: because we can't change the shape of the oplist, we use a sentinel
-    fOpsTask[consumedIndex].reset(nullptr);
+    fOpList[consumedIndex].reset(nullptr);
     fIDLookup.remove(consumed->uniqueID());
 }
 
-void GrAuditTrail::copyOutFromOpsTask(OpInfo* outOpInfo, int opsTaskID) {
-    SkASSERT(opsTaskID < fOpsTask.count());
-    const OpNode* bn = fOpsTask[opsTaskID].get();
+void GrAuditTrail::copyOutFromOpList(OpInfo* outOpInfo, int opListID) {
+    SkASSERT(opListID < fOpList.count());
+    const OpNode* bn = fOpList[opListID].get();
     SkASSERT(bn);
     outOpInfo->fBounds = bn->fBounds;
     outOpInfo->fProxyUniqueID    = bn->fProxyUniqueID;
@@ -105,30 +105,30 @@
         // We track which oplistID we're currently looking at.  If it changes, then we need to push
         // back a new op info struct.  We happen to know that ops are in sequential order in the
         // oplist, otherwise we'd have to do more bookkeeping
-        int currentOpsTaskID = kGrAuditTrailInvalidID;
+        int currentOpListID = kGrAuditTrailInvalidID;
         for (int i = 0; i < (*opsLookup)->count(); i++) {
             const Op* op = (**opsLookup)[i];
 
             // Because we will copy out all of the ops associated with a given op list id every time
             // the id changes, we only have to update our struct when the id changes.
-            if (kGrAuditTrailInvalidID == currentOpsTaskID || op->fOpsTaskID != currentOpsTaskID) {
+            if (kGrAuditTrailInvalidID == currentOpListID || op->fOpListID != currentOpListID) {
                 OpInfo& outOpInfo = outInfo->push_back();
 
                 // copy out all of the ops so the client can display them even if they have a
                 // different clientID
-                this->copyOutFromOpsTask(&outOpInfo, op->fOpsTaskID);
+                this->copyOutFromOpList(&outOpInfo, op->fOpListID);
             }
         }
     }
 }
 
-void GrAuditTrail::getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID) {
-    this->copyOutFromOpsTask(outInfo, opsTaskID);
+void GrAuditTrail::getBoundsByOpListID(OpInfo* outInfo, int opListID) {
+    this->copyOutFromOpList(outInfo, opListID);
 }
 
 void GrAuditTrail::fullReset() {
     SkASSERT(fEnabled);
-    fOpsTask.reset();
+    fOpList.reset();
     fIDLookup.reset();
     // free all client ops
     fClientIDLookup.foreach ([](const int&, Ops** ops) { delete *ops; });
@@ -152,7 +152,7 @@
 
 void GrAuditTrail::toJson(SkJSONWriter& writer) const {
     writer.beginObject();
-    JsonifyTArray(writer, "Ops", fOpsTask);
+    JsonifyTArray(writer, "Ops", fOpList);
     writer.endObject();
 }
 
@@ -178,7 +178,7 @@
     writer.beginObject();
     writer.appendString("Name", fName.c_str());
     writer.appendS32("ClientID", fClientID);
-    writer.appendS32("OpsTaskID", fOpsTaskID);
+    writer.appendS32("OpListID", fOpListID);
     writer.appendS32("ChildID", fChildID);
     skrect_to_json(writer, "Bounds", fBounds);
     if (fStackTrace.count()) {
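
The renames above are mechanical, but the underlying "glomming" bookkeeping is worth spelling out: when one op consumes another, the consumer's node steals the consumed node's children, and the consumed slot is nulled rather than erased so existing indices stay valid. A standalone sketch (std containers, hypothetical types; the children are non-owning, as the real ops live in a pool):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct OpRecord { int fListID = -1; int fChildID = -1; };
    struct OpNode   { std::vector<OpRecord*> fChildren; };

    void glom(std::vector<std::unique_ptr<OpNode>>& nodes,
              std::unordered_map<uint32_t, int>& idLookup,
              uint32_t consumerID, uint32_t consumedID) {
        int consumerIndex = idLookup.at(consumerID);
        int consumedIndex = idLookup.at(consumedID);
        OpNode& consumer = *nodes[consumerIndex];
        OpNode& consumed = *nodes[consumedIndex];

        // Steal all of the consumed node's ops, re-parenting them.
        for (OpRecord* child : consumed.fChildren) {
            child->fListID  = consumerIndex;
            child->fChildID = (int)consumer.fChildren.size();
            consumer.fChildren.push_back(child);
        }

        // Null the slot (sentinel) instead of erasing it, so indices stored
        // elsewhere keep pointing at the right nodes.
        nodes[consumedIndex].reset(nullptr);
        idLookup.erase(consumedID);
    }
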
diff --git a/src/gpu/GrAuditTrail.h b/src/gpu/GrAuditTrail.h
index dd6549e..74f47d3 100644
--- a/src/gpu/GrAuditTrail.h
+++ b/src/gpu/GrAuditTrail.h
@@ -51,12 +51,12 @@
         GrAuditTrail* fAuditTrail;
     };
 
-    class AutoManageOpsTask {
+    class AutoManageOpList {
     public:
-        AutoManageOpsTask(GrAuditTrail* auditTrail)
+        AutoManageOpList(GrAuditTrail* auditTrail)
                 : fAutoEnable(auditTrail), fAuditTrail(auditTrail) {}
 
-        ~AutoManageOpsTask() { fAuditTrail->fullReset(); }
+        ~AutoManageOpList() { fAuditTrail->fullReset(); }
 
     private:
         AutoEnable fAutoEnable;
@@ -116,7 +116,7 @@
     };
 
     void getBoundsByClientID(SkTArray<OpInfo>* outInfo, int clientID);
-    void getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID);
+    void getBoundsByOpListID(OpInfo* outInfo, int opListID);
 
     void fullReset();
 
@@ -130,7 +130,7 @@
         SkTArray<SkString> fStackTrace;
         SkRect fBounds;
         int fClientID;
-        int fOpsTaskID;
+        int fOpListID;
         int fChildID;
     };
     typedef SkTArray<std::unique_ptr<Op>, true> OpPool;
@@ -145,9 +145,9 @@
         Ops                            fChildren;
         const GrSurfaceProxy::UniqueID fProxyUniqueID;
     };
-    typedef SkTArray<std::unique_ptr<OpNode>, true> OpsTask;
+    typedef SkTArray<std::unique_ptr<OpNode>, true> OpList;
 
-    void copyOutFromOpsTask(OpInfo* outOpInfo, int opsTask);
+    void copyOutFromOpList(OpInfo* outOpInfo, int opListID);
 
     template <typename T>
     static void JsonifyTArray(SkJSONWriter& writer, const char* name, const T& array);
@@ -155,7 +155,7 @@
     OpPool fOpPool;
     SkTHashMap<uint32_t, int> fIDLookup;
     SkTHashMap<int, Ops*> fClientIDLookup;
-    OpsTask fOpsTask;
+    OpList fOpList;
     SkTArray<SkString> fCurrentStackTrace;
 
     // The client can pass in an optional client ID which we will use to mark the ops
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index 5d862d4..07470ce 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -245,10 +245,10 @@
         }
     }
 
-    // The opsTask ID must not be looked up until AFTER producing the clip mask (if any). That step
-    // can cause a flush or otherwise change which opstask our draw is going into.
-    uint32_t opsTaskID = renderTargetContext->getOpsTask()->uniqueID();
-    if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, opsTaskID)) {
+    // The opList ID must not be looked up until AFTER producing the clip mask (if any). That step
+    // can cause a flush or otherwise change which opList our draw is going into.
+    uint32_t opListID = renderTargetContext->getOpList()->uniqueID();
+    if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, opListID)) {
         out->addCoverageFP(std::move(clipFPs));
     }
 
diff --git a/src/gpu/GrCopyRenderTask.cpp b/src/gpu/GrCopyRenderTask.cpp
index 34af9a1..6c41370 100644
--- a/src/gpu/GrCopyRenderTask.cpp
+++ b/src/gpu/GrCopyRenderTask.cpp
@@ -54,7 +54,7 @@
 
 void GrCopyRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
     // This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
-    // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
+    // fEndOfOpListOpIndices will remain in sync), so we create a fake op# to capture the fact that
     // we read fSrcProxy and copy to fTarget.
     alloc->addInterval(fSrcProxy.get(), alloc->curOp(), alloc->curOp(),
                        GrResourceAllocator::ActualUse::kYes);
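
The "fake op#" comment refers to the resource allocator's interval model: each task registers a [start, end] usage interval per proxy against a monotonically increasing op counter, and even op-less tasks must advance that counter so the per-task end indices stay aligned. A rough sketch of the bookkeeping (illustrative, not GrResourceAllocator's API):

    #include <cstdint>
    #include <vector>

    struct Interval {
        const void* fProxy;        // which resource is used
        uint32_t    fStart, fEnd;  // inclusive op# range of the use
    };

    class IntervalTracker {
    public:
        uint32_t curOp() const { return fCurOp; }
        void incOps(uint32_t n = 1) { fCurOp += n; }

        void addInterval(const void* proxy, uint32_t start, uint32_t end) {
            fIntervals.push_back({proxy, start, end});
        }

        // An op-less task (like a copy) still records usage at the current
        // op# for both surfaces involved, then bumps the counter.
        void addFakeOpFor(const void* src, const void* dst) {
            this->addInterval(src, this->curOp(), this->curOp());
            this->addInterval(dst, this->curOp(), this->curOp());
            this->incOps();
        }

    private:
        uint32_t fCurOp = 0;
        std::vector<Interval> fIntervals;
    };
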
diff --git a/src/gpu/GrDDLContext.cpp b/src/gpu/GrDDLContext.cpp
index 4d14504..4cfdad4 100644
--- a/src/gpu/GrDDLContext.cpp
+++ b/src/gpu/GrDDLContext.cpp
@@ -52,7 +52,7 @@
             return false;
         }
 
-        // DDL contexts/drawing managers always sort the oplists and attempt to reduce opsTask
+        // DDL contexts/drawing managers always sort the oplists and attempt to reduce opList
         // splitting.
         this->setupDrawingManager(true, true);
 
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 75abb7f..3a5e750 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -95,13 +95,13 @@
     return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
 }
 
-void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
-    fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
+void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& opLists) {
+    fRenderTasks.push_back_n(opLists.count(), opLists.begin());
 }
 
-void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
-    SkASSERT(renderTasks->empty());
-    renderTasks->swap(fRenderTasks);
+void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* opLists) {
+    SkASSERT(opLists->empty());
+    opLists->swap(fRenderTasks);
 }
 
 void GrDrawingManager::RenderTaskDAG::prepForFlush() {
@@ -112,18 +112,18 @@
     }
 
 #ifdef SK_DEBUG
-    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
-    // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
+    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
+    // share the same backing GrSurfaceProxy it means the opList was artificially split.
     if (fRenderTasks.count()) {
-        GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
+        GrRenderTargetOpList* prevOpList = fRenderTasks[0]->asRenderTargetOpList();
         for (int i = 1; i < fRenderTasks.count(); ++i) {
-            GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();
+            GrRenderTargetOpList* curOpList = fRenderTasks[i]->asRenderTargetOpList();
 
-            if (prevOpsTask && curOpsTask) {
-                SkASSERT(prevOpsTask->fTarget.get() != curOpsTask->fTarget.get());
+            if (prevOpList && curOpList) {
+                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
             }
 
-            prevOpsTask = curOpsTask;
+            prevOpList = curOpList;
         }
     }
 #endif
@@ -146,7 +146,7 @@
         // no renderTask should receive a dependency
         fRenderTasks[i]->makeClosed(*caps);
 
-        // We shouldn't need to do this, but it turns out some clients still hold onto opsTasks
+        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
         // after a cleanup.
         // MDB TODO: is this still true?
         if (!fRenderTasks[i]->unique()) {
@@ -164,7 +164,7 @@
                                    const GrPathRendererChain::Options& optionsForPathRendererChain,
                                    const GrTextContext::Options& optionsForTextContext,
                                    bool sortRenderTasks,
-                                   bool reduceOpsTaskSplitting)
+                                   bool reduceOpListSplitting)
         : fContext(context)
         , fOptionsForPathRendererChain(optionsForPathRendererChain)
         , fOptionsForTextContext(optionsForTextContext)
@@ -173,7 +173,7 @@
         , fPathRendererChain(nullptr)
         , fSoftwarePathRenderer(nullptr)
         , fFlushing(false)
-        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
+        , fReduceOpListSplitting(reduceOpListSplitting) {
 }
 
 void GrDrawingManager::cleanup() {
@@ -256,11 +256,11 @@
     auto resourceCache = direct->priv().getResourceCache();
 
     // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
-    // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
-    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
+    // to flush mid-draw. In that case, the SkGpuDevice's opLists won't be closed but need to be
+    // flushed anyway. Closing such opLists here will mean new ones will be created to replace them
     // if the SkGpuDevice(s) write to them again.
     fDAG.closeAll(fContext->priv().caps());
-    fActiveOpsTask = nullptr;
+    fActiveOpList = nullptr;
 
     fDAG.prepForFlush();
     if (!fCpuBufferCache) {
@@ -286,21 +286,21 @@
             onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
                                       fFlushingRenderTaskIDs.count(), &renderTargetContexts);
             for (const auto& rtc : renderTargetContexts) {
-                sk_sp<GrOpsTask> onFlushOpsTask = sk_ref_sp(rtc->getOpsTask());
-                if (!onFlushOpsTask) {
+                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
+                if (!onFlushOpList) {
                     continue;   // Odd - but not a big deal
                 }
 #ifdef SK_DEBUG
                 // OnFlush callbacks are already invoked during flush, and are therefore expected to
                 // handle resource allocation & usage on their own. (No deferred or lazy proxies!)
-                onFlushOpsTask->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
+                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
                     SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                     SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                 });
 #endif
-                onFlushOpsTask->makeClosed(*fContext->priv().caps());
-                onFlushOpsTask->prepare(&flushState);
-                fOnFlushCBOpsTasks.push_back(std::move(onFlushOpsTask));
+                onFlushOpList->makeClosed(*fContext->priv().caps());
+                onFlushOpList->prepare(&flushState);
+                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
             }
             renderTargetContexts.reset();
         }
@@ -323,7 +323,7 @@
             if (fDAG.renderTask(i)) {
                 fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
             }
-            alloc.markEndOfOpsTask(i);
+            alloc.markEndOfOpList(i);
         }
         alloc.determineRecyclability();
 
@@ -354,7 +354,7 @@
 
 #ifdef SK_DEBUG
     for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
-        // If there are any remaining opsTasks at this point, make sure they will not survive the
+        // If there are any remaining opLists at this point, make sure they will not survive the
         // flush. Otherwise we need to call endFlush() on them.
         // http://skbug.com/7111
         SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
@@ -366,7 +366,7 @@
 #ifdef SK_DEBUG
     // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
     // When we move to partial flushes this assert will no longer be valid.
-    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
+    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opLists
     // will be stored in the DDL's GrOpMemoryPools.
     GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
     opMemoryPool->isEmpty();
@@ -401,7 +401,7 @@
     SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());
 
 #if GR_FLUSH_TIME_OP_SPEW
-    SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
+    SkDebugf("Flushing opLists: %d to %d out of [%d, %d]\n",
                             startIndex, stopIndex, 0, fDAG.numRenderTasks());
     for (int i = startIndex; i < stopIndex; ++i) {
         if (fDAG.renderTask(i)) {
@@ -435,12 +435,12 @@
     static constexpr int kMaxRenderTasksBeforeFlush = 100;
 
     // Execute the onFlush op lists first, if any.
-    for (sk_sp<GrOpsTask>& onFlushOpsTask : fOnFlushCBOpsTasks) {
-        if (!onFlushOpsTask->execute(flushState)) {
-            SkDebugf("WARNING: onFlushOpsTask failed to execute.\n");
+    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
+        if (!onFlushOpList->execute(flushState)) {
+            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
         }
-        SkASSERT(onFlushOpsTask->unique());
-        onFlushOpsTask = nullptr;
+        SkASSERT(onFlushOpList->unique());
+        onFlushOpList = nullptr;
         (*numRenderTasksExecuted)++;
         if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
             flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
@@ -448,7 +448,7 @@
             *numRenderTasksExecuted = 0;
         }
     }
-    fOnFlushCBOpsTasks.reset();
+    fOnFlushCBOpLists.reset();
 
     // Execute the normal op lists.
     for (int i = startIndex; i < stopIndex; ++i) {
@@ -547,7 +547,7 @@
 
     // no renderTask should receive a new command after this
     fDAG.closeAll(fContext->priv().caps());
-    fActiveOpsTask = nullptr;
+    fActiveOpList = nullptr;
 
     fDAG.swap(&ddl->fRenderTasks);
 
@@ -564,19 +564,19 @@
                                           GrRenderTargetProxy* newDest) {
     SkDEBUGCODE(this->validate());
 
-    if (fActiveOpsTask) {
+    if (fActiveOpList) {
         // This is a temporary fix for the partial-MDB world. In that world we're not
-        // reordering so ops that (in the single opsTask world) would've just glommed onto the
-        // end of the single opsTask but referred to a far earlier RT need to appear in their
-        // own opsTask.
-        fActiveOpsTask->makeClosed(*fContext->priv().caps());
-        fActiveOpsTask = nullptr;
+        // reordering so ops that (in the single opList world) would've just glommed onto the
+        // end of the single opList but referred to a far earlier RT need to appear in their
+        // own opList.
+        fActiveOpList->makeClosed(*fContext->priv().caps());
+        fActiveOpList = nullptr;
     }
 
     this->addDDLTarget(newDest);
 
     // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
-    // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
+    // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
     ddl->fLazyProxyData->fReplayDest = newDest;
 
     if (ddl->fPendingPaths.size()) {
@@ -592,30 +592,30 @@
 
 #ifdef SK_DEBUG
 void GrDrawingManager::validate() const {
-    if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
-        SkASSERT(!fActiveOpsTask);
+    if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
+        SkASSERT(!fActiveOpList);
     } else {
-        if (fActiveOpsTask) {
+        if (fActiveOpList) {
             SkASSERT(!fDAG.empty());
-            SkASSERT(!fActiveOpsTask->isClosed());
-            SkASSERT(fActiveOpsTask == fDAG.back());
+            SkASSERT(!fActiveOpList->isClosed());
+            SkASSERT(fActiveOpList == fDAG.back());
         }
 
         for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
-            if (fActiveOpsTask != fDAG.renderTask(i)) {
+            if (fActiveOpList != fDAG.renderTask(i)) {
                 SkASSERT(fDAG.renderTask(i)->isClosed());
             }
         }
 
         if (!fDAG.empty() && !fDAG.back()->isClosed()) {
-            SkASSERT(fActiveOpsTask == fDAG.back());
+            SkASSERT(fActiveOpList == fDAG.back());
         }
     }
 }
 #endif
 
 void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
-    if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
+    if (target && fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
         // In this case we need to close all the renderTasks that rely on the current contents of
         // 'target'. That is bc we're going to update the content of the proxy so they need to be
         // split in case they use both the old and new content. (This is a bit of an overkill: they
@@ -624,43 +624,46 @@
         if (GrRenderTask* lastRenderTask = target->getLastRenderTask()) {
             lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
         }
-    } else if (fActiveOpsTask) {
+    } else if (fActiveOpList) {
         // This is a temporary fix for the partial-MDB world. In that world we're not
-        // reordering so ops that (in the single opsTask world) would've just glommed onto the
-        // end of the single opsTask but referred to a far earlier RT need to appear in their
-        // own opsTask.
-        fActiveOpsTask->makeClosed(*fContext->priv().caps());
-        fActiveOpsTask = nullptr;
+        // reordering so ops that (in the single opList world) would've just glommed onto the
+        // end of the single opList but referred to a far earlier RT need to appear in their
+        // own opList.
+        fActiveOpList->makeClosed(*fContext->priv().caps());
+        fActiveOpList = nullptr;
     }
 }
 
-sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(sk_sp<GrRenderTargetProxy> rtp, bool managedOpsTask) {
+sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(sk_sp<GrRenderTargetProxy> rtp,
+                                                          bool managedOpList) {
     SkDEBUGCODE(this->validate());
     SkASSERT(fContext);
 
     this->closeRenderTasksForNewRenderTask(rtp.get());
 
-    sk_sp<GrOpsTask> opsTask(new GrOpsTask(fContext->priv().refOpMemoryPool(), rtp,
-                                           fContext->priv().auditTrail()));
-    SkASSERT(rtp->getLastRenderTask() == opsTask.get());
+    sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
+                                                        fContext->priv().refOpMemoryPool(),
+                                                        rtp,
+                                                        fContext->priv().auditTrail()));
+    SkASSERT(rtp->getLastRenderTask() == opList.get());
 
-    if (managedOpsTask) {
-        fDAG.add(opsTask);
+    if (managedOpList) {
+        fDAG.add(opList);
 
-        if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) {
-            fActiveOpsTask = opsTask.get();
+        if (!fDAG.sortingRenderTasks() || !fReduceOpListSplitting) {
+            fActiveOpList = opList.get();
         }
     }
 
     SkDEBUGCODE(this->validate());
-    return opsTask;
+    return opList;
 }
 
 GrRenderTask* GrDrawingManager::newTextureResolveRenderTask(
         sk_sp<GrTextureProxy> textureProxy, GrTextureResolveFlags flags, const GrCaps& caps) {
-    // Unlike in the "new opsTask" cases, we do not want to close the active opsTask, nor (if we are
-    // in sorting and opsTask reduction mode) the render tasks that depend on the proxy's current
-    // state. This is because those opsTasks can still receive new ops and because if they refer to
+    // Unlike in the "new opList" cases, we do not want to close the active opList, nor (if we are
+    // in sorting and opList reduction mode) the render tasks that depend on the proxy's current
+    // state. This is because those opLists can still receive new ops and because if they refer to
     // the mipmapped version of 'textureProxy', they will then come to depend on the render task
     // being created here.
     // NOTE: In either case, 'textureProxy' should already be closed at this point (i.e., its state
@@ -671,8 +674,8 @@
     SkASSERT(!previousTaskBeforeMipsResolve || previousTaskBeforeMipsResolve->isClosed());
     SkASSERT(textureProxy->getLastRenderTask() == textureResolveTask.get());
 
-    // Add the new textureResolveTask before the fActiveOpsTask (if not in
-    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
+    // Add the new textureResolveTask before the fActiveOpList (if not in
+    // sorting/opList-splitting-reduction mode) because it will depend upon this resolve task.
     // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
     return fDAG.addBeforeLast(std::move(textureResolveTask));
 }
@@ -702,7 +705,7 @@
     fDAG.add(std::move(task));
     // We have closed the previous active oplist but since a new oplist isn't being added there
     // shouldn't be an active one.
-    SkASSERT(!fActiveOpsTask);
+    SkASSERT(!fActiveOpList);
     SkDEBUGCODE(this->validate());
 }
 
@@ -729,7 +732,7 @@
     fDAG.add(std::move(task));
     // We have closed the previous active oplist but since a new oplist isn't being added there
     // shouldn't be an active one.
-    SkASSERT(!fActiveOpsTask);
+    SkASSERT(!fActiveOpList);
     SkDEBUGCODE(this->validate());
     return true;
 }
@@ -803,7 +806,7 @@
         GrColorType colorType,
         sk_sp<SkColorSpace> colorSpace,
         const SkSurfaceProps* surfaceProps,
-        bool managedOpsTask) {
+        bool managedOpList) {
     if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
         return nullptr;
     }
@@ -823,7 +826,7 @@
                                       colorType,
                                       std::move(colorSpace),
                                       surfaceProps,
-                                      managedOpsTask));
+                                      managedOpList));
 }
 
 std::unique_ptr<GrTextureContext> GrDrawingManager::makeTextureContext(
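
To keep the many renames above readable, here is the flush sequencing those comments describe, boiled down to a hypothetical skeleton (not the drawing manager's real API): close all open lists (a mid-draw flush can find open ones), prepare everything, then execute in order:

    #include <memory>
    #include <vector>

    struct Task {
        bool fClosed = false;
        void makeClosed() { fClosed = true; }  // no new commands after this
        void prepare() { /* record CPU-side work, allocate resources */ }
        bool execute() { /* submit to the GPU */ return true; }
    };

    void flushAll(std::vector<std::unique_ptr<Task>>& dag) {
        // A mid-draw flush may encounter open lists; close them so fresh ones
        // get created if the device draws again.
        for (auto& task : dag) {
            task->makeClosed();
        }
        // (A topological sort of the DAG would run here when sorting is on.)
        for (auto& task : dag) {
            task->prepare();
        }
        for (auto& task : dag) {
            if (!task->execute()) {
                // the real code logs a warning and keeps going
            }
        }
        dag.clear();
    }
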
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 25b0343..6b5fc1d 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -21,10 +21,11 @@
 class GrCoverageCountingPathRenderer;
 class GrOnFlushCallbackObject;
 class GrOpFlushState;
-class GrOpsTask;
+class GrOpList;
 class GrRecordingContext;
 class GrRenderTargetContext;
 class GrRenderTargetProxy;
+class GrRenderTargetOpList;
 class GrSoftwarePathRenderer;
 class GrTextureContext;
 class SkDeferredDisplayList;
@@ -39,15 +40,15 @@
                                                                    GrColorType,
                                                                    sk_sp<SkColorSpace>,
                                                                    const SkSurfaceProps*,
-                                                                   bool managedOpsTask = true);
+                                                                   bool managedOpList = true);
     std::unique_ptr<GrTextureContext> makeTextureContext(sk_sp<GrSurfaceProxy>,
                                                          GrColorType,
                                                          SkAlphaType,
                                                          sk_sp<SkColorSpace>);
 
-    // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
+    // A managed opList is controlled by the drawing manager (i.e., sorted & flushed with the
     // others). An unmanaged one is created and used by the onFlushCallback.
-    sk_sp<GrOpsTask> newOpsTask(sk_sp<GrRenderTargetProxy>, bool managedOpsTask);
+    sk_sp<GrRenderTargetOpList> newRTOpList(sk_sp<GrRenderTargetProxy>, bool managedOpList);
 
     // Create a new, specialized, render task that will regenerate mipmap levels and/or resolve
     // MSAA (depending on GrTextureResolveFlags). This method will add the new render task to the
@@ -121,8 +122,8 @@
         ~RenderTaskDAG();
 
         // Currently, when explicitly allocating resources, this call will topologically sort the
-        // GrRenderTasks.
-        // MDB TODO: remove once incremental GrRenderTask sorting is enabled
+        // opLists.
+        // MDB TODO: remove once incremental opList sorting is enabled
         void prepForFlush();
 
         void closeAll(const GrCaps* caps);
@@ -134,10 +135,10 @@
 
         void reset();
 
-        // These calls forcibly remove a GrRenderTask from the DAG. They are problematic bc they
-        // just remove the GrRenderTask but don't clean up any referring pointers (i.e., dependency
-        // pointers in the DAG). They work right now bc they are only called at flush time, after
-        // the topological sort is complete (so the dangling pointers aren't used).
+        // These calls forcibly remove an opList from the DAG. They are problematic bc they just
+        // remove the opList but don't clean up any referring pointers (i.e., dependency pointers
+        // in the DAG). They work right now bc they are only called at flush time, after the
+        // topological sort is complete (so the dangling pointers aren't used).
         void removeRenderTask(int index);
         void removeRenderTasks(int startIndex, int stopIndex);
 
@@ -168,18 +169,18 @@
     GrDrawingManager(GrRecordingContext*, const GrPathRendererChain::Options&,
                      const GrTextContext::Options&,
                      bool sortRenderTasks,
-                     bool reduceOpsTaskSplitting);
+                     bool reduceOpListSplitting);
 
     bool wasAbandoned() const;
 
     void cleanup();
 
-    // Closes the target's dependent render tasks (or, if not in sorting/opsTask-splitting-reduction
-    // mode, closes fActiveOpsTask) in preparation for us opening a new opsTask that will write to
+    // Closes the target's dependent render tasks (or, if not in sorting/opList-splitting-reduction
+    // mode, closes fActiveOpList) in preparation for us opening a new opList that will write to
     // 'target'.
     void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);
 
-    // return true if any GrRenderTasks were actually executed; false otherwise
+    // return true if any opLists were actually executed; false otherwise
     bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
                             int* numRenderTasksExecuted);
 
@@ -208,11 +209,11 @@
     sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
 
     RenderTaskDAG                     fDAG;
-    GrOpsTask*                        fActiveOpsTask = nullptr;
-    // These are the IDs of the opsTask currently being flushed (in internalFlush)
+    GrOpList*                         fActiveOpList = nullptr;
+    // These are the IDs of the opLists currently being flushed (in internalFlush)
     SkSTArray<8, uint32_t, true>      fFlushingRenderTaskIDs;
-    // These are the new opsTask generated by the onFlush CBs
-    SkSTArray<8, sk_sp<GrOpsTask>>    fOnFlushCBOpsTasks;
+    // These are the new opLists generated by the onFlush CBs
+    SkSTArray<8, sk_sp<GrOpList>>     fOnFlushCBOpLists;
 
     std::unique_ptr<GrTextContext>    fTextContext;
 
@@ -221,7 +222,7 @@
 
     GrTokenTracker                    fTokenTracker;
     bool                              fFlushing;
-    bool                              fReduceOpsTaskSplitting;
+    bool                              fReduceOpListSplitting;
 
     SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;
 
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 71e1557..84d9c41 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -289,7 +289,7 @@
                             GrGpuBuffer* transferBuffer, size_t offset);
 
     // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
-    // take place at higher levels and this function implements faster copy paths. The rect
+    // take place at the GrOpList level and this function implements faster copy paths. The rect
     // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
     // src/dst bounds and non-empty. They must also be in their exact device space coords, including
     // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
@@ -310,14 +310,14 @@
         return fSamplePatternDictionary.retrieveSampleLocations(samplePatternKey);
     }
 
-    // Returns a GrGpuRTCommandBuffer which GrOpsTasks send draw commands to instead of directly
+    // Returns a GrGpuRTCommandBuffer which GrOpLists send draw commands to instead of directly
     // to the Gpu object. The 'bounds' rect is the content rect of the destination.
     virtual GrGpuRTCommandBuffer* getCommandBuffer(
             GrRenderTarget*, GrSurfaceOrigin, const SkRect& bounds,
             const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
             const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) = 0;
 
-    // Returns a GrGpuTextureCommandBuffer which GrOpsTasks send texture commands to instead of
+    // Returns a GrGpuTextureCommandBuffer which GrOpLists send texture commands to instead of
     // directly to the Gpu object.
     virtual GrGpuTextureCommandBuffer* getCommandBuffer(GrTexture*, GrSurfaceOrigin) = 0;
 
diff --git a/src/gpu/GrLegacyDirectContext.cpp b/src/gpu/GrLegacyDirectContext.cpp
index dbdcd5c..ffd224d 100644
--- a/src/gpu/GrLegacyDirectContext.cpp
+++ b/src/gpu/GrLegacyDirectContext.cpp
@@ -28,9 +28,9 @@
 #endif
 
 #ifdef SK_DISABLE_REDUCE_OPLIST_SPLITTING
-static const bool kDefaultReduceOpsTaskSplitting = false;
+static const bool kDefaultReduceOpListSplitting = false;
 #else
-static const bool kDefaultReduceOpsTaskSplitting = false;
+static const bool kDefaultReduceOpListSplitting = false;
 #endif
 
 class GrLegacyDirectContext : public GrContext {
@@ -82,14 +82,14 @@
             return false;
         }
 
-        bool reduceOpsTaskSplitting = kDefaultReduceOpsTaskSplitting;
+        bool reduceOpListSplitting = kDefaultReduceOpListSplitting;
         if (GrContextOptions::Enable::kNo == this->options().fReduceOpListSplitting) {
-            reduceOpsTaskSplitting = false;
+            reduceOpListSplitting = false;
         } else if (GrContextOptions::Enable::kYes == this->options().fReduceOpListSplitting) {
-            reduceOpsTaskSplitting = true;
+            reduceOpListSplitting = true;
         }
 
-        this->setupDrawingManager(true, reduceOpsTaskSplitting);
+        this->setupDrawingManager(true, reduceOpListSplitting);
 
         SkASSERT(this->caps());
 
diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h
index f26f8d3..cc36c10 100644
--- a/src/gpu/GrOnFlushResourceProvider.h
+++ b/src/gpu/GrOnFlushResourceProvider.h
@@ -15,7 +15,9 @@
 #include "src/gpu/GrResourceProvider.h"
 
 class GrDrawingManager;
+class GrOpList;
 class GrOnFlushResourceProvider;
+class GrRenderTargetOpList;
 class GrRenderTargetContext;
 class GrSurfaceProxy;
 class SkColorSpace;
@@ -31,11 +33,11 @@
 
     /*
      * The onFlush callback allows subsystems (e.g., text, path renderers) to create atlases
-     * for a specific flush. All the GrOpsTask IDs required for the flush are passed into the
+     * for a specific flush. All the GrOpList IDs required for the flush are passed into the
      * callback. The callback should return the render target contexts used to render the atlases
      * in 'results'.
      */
-    virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
+    virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
                           SkTArray<std::unique_ptr<GrRenderTargetContext>>* results) = 0;
 
     /**
@@ -43,7 +45,7 @@
      * released. startTokenForNextFlush can be used to track resources used in the current flush.
      */
     virtual void postFlush(GrDeferredUploadToken startTokenForNextFlush,
-                           const uint32_t* opsTaskIDs, int numOpsTaskIDs) {}
+                           const uint32_t* opListIDs, int numOpListIDs) {}
 
     /**
      * Tells the callback owner to hold onto this object when freeing GPU resources
diff --git a/src/gpu/GrOpFlushState.h b/src/gpu/GrOpFlushState.h
index c9178e2..fa3778b 100644
--- a/src/gpu/GrOpFlushState.h
+++ b/src/gpu/GrOpFlushState.h
@@ -23,7 +23,7 @@
 class GrGpuRTCommandBuffer;
 class GrResourceProvider;
 
-/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
+/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
 class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
 public:
     // vertexSpace and indexSpace may either be null or an allocation of size
@@ -47,7 +47,7 @@
             const GrUserStencilSettings* = &GrUserStencilSettings::kUnused);
 
     GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
-    // Helper function used by Ops that are only called via OpsTasks
+    // Helper function used by Ops that are only called via RenderTargetOpLists
     GrGpuRTCommandBuffer* rtCommandBuffer();
     void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
 
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
new file mode 100644
index 0000000..2035dd5
--- /dev/null
+++ b/src/gpu/GrOpList.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpList.h"
+
+GrOpList::GrOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
+                   sk_sp<GrSurfaceProxy> surfaceProxy,
+                   GrAuditTrail* auditTrail)
+        : GrRenderTask(std::move(surfaceProxy))
+        , fOpMemoryPool(std::move(opMemoryPool))
+        , fAuditTrail(auditTrail) {
+    SkASSERT(fOpMemoryPool);
+}
+
+GrOpList::~GrOpList() {
+}
+
+void GrOpList::endFlush() {
+    if (fTarget && this == fTarget->getLastRenderTask()) {
+        fTarget->setLastRenderTask(nullptr);
+    }
+
+    fTarget.reset();
+    fDeferredProxies.reset();
+    fAuditTrail = nullptr;
+}
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
new file mode 100644
index 0000000..a4ee09e
--- /dev/null
+++ b/src/gpu/GrOpList.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOpList_DEFINED
+#define GrOpList_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrRenderTask.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrAuditTrail;
+class GrOpMemoryPool;
+class GrGpuBuffer;
+
+class GrOpList : public GrRenderTask {
+public:
+    GrOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrSurfaceProxy>, GrAuditTrail*);
+    ~GrOpList() override;
+
+    void endFlush() override;
+
+protected:
+    // This is a back pointer to the GrOpMemoryPool that holds the memory for this opList's ops.
+    // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
+    // constituent opLists survive.
+    sk_sp<GrOpMemoryPool> fOpMemoryPool;
+    GrAuditTrail*         fAuditTrail;
+};
+
+#endif
diff --git a/src/gpu/GrPathRenderer.h b/src/gpu/GrPathRenderer.h
index a1d0741..26f7c7f 100644
--- a/src/gpu/GrPathRenderer.h
+++ b/src/gpu/GrPathRenderer.h
@@ -28,7 +28,7 @@
 class SkPath;
 
 /**
- *  Base class for drawing paths into a GrOpsTask.
+ *  Base class for drawing paths into a GrOpList.
  */
 class GrPathRenderer : public SkRefCnt {
 public:
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index 3e5e18d..1de2199 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -11,6 +11,7 @@
 #include "src/gpu/GrCaps.h"
 #include "src/gpu/GrGpu.h"
 #include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 #include "src/gpu/GrXferProcessor.h"
 
 #include "src/gpu/ops/GrOp.h"
diff --git a/src/gpu/GrRecordingContext.cpp b/src/gpu/GrRecordingContext.cpp
index 9043f6a..58544ed 100644
--- a/src/gpu/GrRecordingContext.cpp
+++ b/src/gpu/GrRecordingContext.cpp
@@ -66,7 +66,7 @@
     return true;
 }
 
-void GrRecordingContext::setupDrawingManager(bool sortOpsTasks, bool reduceOpsTaskSplitting) {
+void GrRecordingContext::setupDrawingManager(bool sortOpLists, bool reduceOpListSplitting) {
     GrPathRendererChain::Options prcOptions;
     prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
 #if GR_TEST_UTILS
@@ -83,7 +83,7 @@
     if (!this->proxyProvider()->renderingDirectly()) {
         // DDL TODO: remove this crippling of the path renderer chain
         // Disable the small path renderer bc of the proxies in the atlas. They need to be
-        // unified when the opsTasks are added back to the destination drawing manager.
+        // unified when the opLists are added back to the destination drawing manager.
         prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
     }
 
@@ -100,8 +100,8 @@
     fDrawingManager.reset(new GrDrawingManager(this,
                                                prcOptions,
                                                textContextOptions,
-                                               sortOpsTasks,
-                                               reduceOpsTaskSplitting));
+                                               sortOpLists,
+                                               reduceOpListSplitting));
 }
 
 void GrRecordingContext::abandonContext() {
diff --git a/src/gpu/GrReducedClip.cpp b/src/gpu/GrReducedClip.cpp
index 9224e00..e485992 100644
--- a/src/gpu/GrReducedClip.cpp
+++ b/src/gpu/GrReducedClip.cpp
@@ -656,7 +656,7 @@
 
     if (fCCPRClipPaths.count() < fMaxCCPRClipPaths && GrAA::kYes == aa) {
         // Set aside CCPR paths for later. We will create their clip FPs once we know the ID of the
-        // opsTask they will operate in.
+        // opList they will operate in.
         SkPath& ccprClipPath = fCCPRClipPaths.push_back(deviceSpacePath);
         if (Invert::kYes == invert) {
             ccprClipPath.toggleInverseFillType();
@@ -972,7 +972,7 @@
 }
 
 std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticFPs(
-        GrCoverageCountingPathRenderer* ccpr, uint32_t opsTaskID) {
+        GrCoverageCountingPathRenderer* ccpr, uint32_t opListID) {
     // Make sure finishAndDetachAnalyticFPs hasn't been called already.
     SkDEBUGCODE(for (const auto& fp : fAnalyticFPs) { SkASSERT(fp); })
 
@@ -981,7 +981,7 @@
         for (const SkPath& ccprClipPath : fCCPRClipPaths) {
             SkASSERT(ccpr);
             SkASSERT(fHasScissor);
-            auto fp = ccpr->makeClipProcessor(opsTaskID, ccprClipPath, fScissor, *fCaps);
+            auto fp = ccpr->makeClipProcessor(opListID, ccprClipPath, fScissor, *fCaps);
             fAnalyticFPs.push_back(std::move(fp));
         }
         fCCPRClipPaths.reset();
diff --git a/src/gpu/GrReducedClip.h b/src/gpu/GrReducedClip.h
index ca95bd0..25b05df 100644
--- a/src/gpu/GrReducedClip.h
+++ b/src/gpu/GrReducedClip.h
@@ -88,16 +88,16 @@
     int numAnalyticFPs() const { return fAnalyticFPs.count() + fCCPRClipPaths.count(); }
 
     /**
-     * Called once the client knows the ID of the opsTask that the clip FPs will operate in. This
-     * method finishes any outstanding work that was waiting for the opsTask ID, then detaches and
+     * Called once the client knows the ID of the opList that the clip FPs will operate in. This
+     * method finishes any outstanding work that was waiting for the opList ID, then detaches and
      * returns this class's list of FPs that complete the clip.
      *
      * NOTE: this must be called AFTER producing the clip mask (if any) because draw calls on
      * the render target context, surface allocations, and even switching render targets (pre MDB)
-     * may cause flushes or otherwise change which opsTask the actual draw is going into.
+     * may cause flushes or otherwise change which opList the actual draw is going into.
      */
     std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticFPs(
-            GrCoverageCountingPathRenderer*, uint32_t opsTaskID);
+            GrCoverageCountingPathRenderer*, uint32_t opListID);
 
 private:
     void walkStack(const SkClipStack&, const SkRect& queryBounds);
@@ -145,7 +145,7 @@
     uint32_t fMaskGenID;
     bool fMaskRequiresAA;
     SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fAnalyticFPs;
-    SkSTArray<4, SkPath> fCCPRClipPaths; // Will convert to FPs once we have an opsTask ID for CCPR.
+    SkSTArray<4, SkPath> fCCPRClipPaths; // Will convert to FPs once we have an opList ID for CCPR.
 };
 
 #endif
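
The NOTE above encodes an ordering contract: producing the clip mask can flush, and a flush can swap which opList the draw lands in, so the opList ID must be captured only afterwards. A stub-typed sketch of the safe ordering (hypothetical names):

    #include <cstdint>

    struct StubOpList {
        uint32_t fID = 1;
        uint32_t uniqueID() const { return fID; }
    };

    struct StubRenderTargetContext {
        StubOpList fList;
        StubOpList* getOpList() { return &fList; }
        void produceClipMask() {
            // In the real code this step may flush, replacing the active opList.
            ++fList.fID;
        }
    };

    uint32_t clipThenCaptureID(StubRenderTargetContext* rtc) {
        rtc->produceClipMask();               // may change the active opList...
        return rtc->getOpList()->uniqueID();  // ...so only capture the ID now
    }
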
diff --git a/src/gpu/GrRenderTarget.cpp b/src/gpu/GrRenderTarget.cpp
index 9f915e5..2942427 100644
--- a/src/gpu/GrRenderTarget.cpp
+++ b/src/gpu/GrRenderTarget.cpp
@@ -13,6 +13,7 @@
 #include "src/gpu/GrContextPriv.h"
 #include "src/gpu/GrGpu.h"
 #include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 #include "src/gpu/GrRenderTargetPriv.h"
 #include "src/gpu/GrSamplePatternDictionary.h"
 #include "src/gpu/GrStencilAttachment.h"
diff --git a/src/gpu/GrRenderTarget.h b/src/gpu/GrRenderTarget.h
index 4e59422..7a6f44c 100644
--- a/src/gpu/GrRenderTarget.h
+++ b/src/gpu/GrRenderTarget.h
@@ -12,6 +12,7 @@
 #include "include/gpu/GrSurface.h"
 
 class GrCaps;
+class GrRenderTargetOpList;
 class GrRenderTargetPriv;
 class GrStencilAttachment;
 class GrBackendRenderTarget;
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 862b79c..ff9cf20 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -29,6 +29,7 @@
 #include "src/gpu/GrFixedClip.h"
 #include "src/gpu/GrGpuResourcePriv.h"
 #include "src/gpu/GrMemoryPool.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrPathRenderer.h"
 #include "src/gpu/GrRecordingContextPriv.h"
 #include "src/gpu/GrRenderTarget.h"
@@ -135,21 +136,21 @@
     GrDrawingManager* fDrawingManager;
 };
 
-// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
-// GrOpsTask to be picked up and added to by renderTargetContexts lower in the call
-// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
-// when the renderTargetContext attempts to use it (via getOpsTask).
+// In MDB mode the reffing of the 'getLastOpList' call's result allows in-progress
+// GrOpLists to be picked up and added to by renderTargetContexts lower in the call
+// stack. When this occurs with a closed GrOpList, a new one will be allocated
+// when the renderTargetContext attempts to use it (via getOpList).
 GrRenderTargetContext::GrRenderTargetContext(GrRecordingContext* context,
                                              sk_sp<GrRenderTargetProxy> rtp,
                                              GrColorType colorType,
                                              sk_sp<SkColorSpace> colorSpace,
                                              const SkSurfaceProps* surfaceProps,
-                                             bool managedOpsTask)
+                                             bool managedOpList)
         : GrSurfaceContext(context, colorType, kPremul_SkAlphaType, std::move(colorSpace))
         , fRenderTargetProxy(std::move(rtp))
-        , fOpsTask(sk_ref_sp(fRenderTargetProxy->getLastOpsTask()))
+        , fOpList(sk_ref_sp(fRenderTargetProxy->getLastRenderTargetOpList()))
         , fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
-        , fManagedOpsTask(managedOpsTask) {
+        , fManagedOpList(managedOpList) {
     fTextTarget.reset(new TextTarget(this));
     SkDEBUGCODE(this->validate();)
 }
@@ -159,8 +160,8 @@
     SkASSERT(fRenderTargetProxy);
     fRenderTargetProxy->validate(fContext);
 
-    if (fOpsTask && !fOpsTask->isClosed()) {
-        SkASSERT(fRenderTargetProxy->getLastRenderTask() == fOpsTask.get());
+    if (fOpList && !fOpList->isClosed()) {
+        SkASSERT(fRenderTargetProxy->getLastRenderTask() == fOpList.get());
     }
 }
 #endif
@@ -200,15 +201,19 @@
     return GrMipMapped::kNo;
 }
 
-GrOpsTask* GrRenderTargetContext::getOpsTask() {
+GrRenderTargetOpList* GrRenderTargetContext::getRTOpList() {
     ASSERT_SINGLE_OWNER
     SkDEBUGCODE(this->validate();)
 
-    if (!fOpsTask || fOpsTask->isClosed()) {
-        fOpsTask = this->drawingManager()->newOpsTask(fRenderTargetProxy, fManagedOpsTask);
+    if (!fOpList || fOpList->isClosed()) {
+        fOpList = this->drawingManager()->newRTOpList(fRenderTargetProxy, fManagedOpList);
     }
 
-    return fOpsTask.get();
+    return fOpList.get();
+}
+
+GrOpList* GrRenderTargetContext::getOpList() {
+    return this->getRTOpList();
 }
 
 void GrRenderTargetContext::drawGlyphRunList(
@@ -239,7 +244,7 @@
 
     AutoCheckFlush acf(this->drawingManager());
 
-    this->getOpsTask()->discard();
+    this->getRTOpList()->discard();
 }
 
 void GrRenderTargetContext::clear(const SkIRect* rect,
@@ -295,16 +300,16 @@
     }
 
     if (isFull) {
-        GrOpsTask* opsTask = this->getOpsTask();
-        if (opsTask->resetForFullscreenClear(this->canDiscardPreviousOpsOnFullClear()) &&
+        GrRenderTargetOpList* opList = this->getRTOpList();
+        if (opList->resetForFullscreenClear(this->canDiscardPreviousOpsOnFullClear()) &&
             !this->caps()->performColorClearsAsDraws()) {
             // The op list was emptied and native clears are allowed, so just use the load op
-            opsTask->setColorLoadOp(GrLoadOp::kClear, color);
+            opList->setColorLoadOp(GrLoadOp::kClear, color);
             return;
         } else {
             // Will use an op for the clear, reset the load op to discard since the op will
             // blow away the color buffer contents
-            opsTask->setColorLoadOp(GrLoadOp::kDiscard);
+            opList->setColorLoadOp(GrLoadOp::kDiscard);
         }
 
         // Must add an op to the list (either because we couldn't use a load op, or because the
@@ -364,7 +369,7 @@
         }
     }
 
-    // TODO: in a post-MDB world this should be handled at the OpsTask level.
+    // TODO: in a post-MDB world this should be handled at the OpList level.
     // This makes sure to always add an op to the list, instead of marking the clear as a load op.
     // This code follows very similar logic to internalClear() below, but critical differences are
     // highlighted in line related to absClear()'s unique behavior.
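To summarize the branch above: a fullscreen clear first tries to turn into a kClear load op, and only records an explicit clear op when the list cannot be emptied or the device performs color clears as draws. A rough sketch of that decision (ignoring the Vulkan secondary-command-buffer caveat handled elsewhere), with invented LoadOp, Caps, and ClearState types standing in for the Skia ones:

    enum class LoadOp { kLoad, kClear, kDiscard };

    struct Caps { bool colorClearsAsDraws = false; };

    struct ClearState {
        LoadOp colorLoadOp = LoadOp::kLoad;
        bool hasClearDrawOp = false;
    };

    // 'canReset' mirrors resetForFullscreenClear() succeeding, i.e. the op
    // list could be emptied so prior work no longer matters.
    ClearState fullscreenClear(bool canReset, const Caps& caps) {
        ClearState s;
        if (canReset && !caps.colorClearsAsDraws) {
            s.colorLoadOp = LoadOp::kClear;   // free: folded into the render pass load
        } else {
            s.colorLoadOp = LoadOp::kDiscard; // an explicit op will overwrite everything
            s.hasClearDrawOp = true;
        }
        return s;
    }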
@@ -389,9 +394,9 @@
         }
     } else {
         // Reset the opList like in internalClear(), but do not rely on a load op for the clear
-        fRenderTargetContext->getOpsTask()->resetForFullscreenClear(
+        fRenderTargetContext->getRTOpList()->resetForFullscreenClear(
                 fRenderTargetContext->canDiscardPreviousOpsOnFullClear());
-        fRenderTargetContext->getOpsTask()->setColorLoadOp(GrLoadOp::kDiscard);
+        fRenderTargetContext->getRTOpList()->setColorLoadOp(GrLoadOp::kDiscard);
 
         if (fRenderTargetContext->caps()->performColorClearsAsDraws()) {
             // This draws a quad covering the worst case dimensions instead of just the logical
@@ -784,11 +789,11 @@
             *fRenderTargetContext->caps());
 }
 
-GrOpsTask::CanDiscardPreviousOps GrRenderTargetContext::canDiscardPreviousOpsOnFullClear(
+GrRenderTargetOpList::CanDiscardPreviousOps GrRenderTargetContext::canDiscardPreviousOpsOnFullClear(
         ) const {
 #if GR_TEST_UTILS
     if (fPreserveOpsOnFullClear_TestingOnly) {
-        return GrOpsTask::CanDiscardPreviousOps::kNo;
+        return GrRenderTargetOpList::CanDiscardPreviousOps::kNo;
     }
 #endif
     // Regardless of how the clear is implemented (native clear or a fullscreen quad), all prior ops
@@ -797,7 +802,7 @@
     // Although the clear will ignore the stencil buffer, following draw ops may not so we can't get
     // rid of all the preceding ops. Beware! If we ever add any ops that have a side effect beyond
     // modifying the stencil buffer we will need a more elaborate tracking system (skbug.com/7002).
-    return GrOpsTask::CanDiscardPreviousOps(!fNumStencilSamples);
+    return GrRenderTargetOpList::CanDiscardPreviousOps(!fNumStencilSamples);
 }
 
 void GrRenderTargetContext::setNeedsStencil(bool multisampled) {
@@ -830,7 +835,7 @@
             // code note when the instantiated stencil buffer is already clear and skip the clear
             // altogether. And on tilers, loading the stencil buffer cleared is even faster than
             // preserving the previous contents.
-            this->getOpsTask()->setStencilLoadOp(GrLoadOp::kClear);
+            this->getRTOpList()->setStencilLoadOp(GrLoadOp::kClear);
         }
     }
 }
@@ -2002,7 +2007,7 @@
                 kAdopt_GrWrapOwnership);
         std::unique_ptr<GrOp> waitOp(GrSemaphoreOp::MakeWait(fContext, std::move(sema),
                                                              fRenderTargetProxy.get()));
-        this->getOpsTask()->addWaitOp(
+        this->getRTOpList()->addWaitOp(
                 std::move(waitOp), GrTextureResolveManager(this->drawingManager()), *this->caps());
     }
     return true;
@@ -2279,7 +2284,7 @@
 }
 
 void GrRenderTargetContext::addOp(std::unique_ptr<GrOp> op) {
-    this->getOpsTask()->addOp(
+    this->getRTOpList()->addOp(
             std::move(op), GrTextureResolveManager(this->drawingManager()), *this->caps());
 }
 
@@ -2335,12 +2340,12 @@
     }
 
     op->setClippedBounds(bounds);
-    auto opsTask = this->getOpsTask();
+    auto opList = this->getRTOpList();
     if (willAddFn) {
-        willAddFn(op.get(), opsTask->uniqueID());
+        willAddFn(op.get(), opList->uniqueID());
     }
-    opsTask->addDrawOp(std::move(op), analysis, std::move(appliedClip), dstProxy,
-                       GrTextureResolveManager(this->drawingManager()), *this->caps());
+    opList->addDrawOp(std::move(op), analysis, std::move(appliedClip), dstProxy,
+                      GrTextureResolveManager(this->drawingManager()), *this->caps());
 }
 
 bool GrRenderTargetContext::setupDstProxy(const GrClip& clip, const GrOp& op,
diff --git a/src/gpu/GrRenderTargetContext.h b/src/gpu/GrRenderTargetContext.h
index 10094ec..aa3281d 100644
--- a/src/gpu/GrRenderTargetContext.h
+++ b/src/gpu/GrRenderTargetContext.h
@@ -14,8 +14,8 @@
 #include "include/core/SkSurface.h"
 #include "include/core/SkSurfaceProps.h"
 #include "include/private/GrTypesPriv.h"
-#include "src/gpu/GrOpsTask.h"
 #include "src/gpu/GrPaint.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 #include "src/gpu/GrRenderTargetProxy.h"
 #include "src/gpu/GrSurfaceContext.h"
 #include "src/gpu/GrXferProcessor.h"
@@ -502,12 +502,12 @@
 #if GR_TEST_UTILS
     bool testingOnly_IsInstantiated() const { return fRenderTargetProxy->isInstantiated(); }
     void testingOnly_SetPreserveOpsOnFullClear() { fPreserveOpsOnFullClear_TestingOnly = true; }
-    GrOpsTask* testingOnly_PeekLastOpsTask() { return fOpsTask.get(); }
+    GrRenderTargetOpList* testingOnly_PeekLastOpList() { return fOpList.get(); }
 #endif
 
 protected:
     GrRenderTargetContext(GrRecordingContext*, sk_sp<GrRenderTargetProxy>, GrColorType,
-                          sk_sp<SkColorSpace>, const SkSurfaceProps*, bool managedOpsTask = true);
+                          sk_sp<SkColorSpace>, const SkSurfaceProps*, bool managedOpList = true);
 
     SkDEBUGCODE(void validate() const override;)
 
@@ -518,7 +518,7 @@
     GrAAType chooseAAType(GrAA);
 
     friend class GrAtlasTextBlob;               // for access to add[Mesh]DrawOp
-    friend class GrClipStackClip;               // for access to getOpsTask
+    friend class GrClipStackClip;               // for access to getOpList
 
     friend class GrDrawingManager; // for ctor
     friend class GrRenderTargetContextPriv;
@@ -541,7 +541,7 @@
                              std::unique_ptr<GrFragmentProcessor>,
                              sk_sp<GrTextureProxy>);
 
-    GrOpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const;
+    GrRenderTargetOpList::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const;
     void setNeedsStencil(bool multisampled);
 
     void internalClear(const GrFixedClip&, const SkPMColor4f&, CanClearFullscreen);
@@ -603,7 +603,7 @@
     void addOp(std::unique_ptr<GrOp>);
 
     // Allows caller of addDrawOp to know which op list an op will be added to.
-    using WillAddOpFn = void(GrOp*, uint32_t opsTaskID);
+    using WillAddOpFn = void(GrOp*, uint32_t opListID);
     // These perform processing specific to GrDrawOp-derived ops before recording them into an
     // op list. Before adding the op to an op list the WillAddOpFn is called. Note that it
     // will not be called in the event that the op is discarded. Moreover, the op may merge into
@@ -621,17 +621,18 @@
     void asyncReadPixels(const SkIRect& rect, SkColorType colorType, ReadPixelsCallback callback,
                          ReadPixelsContext context);
 
-    GrOpsTask* getOpsTask();
+    GrRenderTargetOpList* getRTOpList();
+    GrOpList* getOpList();
 
     std::unique_ptr<GrTextTarget> fTextTarget;
     sk_sp<GrRenderTargetProxy> fRenderTargetProxy;
 
-    // In MDB-mode the GrOpsTask can be closed by some other renderTargetContext that has picked
-    // it up. For this reason, the GrOpsTask should only ever be accessed via 'getOpsTask'.
-    sk_sp<GrOpsTask> fOpsTask;
+    // In MDB-mode the GrOpList can be closed by some other renderTargetContext that has picked
+    // it up. For this reason, the GrOpList should only ever be accessed via 'getOpList'.
+    sk_sp<GrRenderTargetOpList> fOpList;
 
     SkSurfaceProps fSurfaceProps;
-    bool fManagedOpsTask;
+    bool fManagedOpList;
 
     int fNumStencilSamples = 0;
 #if GR_TEST_UTILS
diff --git a/src/gpu/GrRenderTargetContextPriv.h b/src/gpu/GrRenderTargetContextPriv.h
index 1440ec2..f816ea7 100644
--- a/src/gpu/GrRenderTargetContextPriv.h
+++ b/src/gpu/GrRenderTargetContextPriv.h
@@ -8,9 +8,9 @@
 #ifndef GrRenderTargetContextPriv_DEFINED
 #define GrRenderTargetContextPriv_DEFINED
 
-#include "src/gpu/GrOpsTask.h"
 #include "src/gpu/GrPathRendering.h"
 #include "src/gpu/GrRenderTargetContext.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 
 class GrFixedClip;
 class GrHardClip;
@@ -27,20 +27,20 @@
     // TODO: remove after clipping overhaul.
     void setLastClip(uint32_t clipStackGenID, const SkIRect& devClipBounds,
                      int numClipAnalyticFPs) {
-        GrOpsTask* opsTask = fRenderTargetContext->getOpsTask();
-        opsTask->fLastClipStackGenID = clipStackGenID;
-        opsTask->fLastDevClipBounds = devClipBounds;
-        opsTask->fLastClipNumAnalyticFPs = numClipAnalyticFPs;
+        GrRenderTargetOpList* opList = fRenderTargetContext->getRTOpList();
+        opList->fLastClipStackGenID = clipStackGenID;
+        opList->fLastDevClipBounds = devClipBounds;
+        opList->fLastClipNumAnalyticFPs = numClipAnalyticFPs;
     }
 
     // called to determine if we have to render the clip into SB.
     // TODO: remove after clipping overhaul.
     bool mustRenderClip(uint32_t clipStackGenID, const SkIRect& devClipBounds,
                         int numClipAnalyticFPs) const {
-        GrOpsTask* opsTask = fRenderTargetContext->getOpsTask();
-        return opsTask->fLastClipStackGenID != clipStackGenID ||
-               !opsTask->fLastDevClipBounds.contains(devClipBounds) ||
-               opsTask->fLastClipNumAnalyticFPs != numClipAnalyticFPs;
+        GrRenderTargetOpList* opList = fRenderTargetContext->getRTOpList();
+        return opList->fLastClipStackGenID != clipStackGenID ||
+               !opList->fLastDevClipBounds.contains(devClipBounds) ||
+               opList->fLastClipNumAnalyticFPs != numClipAnalyticFPs;
     }
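setLastClip() and mustRenderClip() above form a small cache: the clip is re-rendered into the stencil buffer only when the clip stack's generation ID, the device clip bounds, or the analytic-FP count diverge from what the op list last recorded. A self-contained sketch of the same check, with a plain Rect standing in for SkIRect:

    #include <cstdint>

    struct Rect {
        int l = 0, t = 0, r = 0, b = 0;
        bool contains(const Rect& o) const {
            return l <= o.l && t <= o.t && r >= o.r && b >= o.b;
        }
    };

    struct ClipCache {
        uint32_t genID = 0;  // 0 plays the role of SK_InvalidUniqueID here
        Rect     bounds;
        int      numAnalyticFPs = 0;

        bool mustRender(uint32_t id, const Rect& devBounds, int fps) const {
            return genID != id || !bounds.contains(devBounds) || numAnalyticFPs != fps;
        }
        void record(uint32_t id, const Rect& devBounds, int fps) {
            genID = id; bounds = devBounds; numAnalyticFPs = fps;
        }
    };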
 
     using CanClearFullscreen = GrRenderTargetContext::CanClearFullscreen;
@@ -104,7 +104,7 @@
         return fRenderTargetContext->fRenderTargetProxy->uniqueID();
     }
 
-    uint32_t testingOnly_getOpsTaskID();
+    uint32_t testingOnly_getOpListID();
 
     using WillAddOpFn = GrRenderTargetContext::WillAddOpFn;
     void testingOnly_addDrawOp(std::unique_ptr<GrDrawOp>);
diff --git a/src/gpu/GrOpsTask.cpp b/src/gpu/GrRenderTargetOpList.cpp
similarity index 87%
rename from src/gpu/GrOpsTask.cpp
rename to src/gpu/GrRenderTargetOpList.cpp
index bcf0034..2d7c2c9 100644
--- a/src/gpu/GrOpsTask.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -1,11 +1,11 @@
 /*
- * Copyright 2019 Google Inc.
+ * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 
 #include "include/private/GrRecordingContext.h"
 #include "src/core/SkExchange.h"
@@ -40,14 +40,15 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-inline GrOpsTask::OpChain::List::List(std::unique_ptr<GrOp> op)
+inline GrRenderTargetOpList::OpChain::List::List(std::unique_ptr<GrOp> op)
         : fHead(std::move(op)), fTail(fHead.get()) {
     this->validate();
 }
 
-inline GrOpsTask::OpChain::List::List(List&& that) { *this = std::move(that); }
+inline GrRenderTargetOpList::OpChain::List::List(List&& that) { *this = std::move(that); }
 
-inline GrOpsTask::OpChain::List& GrOpsTask::OpChain::List::operator=(List&& that) {
+inline GrRenderTargetOpList::OpChain::List& GrRenderTargetOpList::OpChain::List::operator=(
+        List&& that) {
     fHead = std::move(that.fHead);
     fTail = that.fTail;
     that.fTail = nullptr;
@@ -55,7 +56,7 @@
     return *this;
 }
 
-inline std::unique_ptr<GrOp> GrOpsTask::OpChain::List::popHead() {
+inline std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::List::popHead() {
     SkASSERT(fHead);
     auto temp = fHead->cutChain();
     std::swap(temp, fHead);
@@ -66,7 +67,7 @@
     return temp;
 }
 
-inline std::unique_ptr<GrOp> GrOpsTask::OpChain::List::removeOp(GrOp* op) {
+inline std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::List::removeOp(GrOp* op) {
 #ifdef SK_DEBUG
     auto head = op;
     while (head->prevInChain()) { head = head->prevInChain(); }
@@ -88,7 +89,7 @@
     return temp;
 }
 
-inline void GrOpsTask::OpChain::List::pushHead(std::unique_ptr<GrOp> op) {
+inline void GrRenderTargetOpList::OpChain::List::pushHead(std::unique_ptr<GrOp> op) {
     SkASSERT(op);
     SkASSERT(op->isChainHead());
     SkASSERT(op->isChainTail());
@@ -101,13 +102,13 @@
     }
 }
 
-inline void GrOpsTask::OpChain::List::pushTail(std::unique_ptr<GrOp> op) {
+inline void GrRenderTargetOpList::OpChain::List::pushTail(std::unique_ptr<GrOp> op) {
     SkASSERT(op->isChainTail());
     fTail->chainConcat(std::move(op));
     fTail = fTail->nextInChain();
 }
 
-inline void GrOpsTask::OpChain::List::validate() const {
+inline void GrRenderTargetOpList::OpChain::List::validate() const {
 #ifdef SK_DEBUG
     if (fHead) {
         SkASSERT(fTail);
@@ -118,9 +119,9 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-GrOpsTask::OpChain::OpChain(std::unique_ptr<GrOp> op,
-                            GrProcessorSet::Analysis processorAnalysis,
-                            GrAppliedClip* appliedClip, const DstProxy* dstProxy)
+GrRenderTargetOpList::OpChain::OpChain(std::unique_ptr<GrOp> op,
+                                       GrProcessorSet::Analysis processorAnalysis,
+                                       GrAppliedClip* appliedClip, const DstProxy* dstProxy)
         : fList{std::move(op)}
         , fProcessorAnalysis(processorAnalysis)
         , fAppliedClip(appliedClip) {
@@ -131,7 +132,7 @@
     fBounds = fList.head()->bounds();
 }
 
-void GrOpsTask::OpChain::visitProxies(const GrOp::VisitProxyFunc& func) const {
+void GrRenderTargetOpList::OpChain::visitProxies(const GrOp::VisitProxyFunc& func) const {
     if (fList.empty()) {
         return;
     }
@@ -146,7 +147,7 @@
     }
 }
 
-void GrOpsTask::OpChain::deleteOps(GrOpMemoryPool* pool) {
+void GrRenderTargetOpList::OpChain::deleteOps(GrOpMemoryPool* pool) {
     while (!fList.empty()) {
         pool->release(fList.popHead());
     }
@@ -154,7 +155,7 @@
 
 // Concatenates two op chains and attempts to merge ops across the chains. Assumes that we know that
 // the two chains are chainable. Returns the new chain.
-GrOpsTask::OpChain::List GrOpsTask::OpChain::DoConcat(
+GrRenderTargetOpList::OpChain::List GrRenderTargetOpList::OpChain::DoConcat(
         List chainA, List chainB, const GrCaps& caps, GrOpMemoryPool* pool,
         GrAuditTrail* auditTrail) {
     // We process ops in chain b from head to tail. We attempt to merge with nodes in a, starting
@@ -230,7 +231,7 @@
 
 // Attempts to concatenate the given chain onto our own and merge ops across the chains. Returns
 // whether the operation succeeded. On success, the provided list will be returned empty.
-bool GrOpsTask::OpChain::tryConcat(
+bool GrRenderTargetOpList::OpChain::tryConcat(
         List* list, GrProcessorSet::Analysis processorAnalysis, const DstProxy& dstProxy,
         const GrAppliedClip* appliedClip, const SkRect& bounds, const GrCaps& caps,
         GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
@@ -288,8 +289,8 @@
     return true;
 }
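tryConcat() above (and DoConcat() before it) splice one chain onto another, folding each incoming op into an existing one whenever the two can merge. A toy version over a vector, assuming ops merge whenever they share a key; the real code also consults the applied clips, dst proxies, bounds, and caps:

    #include <vector>

    struct Op {
        int key;      // ops with equal keys are assumed mergeable
        int payload;
    };

    // Appends 'b' onto 'a', merging into the tail where possible. On return
    // 'b' is empty, mirroring tryConcat()'s success contract.
    void concatAndMerge(std::vector<Op>& a, std::vector<Op>& b) {
        for (Op& op : b) {
            if (!a.empty() && a.back().key == op.key) {
                a.back().payload += op.payload;  // merge instead of growing the chain
            } else {
                a.push_back(op);
            }
        }
        b.clear();
    }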
 
-bool GrOpsTask::OpChain::prependChain(OpChain* that, const GrCaps& caps, GrOpMemoryPool* pool,
-                                      GrAuditTrail* auditTrail) {
+bool GrRenderTargetOpList::OpChain::prependChain(OpChain* that, const GrCaps& caps,
+                                                 GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
     if (!that->tryConcat(
             &fList, fProcessorAnalysis, fDstProxy, fAppliedClip, fBounds, caps, pool, auditTrail)) {
         this->validate();
@@ -312,7 +313,7 @@
     return true;
 }
 
-std::unique_ptr<GrOp> GrOpsTask::OpChain::appendOp(
+std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::appendOp(
         std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis,
         const DstProxy* dstProxy, const GrAppliedClip* appliedClip, const GrCaps& caps,
         GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
@@ -335,7 +336,7 @@
     return nullptr;
 }
 
-inline void GrOpsTask::OpChain::validate() const {
+inline void GrRenderTargetOpList::OpChain::validate() const {
 #ifdef SK_DEBUG
     fList.validate();
     for (const auto& op : GrOp::ChainRange<>(fList.head())) {
@@ -348,46 +349,71 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-GrOpsTask::GrOpsTask(sk_sp<GrOpMemoryPool> opMemoryPool,
-                     sk_sp<GrRenderTargetProxy> rtProxy,
-                     GrAuditTrail* auditTrail)
-        : GrRenderTask(std::move(rtProxy))
-        , fOpMemoryPool(std::move(opMemoryPool))
-        , fAuditTrail(auditTrail)
+GrRenderTargetOpList::GrRenderTargetOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
+                                           sk_sp<GrRenderTargetProxy> proxy,
+                                           GrAuditTrail* auditTrail)
+        : INHERITED(std::move(opMemoryPool), std::move(proxy), auditTrail)
         , fLastClipStackGenID(SK_InvalidUniqueID)
         SkDEBUGCODE(, fNumClips(0)) {
-    SkASSERT(fOpMemoryPool);
     fTarget->setLastRenderTask(this);
 }
 
-void GrOpsTask::deleteOps() {
+void GrRenderTargetOpList::deleteOps() {
     for (auto& chain : fOpChains) {
         chain.deleteOps(fOpMemoryPool.get());
     }
     fOpChains.reset();
 }
 
-GrOpsTask::~GrOpsTask() {
+GrRenderTargetOpList::~GrRenderTargetOpList() {
     this->deleteOps();
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
-void GrOpsTask::endFlush() {
-    fLastClipStackGenID = SK_InvalidUniqueID;
-    this->deleteOps();
-    fClipAllocator.reset();
-
-    if (fTarget && this == fTarget->getLastRenderTask()) {
-        fTarget->setLastRenderTask(nullptr);
-    }
-
-    fTarget.reset();
-    fDeferredProxies.reset();
-    fAuditTrail = nullptr;
+#ifdef SK_DEBUG
+static const char* load_op_to_name(GrLoadOp op) {
+    return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
 }
 
-void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
+void GrRenderTargetOpList::dump(bool printDependencies) const {
+    INHERITED::dump(printDependencies);
+
+    SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
+             load_op_to_name(fColorLoadOp),
+             GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
+             load_op_to_name(fStencilLoadOp));
+
+    SkDebugf("ops (%d):\n", fOpChains.count());
+    for (int i = 0; i < fOpChains.count(); ++i) {
+        SkDebugf("*******************************\n");
+        if (!fOpChains[i].head()) {
+            SkDebugf("%d: <combined forward or failed instantiation>\n", i);
+        } else {
+            SkDebugf("%d: %s\n", i, fOpChains[i].head()->name());
+            SkRect bounds = fOpChains[i].bounds();
+            SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
+                     bounds.fTop, bounds.fRight, bounds.fBottom);
+            for (const auto& op : GrOp::ChainRange<>(fOpChains[i].head())) {
+                SkString info = SkTabString(op.dumpInfo(), 1);
+                SkDebugf("%s\n", info.c_str());
+                bounds = op.bounds();
+                SkDebugf("\tClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
+                         bounds.fTop, bounds.fRight, bounds.fBottom);
+            }
+        }
+    }
+}
+
+void GrRenderTargetOpList::visitProxies_debugOnly(const GrOp::VisitProxyFunc& func) const {
+    for (const OpChain& chain : fOpChains) {
+        chain.visitProxies(func);
+    }
+}
+
+#endif
+
+void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
     SkASSERT(fTarget->peekRenderTarget());
     SkASSERT(this->isClosed());
 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
@@ -429,7 +455,7 @@
 
     // TODO:
     // We would like to (at this level) only ever clear & discard. We would need
-    // to stop splitting up higher level OpsTasks for copyOps to achieve that.
+    // to stop splitting up higher level opLists for copyOps to achieve that.
     // Note: we would still need SB loads and stores but they would happen at a
     // lower level (inside the VK command buffer).
     const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
@@ -443,7 +469,7 @@
 // TODO: this is where GrOp::renderTarget is used (which is fine since it
 // is at flush time). However, we need to store the RenderTargetProxy in the
 // Ops and instantiate them here.
-bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
+bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
     if (this->isNoOp()) {
         return false;
     }
@@ -499,15 +525,31 @@
     return true;
 }
 
-void GrOpsTask::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
+void GrRenderTargetOpList::endFlush() {
+    fLastClipStackGenID = SK_InvalidUniqueID;
+    this->deleteOps();
+    fClipAllocator.reset();
+    INHERITED::endFlush();
+}
+
+void GrRenderTargetOpList::discard() {
+    // Discard calls to in-progress opLists are ignored. Calls at the start update the
+    // opLists' color & stencil load ops.
+    if (this->isEmpty()) {
+        fColorLoadOp = GrLoadOp::kDiscard;
+        fStencilLoadOp = GrLoadOp::kDiscard;
+    }
+}
+
+void GrRenderTargetOpList::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
     fColorLoadOp = op;
     fLoadClearColor = color;
 }
 
-bool GrOpsTask::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
+bool GrRenderTargetOpList::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
     // Mark the color load op as discard (this may be followed by a clearColorOnLoad call to make
     // the load op kClear, or it may be followed by an explicit op). In the event of an absClear()
-    // after a regular clear(), we could end up with a clear load op and a real clear op in the task
+    // after a regular clear(), we could end up with a clear load op and a real clear op in the list
     // if the load op were not reset here.
     fColorLoadOp = GrLoadOp::kDiscard;
 
@@ -521,87 +563,19 @@
         this->deleteOps();
         fDeferredProxies.reset();
 
-        // If the opsTask is using a render target which wraps a vulkan command buffer, we can't do
-        // a clear load since we cannot change the render pass that we are using. Thus we fall back
-        // to making a clear op in this case.
+        // If the opList is using a render target which wraps a vulkan command buffer, we can't do a
+        // clear load since we cannot change the render pass that we are using. Thus we fall back to
+        // making a clear op in this case.
         return !fTarget->asRenderTargetProxy()->wrapsVkSecondaryCB();
     }
 
-    // Could not empty the task, so an op must be added to handle the clear
+    // Could not empty the list, so an op must be added to handle the clear
     return false;
 }
 
-void GrOpsTask::discard() {
-    // Discard calls to in-progress opsTasks are ignored. Calls at the start update the
-    // opsTasks' color & stencil load ops.
-    if (this->isEmpty()) {
-        fColorLoadOp = GrLoadOp::kDiscard;
-        fStencilLoadOp = GrLoadOp::kDiscard;
-    }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
-#ifdef SK_DEBUG
-static const char* load_op_to_name(GrLoadOp op) {
-    return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
-}
-
-void GrOpsTask::dump(bool printDependencies) const {
-    GrRenderTask::dump(printDependencies);
-
-    SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
-             load_op_to_name(fColorLoadOp),
-             GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
-             load_op_to_name(fStencilLoadOp));
-
-    SkDebugf("ops (%d):\n", fOpChains.count());
-    for (int i = 0; i < fOpChains.count(); ++i) {
-        SkDebugf("*******************************\n");
-        if (!fOpChains[i].head()) {
-            SkDebugf("%d: <combined forward or failed instantiation>\n", i);
-        } else {
-            SkDebugf("%d: %s\n", i, fOpChains[i].head()->name());
-            SkRect bounds = fOpChains[i].bounds();
-            SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
-                     bounds.fTop, bounds.fRight, bounds.fBottom);
-            for (const auto& op : GrOp::ChainRange<>(fOpChains[i].head())) {
-                SkString info = SkTabString(op.dumpInfo(), 1);
-                SkDebugf("%s\n", info.c_str());
-                bounds = op.bounds();
-                SkDebugf("\tClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
-                         bounds.fTop, bounds.fRight, bounds.fBottom);
-            }
-        }
-    }
-}
-
-void GrOpsTask::visitProxies_debugOnly(const GrOp::VisitProxyFunc& func) const {
-    for (const OpChain& chain : fOpChains) {
-        chain.visitProxies(func);
-    }
-}
-
-#endif
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool GrOpsTask::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
-    bool used = false;
-
-    auto visit = [ proxyToCheck, &used ] (GrSurfaceProxy* p, GrMipMapped) {
-        if (p == proxyToCheck) {
-            used = true;
-        }
-    };
-    for (const OpChain& recordedOp : fOpChains) {
-        recordedOp.visitProxies(visit);
-    }
-
-    return used;
-}
-
-void GrOpsTask::handleInternalAllocationFailure() {
+void GrRenderTargetOpList::handleInternalAllocationFailure() {
     bool hasUninstantiatedProxy = false;
     auto checkInstantiation = [&hasUninstantiatedProxy](GrSurfaceProxy* p, GrMipMapped) {
         if (!p->isInstantiated()) {
@@ -618,7 +592,23 @@
     }
 }
 
-void GrOpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+bool GrRenderTargetOpList::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
+    bool used = false;
+
+    auto visit = [ proxyToCheck, &used ] (GrSurfaceProxy* p, GrMipMapped) {
+        if (p == proxyToCheck) {
+            used = true;
+        }
+    };
+    for (const OpChain& recordedOp : fOpChains) {
+        recordedOp.visitProxies(visit);
+    }
+
+    return used;
+}
+
+void GrRenderTargetOpList::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+
     for (int i = 0; i < fDeferredProxies.count(); ++i) {
         SkASSERT(!fDeferredProxies[i]->isInstantiated());
         // We give all the deferred proxies a write usage at the very start of flushing. This
@@ -629,7 +619,7 @@
         alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
     }
 
-    // Add the interval for all the writes to this GrOpsTasks's target
+    // Add the interval for all the writes to this opList's target
     if (fOpChains.count()) {
         unsigned int cur = alloc->curOp();
 
@@ -657,14 +647,14 @@
     }
 }
 
-void GrOpsTask::recordOp(
+void GrRenderTargetOpList::recordOp(
         std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis, GrAppliedClip* clip,
         const DstProxy* dstProxy, const GrCaps& caps) {
     SkDEBUGCODE(op->validate();)
     SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxy && dstProxy->proxy()));
     SkASSERT(fTarget);
 
-    // A closed GrOpsTask should never receive new/more ops
+    // A closed GrOpList should never receive new/more ops
     SkASSERT(!this->isClosed());
     if (!op->bounds().isFinite()) {
         fOpMemoryPool->release(std::move(op));
@@ -676,7 +666,7 @@
     // 2) intersect with something
     // 3) find a 'blocker'
     GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget->uniqueID());
-    GrOP_INFO("opsTask: %d Recording (%s, opID: %u)\n"
+    GrOP_INFO("opList: %d Recording (%s, opID: %u)\n"
               "\tBounds [L: %.2f, T: %.2f R: %.2f B: %.2f]\n",
                this->uniqueID(),
                op->name(),
@@ -716,9 +706,9 @@
     fOpChains.emplace_back(std::move(op), processorAnalysis, clip, dstProxy);
 }
 
-void GrOpsTask::forwardCombine(const GrCaps& caps) {
+void GrRenderTargetOpList::forwardCombine(const GrCaps& caps) {
     SkASSERT(!this->isClosed());
-    GrOP_INFO("opsTask: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());
+    GrOP_INFO("opList: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());
 
     for (int i = 0; i < fOpChains.count() - 1; ++i) {
         OpChain& chain = fOpChains[i];
diff --git a/src/gpu/GrOpsTask.h b/src/gpu/GrRenderTargetOpList.h
similarity index 86%
rename from src/gpu/GrOpsTask.h
rename to src/gpu/GrRenderTargetOpList.h
index a89b22c..fc675e3 100644
--- a/src/gpu/GrOpsTask.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -1,47 +1,41 @@
 /*
- * Copyright 2019 Google Inc.
+ * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
-#ifndef GrOpsTask_DEFINED
-#define GrOpsTask_DEFINED
+#ifndef GrRenderTargetOpList_DEFINED
+#define GrRenderTargetOpList_DEFINED
 
 #include "include/core/SkMatrix.h"
-#include "include/core/SkRefCnt.h"
 #include "include/core/SkStrokeRec.h"
 #include "include/core/SkTypes.h"
-#include "include/private/SkColorData.h"
 #include "include/private/SkTArray.h"
-#include "include/private/SkTDArray.h"
 #include "src/core/SkArenaAlloc.h"
 #include "src/core/SkClipStack.h"
 #include "src/core/SkStringUtils.h"
 #include "src/core/SkTLazy.h"
 #include "src/gpu/GrAppliedClip.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrPathRendering.h"
 #include "src/gpu/GrPrimitiveProcessor.h"
-#include "src/gpu/GrRenderTask.h"
 #include "src/gpu/ops/GrDrawOp.h"
 #include "src/gpu/ops/GrOp.h"
 
 class GrAuditTrail;
-class GrCaps;
 class GrClearOp;
-class GrGpuBuffer;
-class GrOpMemoryPool;
+class GrCaps;
 class GrRenderTargetProxy;
 
-class GrOpsTask : public GrRenderTask {
+class GrRenderTargetOpList final : public GrOpList {
 private:
     using DstProxy = GrXferProcessor::DstProxy;
 
 public:
-    GrOpsTask(sk_sp<GrOpMemoryPool>, sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
-    ~GrOpsTask() override;
+    GrRenderTargetOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
 
-    GrOpsTask* asOpsTask() override { return this; }
+    ~GrRenderTargetOpList() override;
 
     bool isEmpty() const { return fOpChains.empty(); }
 
@@ -95,13 +89,23 @@
 
     void discard();
 
+    GrRenderTargetOpList* asRenderTargetOpList() override { return this; }
+
     SkDEBUGCODE(void dump(bool printDependencies) const override;)
     SkDEBUGCODE(int numClips() const override { return fNumClips; })
     SkDEBUGCODE(void visitProxies_debugOnly(const GrOp::VisitProxyFunc&) const;)
 
 private:
+    friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
+
+    // The RTC and RTOpList have to work together to handle buffer clears. In most cases, buffer
+    // clearing can be done natively, in which case the op list's load ops are sufficient. In other
+    // cases, draw ops must be used, which makes the RTC the best place for those decisions. This,
+    // however, requires that the RTC be able to coordinate with the op list to achieve similar ends.
+    friend class GrRenderTargetContext;
+
     bool isNoOp() const {
-        // TODO: GrLoadOp::kDiscard -> [empty OpsTask] -> GrStoreOp::kStore should also be a no-op.
+        // TODO: GrLoadOp::kDiscard -> [empty opList] -> GrStoreOp::kStore should also be a no-op.
         // We don't count it as a no-op right now because of Vulkan. There are real cases where we
         // store a discard, and if we skip that render pass, then the next time we load the render
         // target, Vulkan detects loading of uninitialized memory and complains. If we don't skip
@@ -114,7 +118,7 @@
                GrLoadOp::kDiscard != fColorLoadOp;
     }
 
-    void deleteOps();
+    bool onIsUsed(GrSurfaceProxy*) const override;
 
     // Must only be called if native stencil buffer clearing is enabled
     void setStencilLoadOp(GrLoadOp op) { fStencilLoadOp = op; }
@@ -136,6 +140,8 @@
     // Returns true if the clear can be converted into a load op (barring device caps).
     bool resetForFullscreenClear(CanDiscardPreviousOps);
 
+    void deleteOps();
+
     class OpChain {
     public:
         OpChain(const OpChain&) = delete;
@@ -207,9 +213,6 @@
         SkRect fBounds;
     };
 
-
-    bool onIsUsed(GrSurfaceProxy*) const override;
-
     void handleInternalAllocationFailure() override;
 
     void gatherProxyIntervals(GrResourceAllocator*) const override;
@@ -224,39 +227,26 @@
         return (this->isNoOp()) ? ExpectedOutcome::kTargetUnchanged : ExpectedOutcome::kTargetDirty;
     }
 
-    friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
+    GrLoadOp                       fColorLoadOp    = GrLoadOp::kLoad;
+    SkPMColor4f                    fLoadClearColor = SK_PMColor4fTRANSPARENT;
+    GrLoadOp                       fStencilLoadOp  = GrLoadOp::kLoad;
 
-    // The RTC and OpsTask have to work together to handle buffer clears. In most cases, buffer
-    // clearing can be done natively, in which case the op list's load ops are sufficient. In other
-    // cases, draw ops must be used, which makes the RTC the best place for those decisions. This,
-    // however, requires that the RTC be able to coordinate with the op list to achieve similar ends.
-    friend class GrRenderTargetContext;
-
-    // This is a backpointer to the GrOpMemoryPool that holds the memory for this GrOpsTask's ops.
-    // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
-    // constituent GrOpsTask survives.
-    sk_sp<GrOpMemoryPool> fOpMemoryPool;
-    GrAuditTrail* fAuditTrail;
-
-    GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
-    SkPMColor4f fLoadClearColor = SK_PMColor4fTRANSPARENT;
-    GrLoadOp fStencilLoadOp = GrLoadOp::kLoad;
-
-    uint32_t fLastClipStackGenID;
-    SkIRect fLastDevClipBounds;
-    int fLastClipNumAnalyticFPs;
+    uint32_t                       fLastClipStackGenID;
+    SkIRect                        fLastDevClipBounds;
+    int                            fLastClipNumAnalyticFPs;
 
     // We must track if we have a wait op so that we don't delete the op when we have a full clear.
     bool fHasWaitOp = false;
 
-    // For ops/opsTask we have mean: 5 stdDev: 28
-    SkSTArray<25, OpChain, true> fOpChains;
+    // For ops/opList we have mean: 5 stdDev: 28
+    SkSTArray<25, OpChain, true>   fOpChains;
 
     // MDB TODO: 4096 for the first allocation of the clip space will be huge overkill.
     // Gather statistics to determine the correct size.
-    SkArenaAlloc fClipAllocator{4096};
-    SkDEBUGCODE(int fNumClips;)
+    SkArenaAlloc                   fClipAllocator{4096};
+    SkDEBUGCODE(int                fNumClips;)
 
+    typedef GrOpList INHERITED;
 };
 
 #endif
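The restored private 'typedef GrOpList INHERITED;' at the bottom of the class follows the Skia convention of aliasing the base class, so calls such as INHERITED::endFlush() survive a base-class rename with a one-line change. A generic sketch of the idiom, not tied to any Skia type:

    #include <cstdio>

    class Base {
    public:
        virtual ~Base() = default;
        virtual void endFlush() { std::puts("Base::endFlush"); }
    };

    class Derived final : public Base {
    public:
        void endFlush() override {
            std::puts("Derived::endFlush");  // subclass-specific teardown first
            INHERITED::endFlush();           // then defer to the base implementation
        }

    private:
        typedef Base INHERITED;  // only this line changes if the base is renamed
    };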
diff --git a/src/gpu/GrRenderTargetProxy.cpp b/src/gpu/GrRenderTargetProxy.cpp
index 8faa0a4..6d831fa 100644
--- a/src/gpu/GrRenderTargetProxy.cpp
+++ b/src/gpu/GrRenderTargetProxy.cpp
@@ -12,7 +12,7 @@
 #include "src/gpu/GrCaps.h"
 #include "src/gpu/GrContextPriv.h"
 #include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 #include "src/gpu/GrRenderTargetPriv.h"
 #include "src/gpu/GrResourceProvider.h"
 #include "src/gpu/GrSurfacePriv.h"
diff --git a/src/gpu/GrRenderTask.cpp b/src/gpu/GrRenderTask.cpp
index 639f4cf..0d54208 100644
--- a/src/gpu/GrRenderTask.cpp
+++ b/src/gpu/GrRenderTask.cpp
@@ -119,7 +119,7 @@
 
     // Does this proxy have mipmaps that need to be regenerated?
     if (GrMipMapped::kYes == mipMapped && textureProxy->mipMapsAreDirty()) {
-        // Create a renderTask that resolves the texture's mipmap data.
+        // Create an opList that resolves the texture's mipmap data.
         GrRenderTask* textureResolveTask = textureResolveManager.newTextureResolveRenderTask(
                 sk_ref_sp(textureProxy), GrTextureResolveFlags::kMipMaps, caps);
 
@@ -130,7 +130,7 @@
                  textureResolveTask->fDeferredProxies.back() == textureProxy);
 
         // The GrTextureResolveRenderTask factory should have also marked the mipmaps clean, set the
-        // last renderTask on the textureProxy to textureResolveTask, and closed textureResolveTask.
+        // last opList on the textureProxy to textureResolveTask, and closed textureResolveTask.
         SkASSERT(!textureProxy->mipMapsAreDirty());
         SkASSERT(textureProxy->getLastRenderTask() == textureResolveTask);
         SkASSERT(textureResolveTask->isClosed());
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index f13a0de..7b2c776 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -15,12 +15,13 @@
 #include "src/gpu/GrTextureResolveManager.h"
 
 class GrOpFlushState;
-class GrOpsTask;
+class GrOpList;
+class GrRenderTargetOpList;
 class GrResourceAllocator;
 
 // This class abstracts a task that targets a single GrSurfaceProxy, participates in the
 // GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
-// contents. (e.g., an opsTask that executes a command buffer, a task to regenerate mipmaps, etc.)
+// contents. (e.g., an opList that executes a command buffer, a task to regenerate mipmaps, etc.)
 class GrRenderTask : public SkRefCnt {
 public:
     GrRenderTask(sk_sp<GrSurfaceProxy> target);
@@ -53,9 +54,9 @@
     uint32_t uniqueID() const { return fUniqueID; }
 
     /*
-     * Safely cast this GrRenderTask to a GrOpsTask (if possible).
+     * Safely cast this GrRenderTask to a GrRenderTargetOpList (if possible).
      */
-    virtual GrOpsTask* asOpsTask() { return nullptr; }
+    virtual GrRenderTargetOpList* asRenderTargetOpList() { return nullptr; }
 
     /*
      * Dump out the GrRenderTask dependency DAG
@@ -164,9 +165,9 @@
     const uint32_t         fUniqueID;
     uint32_t               fFlags;
 
-    // 'this' GrRenderTask relies on the output of the GrRenderTasks in 'fDependencies'
+    // 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
     SkSTArray<1, GrRenderTask*, true> fDependencies;
-    // 'this' GrRenderTask's output is relied on by the GrRenderTasks in 'fDependents'
+    // 'this' GrOpList's output is relied on by the GrOpLists in 'fDependents'
     SkSTArray<1, GrRenderTask*, true> fDependents;
 };
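The fDependencies/fDependents arrays above record every DAG edge from both endpoints, so the drawing manager can topologically sort the tasks and walk the graph in either direction. A minimal sketch of that double bookkeeping with a hypothetical Task type:

    #include <vector>

    struct Task {
        std::vector<Task*> dependencies;  // tasks whose output this one consumes
        std::vector<Task*> dependents;    // tasks that consume this one's output
    };

    // Record that 'consumer' relies on 'producer', from both endpoints.
    void addDependency(Task* consumer, Task* producer) {
        consumer->dependencies.push_back(producer);
        producer->dependents.push_back(consumer);
    }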
 
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index ecc9a42..f73f5be 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -9,7 +9,7 @@
 
 #include "src/gpu/GrDeinstantiateProxyTracker.h"
 #include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrRenderTargetProxy.h"
 #include "src/gpu/GrResourceCache.h"
 #include "src/gpu/GrResourceProvider.h"
@@ -53,17 +53,16 @@
     }
 }
 
-void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
-    SkASSERT(!fAssigned);      // We shouldn't be adding any opsTasks after (or during) assignment
+void GrResourceAllocator::markEndOfOpList(int opListIndex) {
+    SkASSERT(!fAssigned);      // We shouldn't be adding any opLists after (or during) assignment
 
-    SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
-    if (!fEndOfOpsTaskOpIndices.empty()) {
-        SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
+    SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
+    if (!fEndOfOpListOpIndices.empty()) {
+        SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
     }
 
-    // This is the first op index of the next opsTask
-    fEndOfOpsTaskOpIndices.push_back(this->curOp());
-    SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
+    fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
+    SkASSERT(fEndOfOpListOpIndices.count() <= fNumOpLists);
 }
 
 GrResourceAllocator::~GrResourceAllocator() {
@@ -114,7 +113,7 @@
             if (0 == start && 0 == end) {
                 // This interval is for the initial upload to a deferred proxy. Due to the vagaries
                 // of how deferred proxies are collected they can appear as uploads multiple times
-                // in a single opsTask's list and as uploads in several opsTasks.
+                // in a single opList's list and as uploads in several opLists.
                 SkASSERT(0 == intvl->start());
             } else if (isDirectDstRead) {
                 // Direct reads from the render target itself should occur w/in the existing
@@ -353,30 +352,30 @@
     }
 }
 
-bool GrResourceAllocator::onOpsTaskBoundary() const {
+bool GrResourceAllocator::onOpListBoundary() const {
     if (fIntvlList.empty()) {
-        SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
-        // Although technically on an opsTask boundary there is no need to force an
+        SkASSERT(fCurOpListIndex+1 <= fNumOpLists);
+        // Although technically on an opList boundary there is no need to force an
         // intermediate flush here
         return false;
     }
 
     const Interval* tmp = fIntvlList.peekHead();
-    return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
+    return fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start();
 }
 
 void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
-    *stopIndex = fCurOpsTaskIndex+1;
+    *stopIndex = fCurOpListIndex+1;
 
     // This is interrupting the allocation of resources for this flush. We need to
     // proactively clear the active interval list of any intervals that aren't
     // guaranteed to survive the partial flush lest they become zombies (i.e.,
     // holding a deleted surface proxy).
     const Interval* tmp = fIntvlList.peekHead();
-    SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());
+    SkASSERT(fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start());
 
-    fCurOpsTaskIndex++;
-    SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
+    fCurOpListIndex++;
+    SkASSERT(fCurOpListIndex < fNumOpLists);
 
     this->expire(tmp->start());
 }
@@ -386,28 +385,28 @@
     *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
                                         : AssignError::kNoError;
 
-    SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());
+    SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
 
     fIntvlHash.reset(); // we don't need the interval hash anymore
 
-    if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
+    if (fCurOpListIndex >= fEndOfOpListOpIndices.count()) {
         return false; // nothing to render
     }
 
-    *startIndex = fCurOpsTaskIndex;
-    *stopIndex = fEndOfOpsTaskOpIndices.count();
+    *startIndex = fCurOpListIndex;
+    *stopIndex = fEndOfOpListOpIndices.count();
 
     if (fIntvlList.empty()) {
-        fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
+        fCurOpListIndex = fEndOfOpListOpIndices.count();
         return true;          // no resources to assign
     }
 
 #if GR_ALLOCATION_SPEW
-    SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
-             *startIndex, *stopIndex, fNumOpsTasks);
-    SkDebugf("EndOfOpsTaskIndices: ");
-    for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
-        SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
+    SkDebugf("assigning opLists %d through %d out of %d numOpLists\n",
+             *startIndex, *stopIndex, fNumOpLists);
+    SkDebugf("EndOfOpListIndices: ");
+    for (int i = 0; i < fEndOfOpListOpIndices.count(); ++i) {
+        SkDebugf("%d ", fEndOfOpListOpIndices[i]);
     }
     SkDebugf("\n");
 #endif
@@ -418,9 +417,9 @@
     this->dumpIntervals();
 #endif
     while (Interval* cur = fIntvlList.popHead()) {
-        if (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
-            fCurOpsTaskIndex++;
-            SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
+        if (fEndOfOpListOpIndices[fCurOpListIndex] <= cur->start()) {
+            fCurOpListIndex++;
+            SkASSERT(fCurOpListIndex < fNumOpLists);
         }
 
         this->expire(cur->start());
@@ -438,8 +437,8 @@
             fActiveIntvls.insertByIncreasingEnd(cur);
 
             if (fResourceProvider->overBudget()) {
-                // Only force intermediate draws on opsTask boundaries
-                if (this->onOpsTaskBoundary()) {
+                // Only force intermediate draws on opList boundaries
+                if (this->onOpListBoundary()) {
                     this->forceIntermediateFlush(stopIndex);
                     return true;
                 }
@@ -485,8 +484,8 @@
         fActiveIntvls.insertByIncreasingEnd(cur);
 
         if (fResourceProvider->overBudget()) {
-            // Only force intermediate draws on opsTask boundaries
-            if (this->onOpsTaskBoundary()) {
+            // Only force intermediate draws on opList boundaries
+            if (this->onOpListBoundary()) {
                 this->forceIntermediateFlush(stopIndex);
                 return true;
             }
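The onOpListBoundary() calls above gate intermediate flushes: when the resource provider is over budget, the allocator may only cut the flush if every interval still pending begins at or past the current list's recorded end index. A compact sketch of that test, reducing the allocator's machinery to plain vectors (sorted by interval start):

    #include <vector>

    struct Interval { unsigned start = 0, end = 0; };

    // endOfListOpIndices[i] holds the first op index of list i+1, as built up
    // by markEndOfOpList(); 'pending' is sorted by increasing start index.
    bool onListBoundary(const std::vector<Interval>& pending,
                        const std::vector<unsigned>& endOfListOpIndices,
                        size_t curListIndex) {
        if (pending.empty()) {
            return false;  // nothing left to place; no intermediate flush needed
        }
        // Safe to cut here only if the next live interval belongs to a later list.
        return endOfListOpIndices[curListIndex] <= pending.front().start;
    }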
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index 0dc7b46..b448c2d 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -38,24 +38,24 @@
  *     adds the new interval to the active list (that is sorted by increasing end index)
  *
  * Note: the op indices (used in the usage intervals) come from the order of the ops in
- * their opsTasks after the opsTask DAG has been linearized.
+ * their opLists after the opList DAG has been linearized.
  *
  *************************************************************************************************
  * How does instantiation failure handling work when explicitly allocating?
  *
  * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
- * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
+ * gathered (i.e., in GrOpList::gatherProxyIntervals).
  *
  * The allocator will churn through this list but could fail anywhere.
  *
  * Allocation failure handling occurs at two levels:
  *
- * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
+ * 1) If the GrSurface backing an opList fails to allocate then the entire opList is dropped.
  *
  * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
- * (via GrOpsTask::purgeOpsWithUninstantiatedProxies)
+ * (via GrOpList::purgeOpsWithUninstantiatedProxies)
  *
- * The pass to determine which ops to drop is a bit laborious so we only check the opsTasks and
+ * The pass to determine which ops to drop is a bit laborious so we only check the opLists and
  * individual ops when something goes wrong in allocation (i.e., when the return code from
  * GrResourceAllocator::assign is bad)
  *
@@ -70,10 +70,10 @@
 public:
     GrResourceAllocator(GrResourceProvider* resourceProvider,
                         GrDeinstantiateProxyTracker* tracker
-                        SkDEBUGCODE(, int numOpsTasks))
+                        SkDEBUGCODE(, int numOpLists))
             : fResourceProvider(resourceProvider)
             , fDeinstantiateTracker(tracker)
-            SkDEBUGCODE(, fNumOpsTasks(numOpsTasks)) {
+            SkDEBUGCODE(, fNumOpLists(numOpLists)) {
     }
 
     ~GrResourceAllocator();
@@ -82,7 +82,7 @@
     void incOps() { fNumOps++; }
 
     /** Indicates whether a given call to addInterval represents an actual usage of the
-     *  provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
+     *  provided proxy. This is mainly here to accommodate deferred proxies attached to opLists.
      *  In that case we need to create an extra long interval for them (due to the upload) but
      *  don't want to count that usage/reference towards the proxy's recyclability.
      */
@@ -101,16 +101,16 @@
         kFailedProxyInstantiation
     };
 
-    // Returns true when the opsTasks from 'startIndex' to 'stopIndex' should be executed;
+    // Returns true when the opLists from 'startIndex' to 'stopIndex' should be executed;
     // false when nothing remains to be executed.
     // If any proxy fails to instantiate, the AssignError will be set to kFailedProxyInstantiation.
     // If this happens, the caller should remove all ops which reference an uninstantiated proxy.
-    // This is used to execute a portion of the queued opsTasks in order to reduce the total
+    // This is used to execute a portion of the queued opLists in order to reduce the total
     // amount of GPU resources required.
     bool assign(int* startIndex, int* stopIndex, AssignError* outError);
 
     void determineRecyclability();
-    void markEndOfOpsTask(int opsTaskIndex);
+    void markEndOfOpList(int opListIndex);
 
 #if GR_ALLOCATION_SPEW
     void dumpIntervals();
@@ -122,7 +122,7 @@
     // Remove dead intervals from the active list
     void expire(unsigned int curIndex);
 
-    bool onOpsTaskBoundary() const;
+    bool onOpListBoundary() const;
     void forceIntermediateFlush(int* stopIndex);
 
     // These two methods wrap the interactions with the free pool
@@ -269,9 +269,9 @@
     IntervalList                 fActiveIntvls;      // List of live intervals during assignment
                                                      // (sorted by increasing end)
     unsigned int                 fNumOps = 0;
-    SkTArray<unsigned int>       fEndOfOpsTaskOpIndices;
-    int                          fCurOpsTaskIndex = 0;
-    SkDEBUGCODE(const int        fNumOpsTasks = -1;)
+    SkTArray<unsigned int>       fEndOfOpListOpIndices;
+    int                          fCurOpListIndex = 0;
+    SkDEBUGCODE(const int        fNumOpLists = -1;)
 
     SkDEBUGCODE(bool             fAssigned = false;)
 
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index d3264d3..4adb2fa 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -45,7 +45,7 @@
         kNone            = 0x0,
 
         /** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
-         *  set when accessing resources during a GrOpsTask flush. This includes the execution of
+         *  set when accessing resources during a GrOpList flush. This includes the execution of
          *  GrOp objects. The reason is that these memory operations are done immediately and
          *  will occur out of order WRT the operations being flushed.
          *  Make this automatic: https://bug.skia.org/4156
diff --git a/src/gpu/GrSoftwarePathRenderer.cpp b/src/gpu/GrSoftwarePathRenderer.cpp
index c562436..a3d3f49 100644
--- a/src/gpu/GrSoftwarePathRenderer.cpp
+++ b/src/gpu/GrSoftwarePathRenderer.cpp
@@ -16,6 +16,7 @@
 #include "src/gpu/GrDeferredProxyUploader.h"
 #include "src/gpu/GrGpuResourcePriv.h"
 #include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrProxyProvider.h"
 #include "src/gpu/GrRecordingContextPriv.h"
 #include "src/gpu/GrRenderTargetContextPriv.h"
diff --git a/src/gpu/GrSurface.cpp b/src/gpu/GrSurface.cpp
index df451a8..db5a707 100644
--- a/src/gpu/GrSurface.cpp
+++ b/src/gpu/GrSurface.cpp
@@ -8,6 +8,7 @@
 #include "include/gpu/GrContext.h"
 #include "include/gpu/GrSurface.h"
 #include "include/gpu/GrTexture.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrRenderTarget.h"
 #include "src/gpu/GrResourceProvider.h"
 #include "src/gpu/GrSurfacePriv.h"
diff --git a/src/gpu/GrSurfaceContext.cpp b/src/gpu/GrSurfaceContext.cpp
index 3a4726b..dff02b0 100644
--- a/src/gpu/GrSurfaceContext.cpp
+++ b/src/gpu/GrSurfaceContext.cpp
@@ -15,6 +15,7 @@
 #include "src/gpu/GrDataUtils.h"
 #include "src/gpu/GrDrawingManager.h"
 #include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrRecordingContextPriv.h"
 #include "src/gpu/GrRenderTargetContext.h"
 #include "src/gpu/GrSurfaceContextPriv.h"
@@ -27,10 +28,10 @@
     SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
 #define RETURN_FALSE_IF_ABANDONED  if (this->fContext->priv().abandoned()) { return false; }
 
-// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
-// GrOpsTasks to be picked up and added to by renderTargetContexts lower in the call
-// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
-// when the renderTargetContext attempts to use it (via getOpsTask).
+// In MDB mode the reffing of the 'getLastOpList' call's result allows in-progress
+// GrOpLists to be picked up and added to by renderTargetContexts lower in the call
+// stack. When this occurs with a closed GrOpList, a new one will be allocated
+// when the renderTargetContext attempts to use it (via getOpList).
 GrSurfaceContext::GrSurfaceContext(GrRecordingContext* context,
                                    GrColorType colorType,
                                    SkAlphaType alphaType,
diff --git a/src/gpu/GrSurfaceContext.h b/src/gpu/GrSurfaceContext.h
index fccc52b..6bbeb72 100644
--- a/src/gpu/GrSurfaceContext.h
+++ b/src/gpu/GrSurfaceContext.h
@@ -19,6 +19,7 @@
 
 class GrAuditTrail;
 class GrDrawingManager;
+class GrOpList;
 class GrRecordingContext;
 class GrRenderTargetContext;
 class GrRenderTargetProxy;
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index 484087b..f9ec3c0 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -16,7 +16,7 @@
 #include "src/gpu/GrClip.h"
 #include "src/gpu/GrContextPriv.h"
 #include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrProxyProvider.h"
 #include "src/gpu/GrRecordingContextPriv.h"
 #include "src/gpu/GrStencilAttachment.h"
@@ -107,7 +107,7 @@
 }
 
 GrSurfaceProxy::~GrSurfaceProxy() {
-    // For this to be deleted the opsTask that held a ref on it (if there was one) must have been
+    // For this to be deleted the opList that held a ref on it (if there was one) must have been
     // deleted, which would have cleared out this back pointer.
     SkASSERT(!fLastRenderTask);
 }
@@ -302,8 +302,8 @@
     fLastRenderTask = renderTask;
 }
 
-GrOpsTask* GrSurfaceProxy::getLastOpsTask() {
-    return fLastRenderTask ? fLastRenderTask->asOpsTask() : nullptr;
+GrRenderTargetOpList* GrSurfaceProxy::getLastRenderTargetOpList() {
+    return fLastRenderTask ? fLastRenderTask->asRenderTargetOpList() : nullptr;
 }
 
 int GrSurfaceProxy::worstCaseWidth() const {
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index 5b0297d..4c78019 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -18,8 +18,8 @@
 
 class GrCaps;
 class GrContext_Base;
-class GrOpsTask;
 class GrRecordingContext;
+class GrRenderTargetOpList;
 class GrRenderTargetProxy;
 class GrRenderTask;
 class GrResourceProvider;
@@ -281,7 +281,7 @@
     void setLastRenderTask(GrRenderTask*);
     GrRenderTask* getLastRenderTask() { return fLastRenderTask; }
 
-    GrOpsTask* getLastOpsTask();
+    GrRenderTargetOpList* getLastRenderTargetOpList();
 
     /**
      * Retrieves the amount of GPU memory that will be or currently is used by this resource
@@ -439,12 +439,12 @@
     // the instantiation method.
     mutable size_t         fGpuMemorySize;
 
-    // The last GrRenderTask that wrote to or is currently going to write to this surface.
-    // The GrRenderTask can be closed (e.g., no surface context is currently bound
+    // The last opList that wrote to or is currently going to write to this surface.
+    // The opList can be closed (e.g., no surface context is currently bound
     // to this proxy).
     // This back-pointer is required so that we can add a dependency between
-    // the GrRenderTask used to create the current contents of this surface
-    // and the GrRenderTask of a destination surface to which this one is being drawn or copied.
+    // the opList used to create the current contents of this surface
+    // and the opList of a destination surface to which this one is being drawn or copied.
     // This pointer is unreffed. GrRenderTasks own a ref on their surface proxies.
     GrRenderTask*          fLastRenderTask;
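The unreffed fLastRenderTask back pointer described above stays safe because ownership runs the other way: the task refs its proxy and clears the proxy's back pointer before dropping that ref, as the endFlush() hunk earlier in this patch shows. A sketch of the protocol with std::shared_ptr standing in for sk_sp:

    #include <memory>

    struct Proxy;

    struct Task {
        std::shared_ptr<Proxy> target;  // the task keeps its proxy alive
        void endFlush();
    };

    struct Proxy {
        Task* lastTask = nullptr;  // raw (unreffed) back pointer, like fLastRenderTask
    };

    void Task::endFlush() {
        if (target && target->lastTask == this) {
            target->lastTask = nullptr;  // clear the back pointer first...
        }
        target.reset();                  // ...then drop the owning ref
    }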
 
diff --git a/src/gpu/GrTextureProxy.h b/src/gpu/GrTextureProxy.h
index 5870510..986975b 100644
--- a/src/gpu/GrTextureProxy.h
+++ b/src/gpu/GrTextureProxy.h
@@ -145,7 +145,7 @@
 
     // This tracks the mipmap status at the proxy level and is thus somewhat distinct from the
     // backing GrTexture's mipmap status. In particular, this status is used to determine when
-    // mipmap levels need to be explicitly regenerated during the execution of a DAG of opsTasks.
+    // mipmap levels need to be explicitly regenerated during the execution of a DAG of opLists.
     GrMipMapsStatus  fMipMapsStatus;
     // TEMPORARY: We are in the process of moving GrMipMapsStatus from the texture to the proxy.
     // We track the fInitialMipMapsStatus here so we can assert that the proxy did indeed expect
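
The proxy-level mipmap status above is essentially a dirty flag that tells the DAG when to insert a regeneration step before a mipmapped sample. A small sketch of that bookkeeping, with hypothetical names rather than Skia's actual API:

    #include <cstdio>

    enum class MipStatus { kNotAllocated, kDirty, kValid };

    struct TextureProxy {
        MipStatus mipStatus = MipStatus::kValid;
        void markMipsDirty() { mipStatus = MipStatus::kDirty; }
        bool mipsNeedRegen() const { return mipStatus == MipStatus::kDirty; }
    };

    int main() {
        TextureProxy proxy;
        proxy.markMipsDirty();            // a draw wrote to the base level
        if (proxy.mipsNeedRegen()) {
            std::puts("schedule mipmap regeneration before sampling");
            proxy.mipStatus = MipStatus::kValid;
        }
        return 0;
    }
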
diff --git a/src/gpu/GrTextureResolveRenderTask.cpp b/src/gpu/GrTextureResolveRenderTask.cpp
index 1e52b87..613f6bd 100644
--- a/src/gpu/GrTextureResolveRenderTask.cpp
+++ b/src/gpu/GrTextureResolveRenderTask.cpp
@@ -27,7 +27,7 @@
             textureProxyPtr, GrMipMapped::kNo, GrTextureResolveManager(nullptr), caps);
     textureProxyPtr->setLastRenderTask(resolveTask.get());
 
-    // We only resolve the texture; nobody should try to do anything else with this opsTask.
+    // We only resolve the texture; nobody should try to do anything else with this opList.
     resolveTask->makeClosed(caps);
 
     if (GrTextureResolveFlags::kMipMaps & flags) {
@@ -41,7 +41,7 @@
 
 void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
     // This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
-    // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
+    // fEndOfOpListOpIndices will remain in sync), so we create a fake op# to capture the fact that
     // we manipulate fTarget.
     alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
                        GrResourceAllocator::ActualUse::kYes);
diff --git a/src/gpu/GrTransferFromRenderTask.cpp b/src/gpu/GrTransferFromRenderTask.cpp
index 7826bce..3e6dfc5 100644
--- a/src/gpu/GrTransferFromRenderTask.cpp
+++ b/src/gpu/GrTransferFromRenderTask.cpp
@@ -13,7 +13,7 @@
 
 void GrTransferFromRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
     // This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
-    // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
+    // fEndOfOpListOpIndices will remain in sync), so we create a fake op# to capture the fact that
     // we read fSrcProxy.
     alloc->addInterval(fSrcProxy.get(), alloc->curOp(), alloc->curOp(),
                        GrResourceAllocator::ActualUse::kYes);
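
Both gatherProxyIntervals overrides above use the same trick: a task with no "normal" ops still registers a zero-length interval at the current op index, so the per-list end-of-ops indices stay aligned with the op counter. A minimal sketch of that invariant (Allocator and its methods are hypothetical stand-ins, not GrResourceAllocator's real API):

    #include <vector>

    class Allocator {
    public:
        int curOp() const { return fOpCount; }
        void incOps() { ++fOpCount; }

        void addInterval(int proxyID, int start, int end) {
            fIntervals.push_back({proxyID, start, end});
        }

        // A task with no real ops still pins its target with a degenerate
        // [curOp, curOp] interval so later end-of-list indices stay in sync.
        void addFakeOpInterval(int proxyID) {
            this->addInterval(proxyID, this->curOp(), this->curOp());
            this->incOps();
        }

    private:
        struct Interval { int fProxyID, fStart, fEnd; };
        std::vector<Interval> fIntervals;
        int fOpCount = 0;
    };
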
diff --git a/src/gpu/GrUserStencilSettings.h b/src/gpu/GrUserStencilSettings.h
index 83a19b7..b89a9ec 100644
--- a/src/gpu/GrUserStencilSettings.h
+++ b/src/gpu/GrUserStencilSettings.h
@@ -13,12 +13,12 @@
 
 /**
  * Gr uses the stencil buffer to implement complex clipping inside the
- * GrOpsTask class. The GrOpsTask makes a subset of the stencil buffer
+ * GrOpList class. The GrOpList makes a subset of the stencil buffer
  * bits available for other uses by external code (user bits). Client code can
- * modify these bits. GrOpsTask will ignore ref, mask, and writemask bits
+ * modify these bits. GrOpList will ignore ref, mask, and writemask bits
  * provided by clients that fall outside the user range.
  *
- * When code outside the GrOpsTask class uses the stencil buffer the contract
+ * When code outside the GrOpList class uses the stencil buffer the contract
  * is as follows:
  *
  * > Normal stencil funcs allow the client to pass / fail regardless of the
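
The user-bits contract above amounts to masking: whatever ref, test mask, and write mask a client supplies, only the bits inside the user range ever reach the stencil unit. A minimal sketch, assuming an 8-bit stencil whose top bit is reserved for clipping (the constants are illustrative, not Skia's actual layout):

    #include <cstdint>

    constexpr uint8_t kClipBit  = 0x80;  // reserved for clip bookkeeping
    constexpr uint8_t kUserBits = 0x7F;  // available to client code

    struct StencilParams { uint8_t fRef, fTestMask, fWriteMask; };

    // Bits set outside the user range are ignored, per the contract above.
    inline StencilParams clampToUserBits(StencilParams in) {
        return { uint8_t(in.fRef & kUserBits),
                 uint8_t(in.fTestMask & kUserBits),
                 uint8_t(in.fWriteMask & kUserBits) };
    }
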
diff --git a/src/gpu/ccpr/GrCCClipPath.h b/src/gpu/ccpr/GrCCClipPath.h
index 32e5ae4..1494d2c6 100644
--- a/src/gpu/ccpr/GrCCClipPath.h
+++ b/src/gpu/ccpr/GrCCClipPath.h
@@ -19,7 +19,7 @@
 
 /**
  * These are keyed by SkPath generation ID, and store which device-space paths are accessed and
- * where by clip FPs in a given opsTask. A single GrCCClipPath can be referenced by multiple FPs. At
+ * where by clip FPs in a given opList. A single GrCCClipPath can be referenced by multiple FPs. At
  * flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
  */
 class GrCCClipPath {
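
Keying by generation ID means every clip FP that references the same device-space path in a given opList lands on the same GrCCClipPath entry. A sketch of the lookup, modeled on the fClipPaths map and the key construction visible in makeClipProcessor later in this diff (names and key width are illustrative):

    #include <cstdint>
    #include <map>

    struct ClipPath { bool fInitialized = false; };

    std::map<uint64_t, ClipPath> clipPaths;

    ClipPath& lookupClipPath(uint32_t pathGenID, bool evenOddFill) {
        // Fold the fill rule into the low bit, as the real key does.
        uint64_t key = (uint64_t(pathGenID) << 1) | uint64_t(evenOddFill);
        ClipPath& entry = clipPaths[key];  // default-constructed on first use
        // A freshly created entry would be initialized here (bounds, atlas mask).
        return entry;
    }
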
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 4ad6cd9..0a6433c 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -111,7 +111,7 @@
     // If the path is clipped, CCPR will only draw the visible portion. This helps improve batching,
     // since it eliminates the need for scissor when drawing to the main canvas.
     // FIXME: We should parse the path right here. It will provide a tighter bounding box for us to
-    // give the opsTask, as well as enabling threaded parsing when using DDL.
+    // give the opList, as well as enabling threaded parsing when using DDL.
     SkRect clippedDrawBounds;
     if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
         clippedDrawBounds.setEmpty();
@@ -122,9 +122,9 @@
 }
 
 GrCCDrawPathsOp::~GrCCDrawPathsOp() {
-    if (fOwningPerOpsTaskPaths) {
+    if (fOwningPerOpListPaths) {
         // Remove the list's dangling pointer to this Op before deleting it.
-        fOwningPerOpsTaskPaths->fDrawOps.remove(this);
+        fOwningPerOpListPaths->fDrawOps.remove(this);
     }
 }
 
@@ -195,10 +195,9 @@
 
 GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
     GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
-    SkASSERT(fOwningPerOpsTaskPaths);
+    SkASSERT(fOwningPerOpListPaths);
     SkASSERT(fNumDraws);
-    SkASSERT(!that->fOwningPerOpsTaskPaths ||
-             that->fOwningPerOpsTaskPaths == fOwningPerOpsTaskPaths);
+    SkASSERT(!that->fOwningPerOpListPaths || that->fOwningPerOpListPaths == fOwningPerOpListPaths);
     SkASSERT(that->fNumDraws);
 
     if (fProcessors != that->fProcessors ||
@@ -206,18 +205,18 @@
         return CombineResult::kCannotCombine;
     }
 
-    fDraws.append(std::move(that->fDraws), &fOwningPerOpsTaskPaths->fAllocator);
+    fDraws.append(std::move(that->fDraws), &fOwningPerOpListPaths->fAllocator);
 
     SkDEBUGCODE(fNumDraws += that->fNumDraws);
     SkDEBUGCODE(that->fNumDraws = 0);
     return CombineResult::kMerged;
 }
 
-void GrCCDrawPathsOp::addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths) {
+void GrCCDrawPathsOp::addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths) {
     SkASSERT(1 == fNumDraws);
-    SkASSERT(!fOwningPerOpsTaskPaths);
-    fOwningPerOpsTaskPaths = std::move(owningPerOpsTaskPaths);
-    fOwningPerOpsTaskPaths->fDrawOps.addToTail(this);
+    SkASSERT(!fOwningPerOpListPaths);
+    fOwningPerOpListPaths = std::move(owningPerOpListPaths);
+    fOwningPerOpListPaths->fDrawOps.addToTail(this);
 }
 
 void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
@@ -415,9 +414,9 @@
 }
 
 void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
-    SkASSERT(fOwningPerOpsTaskPaths);
+    SkASSERT(fOwningPerOpListPaths);
 
-    const GrCCPerFlushResources* resources = fOwningPerOpsTaskPaths->fFlushResources.get();
+    const GrCCPerFlushResources* resources = fOwningPerOpListPaths->fFlushResources.get();
     if (!resources) {
         return;  // Setup failed.
     }
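
The onCombineIfPossible change above preserves the merge pattern: when two draw-path ops share the same owner and identical processors, one op absorbs the other's draws and reports kMerged, leaving the donor empty. A minimal sketch of that shape (std::list stands in for the arena-backed GrCCSTLList):

    #include <list>

    enum class CombineResult { kMerged, kCannotCombine };

    struct DrawPathsOp {
        int fProcessors;           // stand-in for the fProcessors comparison
        std::list<int> fDraws;     // stand-in for the list of SingleDraws

        CombineResult combineIfPossible(DrawPathsOp& that) {
            if (fProcessors != that.fProcessors) {
                return CombineResult::kCannotCombine;
            }
            // Splice the other op's draws onto ours; 'that' is left empty.
            fDraws.splice(fDraws.end(), that.fDraws);
            return CombineResult::kMerged;
        }
    };
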
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index d0c4777..1499c28 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -17,7 +17,7 @@
 class GrCCAtlas;
 class GrCCPerFlushResources;
 struct GrCCPerFlushResourceSpecs;
-struct GrCCPerOpsTaskPaths;
+struct GrCCPerOpListPaths;
 class GrOnFlushResourceProvider;
 class GrRecordingContext;
 
@@ -46,7 +46,7 @@
     }
     void onPrepare(GrOpFlushState*) override {}
 
-    void addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths);
+    void addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths);
 
     // Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
     // increments/fills out the corresponding GrCCPerFlushResourceSpecs.
@@ -125,9 +125,9 @@
         friend class GrCCSTLList<SingleDraw>;  // To access fNext.
     };
 
-    // Declare fOwningPerOpsTaskPaths first, before fDraws. The draws use memory allocated by
-    // fOwningPerOpsTaskPaths, so it must not be unreffed until after fDraws is destroyed.
-    sk_sp<GrCCPerOpsTaskPaths> fOwningPerOpsTaskPaths;
+    // Declare fOwningPerOpListPaths first, before fDraws. The draws use memory allocated by
+    // fOwningPerOpListPaths, so it must not be unreffed until after fDraws is destroyed.
+    sk_sp<GrCCPerOpListPaths> fOwningPerOpListPaths;
 
     GrCCSTLList<SingleDraw> fDraws;
     SkDEBUGCODE(int fNumDraws = 1);
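
The declaration-order comment above leans on a core C++ rule: data members are destroyed in reverse order of declaration, so declaring the ref to the arena's owner first guarantees the arena outlives the draws allocated from it. A small illustration with hypothetical types:

    #include <memory>
    #include <vector>

    struct Arena {                        // stands in for the SkArenaAlloc
        std::vector<std::unique_ptr<int>> fSlots;
    };

    struct Owner { Arena fArena; };       // stands in for GrCCPerOpListPaths

    class Op {
        // Declared first => destroyed last, so the arena is still alive
        // while the arena-backed member below is torn down.
        std::shared_ptr<Owner> fOwner;
        int* fArenaBacked = nullptr;      // stands in for fDraws
    public:
        explicit Op(std::shared_ptr<Owner> owner) : fOwner(std::move(owner)) {
            fOwner->fArena.fSlots.push_back(std::make_unique<int>(0));
            fArenaBacked = fOwner->fArena.fSlots.back().get();
        }
    };
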
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index f2504e6..59122fe 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -62,8 +62,8 @@
 
 /**
  * This class wraps all the GPU resources that CCPR builds at flush time. It is allocated in CCPR's
- * preFlush() method, and referenced by all the GrCCPerOpsTaskPaths objects that are being flushed.
- * It is deleted in postFlush() once all the flushing GrCCPerOpsTaskPaths objects are deleted.
+ * preFlush() method, and referenced by all the GrCCPerOpListPaths objects that are being flushed.
+ * It is deleted in postFlush() once all the flushing GrCCPerOpListPaths objects are deleted.
  */
 class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
 public:
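
The lifetime described above is plain shared ownership: every GrCCPerOpListPaths flushing in a given cycle holds a ref to the resources, and the bundle dies when the last of them is destroyed in postFlush. A sketch with std::shared_ptr standing in for GrNonAtomicRef:

    #include <memory>
    #include <vector>

    struct FlushResources { /* atlases, buffers, ... */ };

    struct PerListPaths {
        std::shared_ptr<FlushResources> fFlushResources;
    };

    int main() {
        auto resources = std::make_shared<FlushResources>();  // preFlush()
        std::vector<PerListPaths> flushing(3, PerListPaths{resources});
        resources.reset();   // the renderer drops its own ref
        flushing.clear();    // postFlush(): last refs drop, bundle is freed
        return 0;
    }
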
diff --git a/src/gpu/ccpr/GrCCPerOpsTaskPaths.h b/src/gpu/ccpr/GrCCPerOpListPaths.h
similarity index 78%
rename from src/gpu/ccpr/GrCCPerOpsTaskPaths.h
rename to src/gpu/ccpr/GrCCPerOpListPaths.h
index ff8a224..e0dd115 100644
--- a/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
+++ b/src/gpu/ccpr/GrCCPerOpListPaths.h
@@ -5,8 +5,8 @@
  * found in the LICENSE file.
  */
 
-#ifndef GrCCPerOpsTaskPaths_DEFINED
-#define GrCCPerOpsTaskPaths_DEFINED
+#ifndef GrCCPerOpListPaths_DEFINED
+#define GrCCPerOpListPaths_DEFINED
 
 #include "include/core/SkRefCnt.h"
 #include "src/core/SkArenaAlloc.h"
@@ -19,10 +19,10 @@
 class GrCCPerFlushResources;
 
 /**
- * Tracks all the CCPR paths in a given opsTask that will be drawn when it flushes.
+ * Tracks all the CCPR paths in a given opList that will be drawn when it flushes.
  */
 // DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
-struct GrCCPerOpsTaskPaths : public SkRefCnt {
+struct GrCCPerOpListPaths : public SkRefCnt {
     SkTInternalLList<GrCCDrawPathsOp> fDrawOps;  // This class does not own these ops.
     std::map<uint32_t, GrCCClipPath> fClipPaths;
     SkSTArenaAlloc<10 * 1024> fAllocator{10 * 1024 * 2};
diff --git a/src/gpu/ccpr/GrCCSTLList.h b/src/gpu/ccpr/GrCCSTLList.h
index 29f26b9..eb75863 100644
--- a/src/gpu/ccpr/GrCCSTLList.h
+++ b/src/gpu/ccpr/GrCCSTLList.h
@@ -14,8 +14,8 @@
 
 /**
  * A singly-linked list whose head element is a local class member. This is required by
- * GrCCDrawPathsOp because the owning opsTask is unknown at the time of creation, so we can't use
- * its associated allocator to create the first element.
+ * GrCCDrawPathsOp because the owning opList is unknown at the time of creation, so we can't use its
+ * associated allocator to create the first element.
  */
 template<typename T> class GrCCSTLList : SkNoncopyable {
 public:
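
The constraint above is why the list's head is inline: the first draw is created before the op knows which opList (and hence which allocator) will own it, so the head must live inside the op itself and only subsequent elements come from the arena. A minimal sketch of an inline-head list (hypothetical, much simpler than the real template):

    #include <utility>

    template <typename T>
    class InlineHeadList {
    public:
        // The head is a plain member, so constructing the list needs no
        // allocator -- mirroring the constraint described above.
        template <typename... Args>
        explicit InlineHeadList(Args&&... args)
                : fHead(std::forward<Args>(args)...) {}

        T& head() { return fHead; }
        const T& head() const { return fHead; }

    private:
        T fHead;            // local member, not heap-allocated
        T* fTail = &fHead;  // later elements would come from the arena
    };
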
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 4798b6f..a9e080b 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -71,11 +71,11 @@
     }
 }
 
-GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
-    auto it = fPendingPaths.find(opsTaskID);
+GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
+    auto it = fPendingPaths.find(opListID);
     if (fPendingPaths.end() == it) {
-        sk_sp<GrCCPerOpsTaskPaths> paths = sk_make_sp<GrCCPerOpsTaskPaths>();
-        it = fPendingPaths.insert(std::make_pair(opsTaskID, std::move(paths))).first;
+        sk_sp<GrCCPerOpListPaths> paths = sk_make_sp<GrCCPerOpListPaths>();
+        it = fPendingPaths.insert(std::make_pair(opListID, std::move(paths))).first;
     }
     return it->second.get();
 }
@@ -182,17 +182,16 @@
 void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
                                               const DrawPathArgs& args) {
     if (op) {
-        auto addToOwningPerOpsTaskPaths = [this](GrOp* op, uint32_t opsTaskID) {
-            op->cast<GrCCDrawPathsOp>()->addToOwningPerOpsTaskPaths(
-                    sk_ref_sp(this->lookupPendingPaths(opsTaskID)));
+        auto addToOwningPerOpListPaths = [this](GrOp* op, uint32_t opListID) {
+            op->cast<GrCCDrawPathsOp>()->addToOwningPerOpListPaths(
+                    sk_ref_sp(this->lookupPendingPaths(opListID)));
         };
-        args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op),
-                                             addToOwningPerOpsTaskPaths);
+        args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op), addToOwningPerOpListPaths);
     }
 }
 
 std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
-        uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+        uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
         const GrCaps& caps) {
     SkASSERT(!fFlushing);
 
@@ -203,7 +202,7 @@
         key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
     }
     GrCCClipPath& clipPath =
-            this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
+            this->lookupPendingPaths(opListID)->fClipPaths[key];
     if (!clipPath.isInitialized()) {
         // This ClipPath was just created during lookup. Initialize it.
         const SkRect& pathDevBounds = deviceSpacePath.getBounds();
@@ -229,8 +228,8 @@
 
 void GrCoverageCountingPathRenderer::preFlush(
         GrOnFlushResourceProvider* onFlushRP,
-        const uint32_t* opsTaskIDs,
-        int numOpsTaskIDs,
+        const uint32_t* opListIDs,
+        int numOpListIDs,
         SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) {
     using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
     SkASSERT(!fFlushing);
@@ -252,13 +251,13 @@
     specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
     specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);
 
-    // Move the per-opsTask paths that are about to be flushed from fPendingPaths to fFlushingPaths,
+    // Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
     // and count them up so we can preallocate buffers.
-    fFlushingPaths.reserve(numOpsTaskIDs);
-    for (int i = 0; i < numOpsTaskIDs; ++i) {
-        auto iter = fPendingPaths.find(opsTaskIDs[i]);
+    fFlushingPaths.reserve(numOpListIDs);
+    for (int i = 0; i < numOpListIDs; ++i) {
+        auto iter = fPendingPaths.find(opListIDs[i]);
         if (fPendingPaths.end() == iter) {
-            continue;  // No paths on this opsTask.
+            continue;  // No paths on this opList.
         }
 
         fFlushingPaths.push_back(std::move(iter->second));
@@ -319,8 +318,8 @@
     }
 }
 
-void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs,
-                                               int numOpsTaskIDs) {
+void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
+                                               int numOpListIDs) {
     SkASSERT(fFlushing);
 
     if (!fFlushingPaths.empty()) {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 4a7ca18..0450918 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -10,10 +10,10 @@
 
 #include <map>
 #include "src/gpu/GrOnFlushResourceProvider.h"
-#include "src/gpu/GrOpsTask.h"
 #include "src/gpu/GrPathRenderer.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 #include "src/gpu/ccpr/GrCCPerFlushResources.h"
-#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
+#include "src/gpu/ccpr/GrCCPerOpListPaths.h"
 
 class GrCCDrawPathsOp;
 class GrCCPathCache;
@@ -41,16 +41,16 @@
 
     CoverageType coverageType() const { return fCoverageType; }
 
-    using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
+    using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
-    // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpsTaskPaths to the DDL
-    // object (detachPendingPaths) and then return them upon replay (mergePendingPaths).
+    // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpListPaths to the DDL object
+    // (detachPendingPaths) and then return them upon replay (mergePendingPaths).
     PendingPathsMap detachPendingPaths() { return std::move(fPendingPaths); }
 
     void mergePendingPaths(const PendingPathsMap& paths) {
 #ifdef SK_DEBUG
-        // Ensure there are no duplicate opsTask IDs between the incoming path map and ours.
-        // This should always be true since opsTask IDs are globally unique and these are coming
+        // Ensure there are no duplicate opList IDs between the incoming path map and ours.
+        // This should always be true since opList IDs are globally unique and these are coming
         // from different DDL recordings.
         for (const auto& it : paths) {
             SkASSERT(!fPendingPaths.count(it.first));
@@ -65,9 +65,9 @@
             const GrCaps&);
 
     // GrOnFlushCallbackObject overrides.
-    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
+    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
                   SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) override;
-    void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override;
+    void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
 
     void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
 
@@ -94,19 +94,19 @@
     CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
     bool onDrawPath(const DrawPathArgs&) override;
 
-    GrCCPerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID);
+    GrCCPerOpListPaths* lookupPendingPaths(uint32_t opListID);
     void recordOp(std::unique_ptr<GrCCDrawPathsOp>, const DrawPathArgs&);
 
     const CoverageType fCoverageType;
 
-    // fPendingPaths holds the GrCCPerOpsTaskPaths objects that have already been created, but not
-    // flushed, and those that are still being created. All GrCCPerOpsTaskPaths objects will first
+    // fPendingPaths holds the GrCCPerOpListPaths objects that have already been created, but not
+    // flushed, and those that are still being created. All GrCCPerOpListPaths objects will first
     // reside in fPendingPaths, then be moved to fFlushingPaths during preFlush().
     PendingPathsMap fPendingPaths;
 
-    // fFlushingPaths holds the GrCCPerOpsTaskPaths objects that are currently being flushed.
+    // fFlushingPaths holds the GrCCPerOpListPaths objects that are currently being flushed.
     // (It will only contain elements when fFlushing is true.)
-    SkSTArray<4, sk_sp<GrCCPerOpsTaskPaths>> fFlushingPaths;
+    SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
 
     std::unique_ptr<GrCCPathCache> fPathCache;
 
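
detachPendingPaths and mergePendingPaths above are a straightforward move-out/merge-in of the pending map, with the debug check relying on globally unique opList IDs. A compact sketch of the same shape (the value type is a stand-in):

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <memory>

    using PathsMap = std::map<uint32_t, std::shared_ptr<int>>;

    struct Renderer {
        PathsMap fPending;

        // Hand pending work to a DDL; moving the map leaves ours empty.
        PathsMap detachPendingPaths() { return std::move(fPending); }

        // On replay, IDs from different recordings must never collide.
        void mergePendingPaths(const PathsMap& paths) {
            for (const auto& it : paths) {
                assert(fPending.count(it.first) == 0);
                fPending.insert(it);
            }
        }
    };
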
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index 1b2a55e..0b61e4b 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -17,7 +17,7 @@
 }
 
 std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
-        uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+        uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
         const GrCaps& caps) {
     return nullptr;
 }
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 479ca28..0cbef8b 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -1879,7 +1879,7 @@
         if (rt && rt->needsResolve()) {
             this->resolveRenderTarget(rt);
             // TEMPORARY: MSAA resolve will have dirtied mipmaps. This goes away once we switch
-            // to resolving MSAA from the opsTask as well.
+            // to resolving MSAA from the opList as well.
             if (GrSamplerState::Filter::kMipMap == sampler.filter() &&
                 (tex->width() != 1 || tex->height() != 1)) {
                 SkASSERT(tex->texturePriv().mipMapped() == GrMipMapped::kYes);
@@ -1887,7 +1887,7 @@
                 this->regenerateMipMapLevels(tex);
             }
         }
-        // Ensure mipmaps were all resolved ahead of time by the opsTask.
+        // Ensure mipmaps were all resolved ahead of time by the opList.
         if (GrSamplerState::Filter::kMipMap == sampler.filter() &&
             (tex->width() != 1 || tex->height() != 1)) {
             // There are some cases where we might be given a non-mipmapped texture with a mipmap
@@ -2165,7 +2165,7 @@
 #else
     // we could just clear the clip bit but when we go through
     // ANGLE a partial stencil mask will cause clears to be
-    // turned into draws. Our contract on GrOpsTask says that
+    // turned into draws. Our contract on GrOpList says that
     // changing the clip between stencil passes may or may not
     // zero the client's clip bits. So we just clear the whole thing.
     static const GrGLint clipStencilMask  = ~0;
diff --git a/src/gpu/ops/GrOp.h b/src/gpu/ops/GrOp.h
index 7aff59d..f94e041 100644
--- a/src/gpu/ops/GrOp.h
+++ b/src/gpu/ops/GrOp.h
@@ -21,6 +21,7 @@
 class GrCaps;
 class GrGpuCommandBuffer;
 class GrOpFlushState;
+class GrRenderTargetOpList;
 
 /**
  * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
diff --git a/src/gpu/ops/GrSmallPathRenderer.h b/src/gpu/ops/GrSmallPathRenderer.h
index e7017b1..d7aa781 100644
--- a/src/gpu/ops/GrSmallPathRenderer.h
+++ b/src/gpu/ops/GrSmallPathRenderer.h
@@ -41,7 +41,7 @@
     }
 
     void postFlush(GrDeferredUploadToken startTokenForNextFlush,
-                   const uint32_t* /*opsTaskIDs*/, int /*numOpsTaskIDs*/) override {
+                   const uint32_t* /*opListIDs*/, int /*numOpListIDs*/) override {
         if (fAtlas) {
             fAtlas->compact(startTokenForNextFlush);
         }
diff --git a/src/gpu/text/GrAtlasManager.h b/src/gpu/text/GrAtlasManager.h
index 650b905..026806b 100644
--- a/src/gpu/text/GrAtlasManager.h
+++ b/src/gpu/text/GrAtlasManager.h
@@ -98,7 +98,7 @@
     }
 
     void postFlush(GrDeferredUploadToken startTokenForNextFlush,
-                   const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
+                   const uint32_t* opListIDs, int numOpListIDs) override {
         for (int i = 0; i < kMaskFormatCount; ++i) {
             if (fAtlases[i]) {
                 fAtlases[i]->compact(startTokenForNextFlush);
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index 8f5309e..80e4340 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -637,7 +637,7 @@
         if (texRT && texRT->needsResolve()) {
             fGpu->resolveRenderTargetNoFlush(texRT);
             // TEMPORARY: MSAA resolve will have dirtied mipmaps. This goes away once we switch
-            // to resolving MSAA from the opsTask as well.
+            // to resolving MSAA from the opList as well.
             if (GrSamplerState::Filter::kMipMap == filter &&
                 (vkTexture->width() != 1 || vkTexture->height() != 1)) {
                 SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
@@ -646,7 +646,7 @@
             }
         }
 
-        // Ensure mip maps were all resolved ahead of time by the opsTask.
+        // Ensure mip maps were all resolved ahead of time by the opList.
         if (GrSamplerState::Filter::kMipMap == filter &&
             (vkTexture->width() != 1 || vkTexture->height() != 1)) {
             SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
diff --git a/tests/GrCCPRTest.cpp b/tests/GrCCPRTest.cpp
index c7b0128..4628c90 100644
--- a/tests/GrCCPRTest.cpp
+++ b/tests/GrCCPRTest.cpp
@@ -42,7 +42,7 @@
 private:
     bool apply(GrRecordingContext* context, GrRenderTargetContext* rtc, bool useHWAA,
                bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds) const override {
-        out->addCoverageFP(fCCPR->makeClipProcessor(rtc->priv().testingOnly_getOpsTaskID(), fPath,
+        out->addCoverageFP(fCCPR->makeClipProcessor(rtc->priv().testingOnly_getOpListID(), fPath,
                                                     SkIRect::MakeWH(rtc->width(), rtc->height()),
                                                     *context->priv().caps()));
         return true;
@@ -321,7 +321,7 @@
         int lastCopyAtlasID() const { return fLastCopyAtlasID; }
         int lastRenderedAtlasID() const { return fLastRenderedAtlasID; }
 
-        void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
+        void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
                       SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) override {
             fLastRenderedAtlasID = fLastCopyAtlasID = 0;
 
@@ -839,7 +839,7 @@
 };
 DEF_CCPR_TEST(CCPR_cache_partialInvalidate)
 
-class CCPR_unrefPerOpsTaskPathsBeforeOps : public CCPRTest {
+class CCPR_unrefPerOpListPathsBeforeOps : public CCPRTest {
     void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
         for (int i = 0; i < 10000; ++i) {
@@ -847,9 +847,9 @@
             ccpr.drawPath(fPath);
         }
 
-        // Unref the GrCCPerOpsTaskPaths object.
-        auto perOpsTaskPathsMap = ccpr.ccpr()->detachPendingPaths();
-        perOpsTaskPathsMap.clear();
+        // Unref the GrCCPerOpListPaths object.
+        auto perOpListPathsMap = ccpr.ccpr()->detachPendingPaths();
+        perOpListPathsMap.clear();
 
         // Now delete the Op and all its draws.
         REPORTER_ASSERT(reporter, !SkPathPriv::TestingOnly_unique(fPath));
@@ -857,7 +857,7 @@
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
     }
 };
-DEF_CCPR_TEST(CCPR_unrefPerOpsTaskPathsBeforeOps)
+DEF_CCPR_TEST(CCPR_unrefPerOpListPathsBeforeOps)
 
 class CCPRRenderingTest {
 public:
diff --git a/tests/GrMipMappedTest.cpp b/tests/GrMipMappedTest.cpp
index eaf5f23..e660556 100644
--- a/tests/GrMipMappedTest.cpp
+++ b/tests/GrMipMappedTest.cpp
@@ -356,7 +356,7 @@
     return rtc;
 }
 
-// Test that two opsTasks using the same mipmaps both depend on the same GrTextureResolveRenderTask.
+// Test that two opLists using the same mipmaps both depend on the same GrTextureResolveRenderTask.
 DEF_GPUTEST(GrManyDependentsMipMappedTest, reporter, /* options */) {
     using CanClearFullscreen = GrRenderTargetContext::CanClearFullscreen;
     using Enable = GrContextOptions::Enable;
@@ -369,7 +369,7 @@
         ctxOptions.fReduceOpListSplitting = enableSortingAndReduction;
         sk_sp<GrContext> context = GrContext::MakeMock(&mockOptions, ctxOptions);
         if (!context) {
-            ERRORF(reporter, "could not create mock context with fReduceOpsTaskSplitting %s.",
+            ERRORF(reporter, "could not create mock context with fReduceOpListSplitting %s.",
                    (Enable::kYes == enableSortingAndReduction) ? "enabled" : "disabled");
             continue;
         }
@@ -402,9 +402,9 @@
                 mipmapProxy, colorType, nullptr, nullptr, true);
         mipmapRTC->clear(nullptr, {.1f,.2f,.3f,.4f}, CanClearFullscreen::kYes);
         REPORTER_ASSERT(reporter, mipmapProxy->getLastRenderTask());
-        // mipmapProxy's last render task should now just be the opsTask containing the clear.
+        // mipmapProxy's last render task should now just be the opList containing the clear.
         REPORTER_ASSERT(reporter,
-                mipmapRTC->testingOnly_PeekLastOpsTask() == mipmapProxy->getLastRenderTask());
+                mipmapRTC->testingOnly_PeekLastOpList() == mipmapProxy->getLastRenderTask());
 
         // Mipmaps don't get marked dirty until makeClosed().
         REPORTER_ASSERT(reporter, !mipmapProxy->mipMapsAreDirty());
@@ -415,14 +415,14 @@
 
         // Mipmaps should have gotten marked dirty during makeClosed, then marked clean again as
         // soon as a GrTextureResolveRenderTask was inserted. The way we know they were resolved is
-        // if mipmapProxy->getLastRenderTask() has switched from the opsTask that drew to it, to the
+        // if mipmapProxy->getLastRenderTask() has switched from the opList that drew to it, to the
         // task that resolved its mips.
         GrRenderTask* initialMipmapRegenTask = mipmapProxy->getLastRenderTask();
         REPORTER_ASSERT(reporter, initialMipmapRegenTask);
         REPORTER_ASSERT(reporter,
-                initialMipmapRegenTask != mipmapRTC->testingOnly_PeekLastOpsTask());
+                initialMipmapRegenTask != mipmapRTC->testingOnly_PeekLastOpList());
         REPORTER_ASSERT(reporter,
-                rtc1->testingOnly_PeekLastOpsTask()->dependsOn(initialMipmapRegenTask));
+                rtc1->testingOnly_PeekLastOpList()->dependsOn(initialMipmapRegenTask));
         REPORTER_ASSERT(reporter, !mipmapProxy->mipMapsAreDirty());
 
         // Draw the now-clean mipmap texture into a second target.
@@ -432,7 +432,7 @@
         // Make sure the mipmap texture still has the same regen task.
         REPORTER_ASSERT(reporter, mipmapProxy->getLastRenderTask() == initialMipmapRegenTask);
         REPORTER_ASSERT(reporter,
-                rtc2->testingOnly_PeekLastOpsTask()->dependsOn(initialMipmapRegenTask));
+                rtc2->testingOnly_PeekLastOpList()->dependsOn(initialMipmapRegenTask));
         SkASSERT(!mipmapProxy->mipMapsAreDirty());
 
         // Reset everything so we can go again, this time with the first draw not mipmapped.
@@ -441,9 +441,9 @@
         // Render something to dirty the mips.
         mipmapRTC->clear(nullptr, {.1f,.2f,.3f,.4f}, CanClearFullscreen::kYes);
         REPORTER_ASSERT(reporter, mipmapProxy->getLastRenderTask());
-        // mipmapProxy's last render task should now just be the opsTask containing the clear.
+        // mipmapProxy's last render task should now just be the opList containing the clear.
         REPORTER_ASSERT(reporter,
-                mipmapRTC->testingOnly_PeekLastOpsTask() == mipmapProxy->getLastRenderTask());
+                mipmapRTC->testingOnly_PeekLastOpList() == mipmapProxy->getLastRenderTask());
 
         // Mipmaps don't get marked dirty until makeClosed().
         REPORTER_ASSERT(reporter, !mipmapProxy->mipMapsAreDirty());
@@ -459,7 +459,7 @@
 
         // Since mips weren't regenerated, the last render task shouldn't have changed.
         REPORTER_ASSERT(reporter,
-                mipmapRTC->testingOnly_PeekLastOpsTask() == mipmapProxy->getLastRenderTask());
+                mipmapRTC->testingOnly_PeekLastOpList() == mipmapProxy->getLastRenderTask());
 
        // Draw the still-dirty mipmap texture into a second target with mipmap filtering.
         rtc2 = draw_mipmap_into_new_render_target(
@@ -469,9 +469,9 @@
         // and that the mipmaps are now clean.
         REPORTER_ASSERT(reporter, mipmapProxy->getLastRenderTask());
         REPORTER_ASSERT(reporter,
-                mipmapRTC->testingOnly_PeekLastOpsTask() != mipmapProxy->getLastRenderTask());
+                mipmapRTC->testingOnly_PeekLastOpList() != mipmapProxy->getLastRenderTask());
         REPORTER_ASSERT(reporter,
-                rtc2->testingOnly_PeekLastOpsTask()->dependsOn(mipmapProxy->getLastRenderTask()));
+                rtc2->testingOnly_PeekLastOpList()->dependsOn(mipmapProxy->getLastRenderTask()));
         SkASSERT(!mipmapProxy->mipMapsAreDirty());
     }
 }
diff --git a/tests/GrOpListFlushTest.cpp b/tests/GrOpListFlushTest.cpp
index d7d9968..b413317 100644
--- a/tests/GrOpListFlushTest.cpp
+++ b/tests/GrOpListFlushTest.cpp
@@ -25,7 +25,7 @@
     return result;
 }
 
-DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrOpsTaskFlushCount, reporter, ctxInfo) {
+DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrOpListFlushCount, reporter, ctxInfo) {
     GrContext* context = ctxInfo.grContext();
     GrGpu* gpu = context->priv().getGpu();
 
diff --git a/tests/IncrTopoSortTest.cpp b/tests/IncrTopoSortTest.cpp
index 9f5aa66..93fedb7 100644
--- a/tests/IncrTopoSortTest.cpp
+++ b/tests/IncrTopoSortTest.cpp
@@ -11,7 +11,7 @@
 
 #include "tools/ToolUtils.h"
 
-// A node in the graph. This corresponds to an opsTask in the MDB world.
+// A node in the graph. This corresponds to an opList in the MDB world.
 class Node : public SkRefCnt {
 public:
     char id() const { return fID; }
@@ -80,7 +80,7 @@
     bool             fVisited;             // only used in addEdges()
 };
 
-// The DAG driving the incremental topological sort. This corresponds to the opsTask DAG in
+// The DAG driving the incremental topological sort. This corresponds to the opList DAG in
 // the MDB world.
 class Graph {
 public:
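
Since each Node stands in for an opList, the test is really about ordering the task DAG. For orientation, the classic (non-incremental) way to produce a valid execution order is a DFS post-order; the test's point is maintaining this incrementally as edges are added:

    #include <vector>

    struct Node {
        std::vector<Node*> fDependencies;  // must execute before this node
        bool fVisited = false;
    };

    // DFS post-order: dependencies are emitted before their dependents,
    // which is a valid execution order for any DAG.
    void topoSort(Node* n, std::vector<Node*>* out) {
        if (n->fVisited) return;
        n->fVisited = true;
        for (Node* dep : n->fDependencies) {
            topoSort(dep, out);
        }
        out->push_back(n);
    }
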
diff --git a/tests/LazyProxyTest.cpp b/tests/LazyProxyTest.cpp
index 4b3e7f3..5aae8e9 100644
--- a/tests/LazyProxyTest.cpp
+++ b/tests/LazyProxyTest.cpp
@@ -48,7 +48,7 @@
         REPORTER_ASSERT(fReporter, !fHasClipTexture);
     }
 
-    void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
+    void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override {
         REPORTER_ASSERT(fReporter, fHasOpTexture);
         REPORTER_ASSERT(fReporter, fHasClipTexture);
     }
diff --git a/tests/OnFlushCallbackTest.cpp b/tests/OnFlushCallbackTest.cpp
index dd699da..2079d34 100644
--- a/tests/OnFlushCallbackTest.cpp
+++ b/tests/OnFlushCallbackTest.cpp
@@ -231,7 +231,7 @@
     static const GrColor kColors[kMaxIDs];
 
     int            fID;
-    // The Atlased ops have an internal singly-linked list of ops that land in the same opsTask
+    // The Atlased ops have an internal singly-linked list of ops that land in the same opList
     AtlasedRectOp* fNext;
 
     typedef NonAARectOp INHERITED;
@@ -274,17 +274,17 @@
         fDone = true;
     }
 
-    // Insert the new op in an internal singly-linked list for 'opsTaskID'
-    void addOp(uint32_t opsTaskID, AtlasedRectOp* op) {
+    // Insert the new op in an internal singly-linked list for 'opListID'
+    void addOp(uint32_t opListID, AtlasedRectOp* op) {
         LinkedListHeader* header = nullptr;
         for (int i = 0; i < fOps.count(); ++i) {
-            if (opsTaskID == fOps[i].fID) {
+            if (opListID == fOps[i].fID) {
                 header = &(fOps[i]);
             }
         }
 
         if (!header) {
-            fOps.push_back({opsTaskID, nullptr});
+            fOps.push_back({opListID, nullptr});
             header = &(fOps[fOps.count()-1]);
         }
 
@@ -334,15 +334,15 @@
      * This callback creates the atlas and updates the AtlasedRectOps to read from it
      */
     void preFlush(GrOnFlushResourceProvider* resourceProvider,
-                  const uint32_t* opsTaskIDs,
-                  int numOpsTaskIDs,
+                  const uint32_t* opListIDs,
+                  int numOpListIDs,
                   SkTArray<std::unique_ptr<GrRenderTargetContext>>* results) override {
         SkASSERT(!results->count());
 
        // Until MDB is landed we will most likely only have one opList.
+        // Until MDB is landed we will most-likely only have one opList.
         SkTDArray<LinkedListHeader*> lists;
-        for (int i = 0; i < numOpsTaskIDs; ++i) {
-            if (LinkedListHeader* list = this->getList(opsTaskIDs[i])) {
+        for (int i = 0; i < numOpListIDs; ++i) {
+            if (LinkedListHeader* list = this->getList(opListIDs[i])) {
                 lists.push_back(list);
             }
         }
@@ -407,9 +407,9 @@
         AtlasedRectOp* fHead;
     } LinkedListHeader;
 
-    LinkedListHeader* getList(uint32_t opsTaskID) {
+    LinkedListHeader* getList(uint32_t opListID) {
         for (int i = 0; i < fOps.count(); ++i) {
-            if (opsTaskID == fOps[i].fID) {
+            if (opListID == fOps[i].fID) {
                 return &(fOps[i]);
             }
         }
@@ -420,10 +420,10 @@
         // The AtlasedRectOps have yet to execute (and this class doesn't own them) so just
         // forget about them in the laziest way possible.
         header->fHead = nullptr;
-        header->fID = 0;            // invalid opsTask ID
+        header->fID = 0;            // invalid opList ID
     }
 
-    // Each opsTask containing AtlasedRectOps gets its own internal singly-linked list
+    // Each opList containing AtlasedRectOps gets its own internal singly-linked list
     SkTDArray<LinkedListHeader>  fOps;
 
     // The fully lazy proxy for the atlas
@@ -456,12 +456,12 @@
 
         AtlasedRectOp* sparePtr = op.get();
 
-        uint32_t opsTaskID;
+        uint32_t opListID;
         rtc->priv().testingOnly_addDrawOp(GrNoClip(), std::move(op),
-                                          [&opsTaskID](GrOp* op, uint32_t id) { opsTaskID = id; });
-        SkASSERT(SK_InvalidUniqueID != opsTaskID);
+                                          [&opListID](GrOp* op, uint32_t id) { opListID = id; });
+        SkASSERT(SK_InvalidUniqueID != opListID);
 
-        object->addOp(opsTaskID, sparePtr);
+        object->addOp(opListID, sparePtr);
     }
 
     return rtc->asTextureProxyRef();
diff --git a/tests/OpChainTest.cpp b/tests/OpChainTest.cpp
index 1cbc437..90acfa7 100644
--- a/tests/OpChainTest.cpp
+++ b/tests/OpChainTest.cpp
@@ -9,7 +9,7 @@
 #include "src/gpu/GrContextPriv.h"
 #include "src/gpu/GrMemoryPool.h"
 #include "src/gpu/GrOpFlushState.h"
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/GrRenderTargetOpList.h"
 #include "src/gpu/ops/GrOp.h"
 #include "tests/Test.h"
 
@@ -205,7 +205,7 @@
                 GrOpFlushState flushState(context->priv().getGpu(),
                                           context->priv().resourceProvider(),
                                           &tracker);
-                GrOpsTask opsTask(sk_ref_sp(context->priv().opMemoryPool()),
+                GrRenderTargetOpList opList(sk_ref_sp(context->priv().opMemoryPool()),
                                             sk_ref_sp(proxy->asRenderTargetProxy()),
                                             context->priv().auditTrail());
                 // This assumes the particular values of kRanges.
@@ -221,14 +221,14 @@
                     range.fOffset += pos;
                     auto op = TestOp::Make(context.get(), value, range, result, &combinable);
                     op->writeResult(validResult);
-                    opsTask.addOp(std::move(op),
-                                  GrTextureResolveManager(context->priv().drawingManager()),
-                                  *context->priv().caps());
+                    opList.addOp(std::move(op),
+                                 GrTextureResolveManager(context->priv().drawingManager()),
+                                 *context->priv().caps());
                 }
-                opsTask.makeClosed(*context->priv().caps());
-                opsTask.prepare(&flushState);
-                opsTask.execute(&flushState);
-                opsTask.endFlush();
+                opList.makeClosed(*context->priv().caps());
+                opList.prepare(&flushState);
+                opList.execute(&flushState);
+                opList.endFlush();
 #if 0  // Useful to repeat a random configuration that fails the test while debugger attached.
                 if (!std::equal(result, result + result_width(), validResult)) {
                     repeat = true;
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
index dfc2b30..7d0cc47 100644
--- a/tests/ResourceAllocatorTest.cpp
+++ b/tests/ResourceAllocatorTest.cpp
@@ -83,7 +83,7 @@
     alloc.incOps();
     alloc.addInterval(p2.get(), 1, 2, GrResourceAllocator::ActualUse::kYes);
     alloc.incOps();
-    alloc.markEndOfOpsTask(0);
+    alloc.markEndOfOpList(0);
 
     alloc.determineRecyclability();
 
@@ -115,7 +115,7 @@
 
     alloc.addInterval(p1.get(), 0, 2, GrResourceAllocator::ActualUse::kYes);
     alloc.addInterval(p2.get(), 3, 5, GrResourceAllocator::ActualUse::kYes);
-    alloc.markEndOfOpsTask(0);
+    alloc.markEndOfOpList(0);
 
     alloc.determineRecyclability();
 
@@ -329,7 +329,7 @@
         alloc.addInterval(p2.get(), 0, 1, GrResourceAllocator::ActualUse::kNo);
         alloc.addInterval(p3.get(), 0, 1, GrResourceAllocator::ActualUse::kNo);
         alloc.incOps();
-        alloc.markEndOfOpsTask(0);
+        alloc.markEndOfOpList(0);
 
         alloc.determineRecyclability();
 
@@ -344,8 +344,8 @@
     REPORTER_ASSERT(reporter, p3->isInstantiated());
 }
 
-// Set up so there are two opsTasks that need to be flushed but the resource allocator thinks
-// it is over budget. The two opsTasks should be flushed separately and the opsTask indices
+// Set up so there are two opLists that need to be flushed but the resource allocator thinks
+// it is over budget. The two opLists should be flushed separately and the opList indices
 // returned from assign should be correct.
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorOverBudgetTest, reporter, ctxInfo) {
     GrContext* context = ctxInfo.grContext();
@@ -377,13 +377,13 @@
         alloc.incOps();
         alloc.addInterval(p2.get(), 1, 1, GrResourceAllocator::ActualUse::kYes);
         alloc.incOps();
-        alloc.markEndOfOpsTask(0);
+        alloc.markEndOfOpList(0);
 
         alloc.addInterval(p3.get(), 2, 2, GrResourceAllocator::ActualUse::kYes);
         alloc.incOps();
         alloc.addInterval(p4.get(), 3, 3, GrResourceAllocator::ActualUse::kYes);
         alloc.incOps();
-        alloc.markEndOfOpsTask(1);
+        alloc.markEndOfOpList(1);
 
         int startIndex, stopIndex;
         GrResourceAllocator::AssignError error;
diff --git a/tools/DDLTileHelper.cpp b/tools/DDLTileHelper.cpp
index eb50f4e..236dbbd 100644
--- a/tools/DDLTileHelper.cpp
+++ b/tools/DDLTileHelper.cpp
@@ -36,7 +36,7 @@
     std::unique_ptr<SkDeferredDisplayList> ddl = recorder.detach();
     if (ddl->priv().numRenderTasks()) {
         // TODO: remove this once skbug.com/8424 is fixed. If the DDL resulting from the
-        // reinflation of the SKPs contains opsTasks that means some image subset operation
+        // reinflation of the SKPs contains opLists that means some image subset operation
         // created a draw.
         fReconstitutedPicture.reset();
     }
diff --git a/tools/debugger/DebugCanvas.cpp b/tools/debugger/DebugCanvas.cpp
index 0aec409..24d467c 100644
--- a/tools/debugger/DebugCanvas.cpp
+++ b/tools/debugger/DebugCanvas.cpp
@@ -172,7 +172,7 @@
             at->getBoundsByClientID(&childrenBounds, index);
         } else {
             // the client wants us to draw the mth op
-            at->getBoundsByOpsTaskID(&childrenBounds.push_back(), m);
+            at->getBoundsByOpListID(&childrenBounds.push_back(), m);
         }
         SkPaint paint;
         paint.setStyle(SkPaint::kStroke_Style);
@@ -270,12 +270,12 @@
     this->cleanupAuditTrail(canvas);
 }
 
-void DebugCanvas::toJSONOpsTask(SkJSONWriter& writer, int n, SkCanvas* canvas) {
+void DebugCanvas::toJSONOpList(SkJSONWriter& writer, int n, SkCanvas* canvas) {
     this->drawAndCollectOps(n, canvas);
 
     GrAuditTrail* at = this->getAuditTrail(canvas);
     if (at) {
-        GrAuditTrail::AutoManageOpsTask enable(at);
+        GrAuditTrail::AutoManageOpList enable(at);
         at->toJson(writer);
     } else {
         writer.beginObject();
diff --git a/tools/debugger/DebugCanvas.h b/tools/debugger/DebugCanvas.h
index b7c314f..0041a7b 100644
--- a/tools/debugger/DebugCanvas.h
+++ b/tools/debugger/DebugCanvas.h
@@ -100,7 +100,7 @@
      */
     void toJSON(SkJSONWriter& writer, UrlDataManager& urlDataManager, int n, SkCanvas*);
 
-    void toJSONOpsTask(SkJSONWriter& writer, int n, SkCanvas*);
+    void toJSONOpList(SkJSONWriter& writer, int n, SkCanvas*);
 
     void detachCommands(SkTDArray<DrawCommand*>* dst) { fCommandVector.swap(*dst); }
 
diff --git a/tools/flags/CommonFlags.h b/tools/flags/CommonFlags.h
index 83ed083..c7380a2 100644
--- a/tools/flags/CommonFlags.h
+++ b/tools/flags/CommonFlags.h
@@ -31,8 +31,8 @@
  *     --noGS
  *     --pr
  *     --disableDriverCorrectnessWorkarounds
- *     --reduceOpsTaskSplitting
- *     --dontReduceOpsTaskSplitting
+ *     --reduceOpListSplitting
+ *     --dontReduceOpListSplitting
  */
 void SetCtxOptionsFromCommonFlags(struct GrContextOptions*);
 
diff --git a/tools/flags/CommonFlagsGpu.cpp b/tools/flags/CommonFlagsGpu.cpp
index 7ed0d30..1c4019b 100644
--- a/tools/flags/CommonFlagsGpu.cpp
+++ b/tools/flags/CommonFlagsGpu.cpp
@@ -29,8 +29,8 @@
 static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
                    "Disables all GPU driver correctness workarounds");
 
-static DEFINE_bool(reduceOpsTaskSplitting, false, "Improve opsTask sorting");
-static DEFINE_bool(dontReduceOpsTaskSplitting, false, "Allow more opsTask splitting");
+static DEFINE_bool(reduceOpListSplitting, false, "Improve opList sorting");
+static DEFINE_bool(dontReduceOpListSplitting, false, "Allow more opList splitting");
 
 static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
     if (!strcmp(name, "none")) {
@@ -89,10 +89,10 @@
     ctxOptions->fGpuPathRenderers                    = collect_gpu_path_renderers_from_flags();
     ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;
 
-    if (FLAGS_reduceOpsTaskSplitting) {
-        SkASSERT(!FLAGS_dontReduceOpsTaskSplitting);
+    if (FLAGS_reduceOpListSplitting) {
+        SkASSERT(!FLAGS_dontReduceOpListSplitting);
         ctxOptions->fReduceOpListSplitting = GrContextOptions::Enable::kYes;
-    } else if (FLAGS_dontReduceOpsTaskSplitting) {
+    } else if (FLAGS_dontReduceOpListSplitting) {
         ctxOptions->fReduceOpListSplitting = GrContextOptions::Enable::kNo;
     }
 }
diff --git a/tools/gpu/GrTest.cpp b/tools/gpu/GrTest.cpp
index 70d8692..465acf1 100644
--- a/tools/gpu/GrTest.cpp
+++ b/tools/gpu/GrTest.cpp
@@ -60,8 +60,8 @@
     SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fRenderTargetContext->singleOwner());)
 
 
-uint32_t GrRenderTargetContextPriv::testingOnly_getOpsTaskID() {
-    return fRenderTargetContext->getOpsTask()->uniqueID();
+uint32_t GrRenderTargetContextPriv::testingOnly_getOpListID() {
+    return fRenderTargetContext->getOpList()->uniqueID();
 }
 
 void GrRenderTargetContextPriv::testingOnly_addDrawOp(std::unique_ptr<GrDrawOp> op) {
diff --git a/tools/mdbviz/Model.cpp b/tools/mdbviz/Model.cpp
index 0243d00..3cb01f0 100644
--- a/tools/mdbviz/Model.cpp
+++ b/tools/mdbviz/Model.cpp
@@ -21,7 +21,7 @@
 }
 
 Model::~Model() {
-    this->resetOpsTask();
+    this->resetOpList();
 }
 
 Model::ErrorCode Model::load(const char* filename) {
@@ -42,7 +42,7 @@
         temp->setPicture(pic.get());
         pic->playback(temp.get());
         temp->setPicture(nullptr);
-        this->resetOpsTask();
+        this->resetOpList();
         temp->detachCommands(&fOps);
     }
 
@@ -105,7 +105,7 @@
     canvas.restoreToCount(saveCount);
 }
 
-void Model::resetOpsTask() {
+void Model::resetOpList() {
     for (int i = 0; i < fOps.count(); ++i) {
         delete fOps[i];
     }
diff --git a/tools/mdbviz/Model.h b/tools/mdbviz/Model.h
index f3bc99d..debb4f4 100644
--- a/tools/mdbviz/Model.h
+++ b/tools/mdbviz/Model.h
@@ -27,7 +27,7 @@
 
     // Replace the list of draw ops by reading the provided skp filename and
     // reset the Skia draw state. It is up to the view portion to update itself
-    // after this call (i.e., rebuild the opsTask view).
+    // after this call (i.e., rebuild the opList view).
     ErrorCode load(const char* filename);
 
     // Update the rendering state to the provided op
@@ -48,7 +48,7 @@
 protected:
     // draw the ops up to (and including) the index-th op
     void drawTo(int index);
-    void resetOpsTask();
+    void resetOpList();
 
 private:
     SkTDArray<DrawCommand*>   fOps;
diff --git a/tools/mdbviz/mainwindow.cpp b/tools/mdbviz/mainwindow.cpp
index e36f81a..5f5746b 100644
--- a/tools/mdbviz/mainwindow.cpp
+++ b/tools/mdbviz/mainwindow.cpp
@@ -27,8 +27,8 @@
     }
 }
 
-void MainWindow::setupOpsTaskWidget() {
-    fOpsTaskWidget->clear();
+void MainWindow::setupOpListWidget() {
+    fOpListWidget->clear();
 
     QTreeWidgetItem* item = nullptr;
     SkTDArray<QTreeWidgetItem*> parents;
@@ -45,7 +45,7 @@
         }
 
         if (parents.isEmpty()) {
-            fOpsTaskWidget->addTopLevelItem(item);
+            fOpListWidget->addTopLevelItem(item);
         } else {
             parents.top()->addChild(item);
         }
@@ -55,8 +55,8 @@
         }
     }
 
-    fOpsTaskWidget->setCurrentItem(item);
-    fOpsTaskWidget->expandToDepth(100);
+    fOpListWidget->setCurrentItem(item);
+    fOpListWidget->expandToDepth(100);
 }
 
 void MainWindow::presentCurrentRenderState() {
@@ -87,7 +87,7 @@
         return;
     }
 
-    this->setupOpsTaskWidget();
+    this->setupOpListWidget();
     this->presentCurrentRenderState();
 
 #ifndef QT_NO_CURSOR
@@ -147,25 +147,25 @@
 
     // Op List Window
     {
-        QDockWidget* opsTaskDock = new QDockWidget("Ops", this);
-        opsTaskDock->setAllowedAreas(Qt::LeftDockWidgetArea);
+        QDockWidget* opListDock = new QDockWidget("Ops", this);
+        opListDock->setAllowedAreas(Qt::LeftDockWidgetArea);
 
-        fOpsTaskWidget = new QTreeWidget(opsTaskDock);
+        fOpListWidget = new QTreeWidget(opListDock);
 
         QTreeWidgetItem* headerItem = new QTreeWidgetItem;
         headerItem->setText(0, "Index");
         headerItem->setText(1, "Op Name");
-        fOpsTaskWidget->setHeaderItem(headerItem);
+        fOpListWidget->setHeaderItem(headerItem);
 
-        fOpsTaskWidget->header()->setSectionResizeMode(0, QHeaderView::ResizeToContents);
-        fOpsTaskWidget->header()->setSectionResizeMode(1, QHeaderView::ResizeToContents);
+        fOpListWidget->header()->setSectionResizeMode(0, QHeaderView::ResizeToContents);
+        fOpListWidget->header()->setSectionResizeMode(1, QHeaderView::ResizeToContents);
 
-        opsTaskDock->setWidget(fOpsTaskWidget);
-        this->addDockWidget(Qt::LeftDockWidgetArea, opsTaskDock);
+        opListDock->setWidget(fOpListWidget);
+        this->addDockWidget(Qt::LeftDockWidgetArea, opListDock);
 
-        fViewMenu->addAction(opsTaskDock->toggleViewAction());
+        fViewMenu->addAction(opListDock->toggleViewAction());
 
-        connect(fOpsTaskWidget, SIGNAL(currentItemChanged(QTreeWidgetItem*,QTreeWidgetItem*)),
+        connect(fOpListWidget, SIGNAL(currentItemChanged(QTreeWidgetItem*,QTreeWidgetItem*)),
                 this, SLOT(onCurrentItemChanged(QTreeWidgetItem*, QTreeWidgetItem*)));
     }
 
diff --git a/tools/mdbviz/mainwindow.h b/tools/mdbviz/mainwindow.h
index f80b157..7fa6152 100644
--- a/tools/mdbviz/mainwindow.h
+++ b/tools/mdbviz/mainwindow.h
@@ -32,7 +32,7 @@
 
 private:
     void loadFile(const QString &fileName);
-    void setupOpsTaskWidget();
+    void setupOpListWidget();
     void presentCurrentRenderState();
 
 
@@ -46,7 +46,7 @@
     QImage  fImage;
     QLabel* fImageLabel;
 
-    QTreeWidget* fOpsTaskWidget;
+    QTreeWidget* fOpListWidget;
 
     QMenu* fViewMenu;
 
diff --git a/tools/skiaserve/Request.cpp b/tools/skiaserve/Request.cpp
index f2dbab0..dc0b40d 100644
--- a/tools/skiaserve/Request.cpp
+++ b/tools/skiaserve/Request.cpp
@@ -229,13 +229,13 @@
     return stream.detachAsData();
 }
 
-sk_sp<SkData> Request::getJsonOpsTask(int n) {
+sk_sp<SkData> Request::getJsonOpList(int n) {
     SkCanvas* canvas = this->getCanvas();
     SkASSERT(fGPUEnabled);
     SkDynamicMemoryWStream stream;
     SkJSONWriter writer(&stream, SkJSONWriter::Mode::kFast);
 
-    fDebugCanvas->toJSONOpsTask(writer, n, canvas);
+    fDebugCanvas->toJSONOpList(writer, n, canvas);
 
     writer.flush();
     return stream.detachAsData();
diff --git a/tools/skiaserve/Request.h b/tools/skiaserve/Request.h
index e3dfada..aef69c3 100644
--- a/tools/skiaserve/Request.h
+++ b/tools/skiaserve/Request.h
@@ -51,7 +51,7 @@
     sk_sp<SkData> getJsonOps(int n);
 
     // Returns a json list of ops as an SkData
-    sk_sp<SkData> getJsonOpsTask(int n);
+    sk_sp<SkData> getJsonOpList(int n);
 
     // Returns json with the viewMatrix and clipRect
     sk_sp<SkData> getJsonInfo(int n);
diff --git a/tools/skiaserve/urlhandlers/OpsHandler.cpp b/tools/skiaserve/urlhandlers/OpsHandler.cpp
index 0c55419..556c2a9 100644
--- a/tools/skiaserve/urlhandlers/OpsHandler.cpp
+++ b/tools/skiaserve/urlhandlers/OpsHandler.cpp
@@ -31,7 +31,7 @@
     if (0 == strcmp(method, MHD_HTTP_METHOD_GET)) {
         int n = request->getLastOp();
 
-        sk_sp<SkData> data(request->getJsonOpsTask(n));
+        sk_sp<SkData> data(request->getJsonOpList(n));
         return SendData(connection, data.get(), "application/json");
     }