[gold] Remove unused bolt-based stores

Bug: skia:
Change-Id: I4a1b3d16395c6ffd3ca1dd20171efec141e8a690
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/219777
Reviewed-by: Kevin Lubick <kjlubick@google.com>
Commit-Queue: Kevin Lubick <kjlubick@google.com>
diff --git a/golden/go/digeststore/digeststore.go b/golden/go/digeststore/digeststore.go
deleted file mode 100644
index eec2490..0000000
--- a/golden/go/digeststore/digeststore.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package digeststore
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"path"
-
-	"github.com/boltdb/bolt"
-)
-
-const (
-	SUB_DIR_NAME   = "digeststore"
-	DIGEST_DB_NAME = "digest_store.boltdb"
-)
-
-// DigestInfo aggregates all information we have about an individual digest.
-type DigestInfo struct {
-	// TestName for this digest.
-	TestName string
-
-	// Digest uniquely identifies this digest within the test.
-	Digest string
-
-	// First contains the timestamp of the first occurrence of this digest.
-	First int64
-
-	// Last contains the timestamp of the last time we have seen this digest.
-	Last int64
-
-	// Exception stores a string representing the exception that was encountered
-	// retrieving this digest. If Exception is "" then there was no problem.
-	Exception string
-
-	// IssueIDs is a list of issue ids that are associated with this digest.
-	IssueIDs []int
-}
-
-// UpdateTimestamps updates the time stamps of a DigestInfo based on the
-// arguments. It returns true if the digest info was modified.
-func (d *DigestInfo) UpdateTimestamps(first int64, last int64) bool {
-	changed := false
-	if first < d.First {
-		d.First = first
-		changed = true
-	}
-	if last > d.Last {
-		d.Last = last
-		changed = true
-	}
-	return changed
-}
-
-type DigestStore interface {
-	// Get returns the information about the given testName/digest pair.
-	Get(testName, digest string) (*DigestInfo, bool, error)
-
-	// Update updates the stored information about the testname/digest
-	// pairs identified in the list of DigestInfos.
-	Update(digestInfos []*DigestInfo) error
-}
-
-type BoltDigestStore struct {
-	digestDB *bolt.DB
-}
-
-func New(storageDir string) (DigestStore, error) {
-	dbDir := path.Join(storageDir, SUB_DIR_NAME)
-	if err := os.MkdirAll(dbDir, 0755); err != nil {
-		return nil, err
-	}
-	db, err := bolt.Open(path.Join(dbDir, DIGEST_DB_NAME), 0666, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return &BoltDigestStore{digestDB: db}, nil
-}
-
-func (b BoltDigestStore) Get(testName, digest string) (*DigestInfo, bool, error) {
-	if testName == "" {
-		return nil, false, fmt.Errorf("No testname provided for digest '%s'", digest)
-	}
-
-	var ret *DigestInfo = nil
-	err := b.digestDB.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(testName))
-		if bucket == nil {
-			return nil
-		}
-
-		if retBytes := bucket.Get([]byte(digest)); retBytes != nil {
-			if err := json.Unmarshal(retBytes, &ret); err != nil {
-				return err
-			}
-		}
-		return nil
-	})
-	return ret, ret != nil, err
-}
-
-func (b BoltDigestStore) Update(digestInfos []*DigestInfo) error {
-	return b.digestDB.Update(func(tx *bolt.Tx) error {
-		// Wrap everything into a single transaction. This avoids a write lock
-		// by using the lock of the transaction.
-		writeDigestInfos := make([]*DigestInfo, 0, len(digestInfos))
-		for _, digestInfo := range digestInfos {
-			di, found, err := b.Get(digestInfo.TestName, digestInfo.Digest)
-			if err != nil {
-				return err
-			}
-
-			// If the testname/digest was not found or needs to be updated we
-			// record it.
-			if !found {
-				writeDigestInfos = append(writeDigestInfos, digestInfo)
-			} else if di.UpdateTimestamps(digestInfo.First, digestInfo.Last) {
-				writeDigestInfos = append(writeDigestInfos, di)
-			}
-		}
-
-		// If no digest needs updating we are done.
-		if len(writeDigestInfos) == 0 {
-			return nil
-		}
-
-		for _, digestInfo := range writeDigestInfos {
-			bucket, err := tx.CreateBucketIfNotExists([]byte(digestInfo.TestName))
-			if err != nil {
-				return err
-			}
-
-			jsonBytes, err := json.Marshal(digestInfo)
-			if err != nil {
-				return err
-			}
-
-			if err = bucket.Put([]byte(digestInfo.Digest), jsonBytes); err != nil {
-				return err
-			}
-		}
-		return nil
-	})
-}
diff --git a/golden/go/digeststore/digeststore_test.go b/golden/go/digeststore/digeststore_test.go
deleted file mode 100644
index 42f985a..0000000
--- a/golden/go/digeststore/digeststore_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package digeststore
-
-import (
-	"os"
-	"testing"
-	"time"
-
-	assert "github.com/stretchr/testify/require"
-	"go.skia.org/infra/go/testutils"
-	"go.skia.org/infra/go/testutils/unittest"
-)
-
-const TEST_DATA_DIR = "testdata"
-
-func TestDigestStore(t *testing.T) {
-	unittest.MediumTest(t)
-	assert.NoError(t, os.MkdirAll(TEST_DATA_DIR, 0755))
-	defer testutils.RemoveAll(t, TEST_DATA_DIR)
-
-	digestStore, err := New(TEST_DATA_DIR)
-	assert.NoError(t, err)
-	testDigestStore(t, digestStore)
-}
-
-func testDigestStore(t assert.TestingT, digestStore DigestStore) {
-	testName_1, digest_1 := "sampleTest_1", "sampleDigest_1"
-	timestamp_1 := time.Now().Unix() - 20
-
-	// TODO(kjlubick): assert something with di
-	di, ok, err := digestStore.Get(testName_1, digest_1)
-	assert.NoError(t, err)
-	assert.False(t, ok)
-
-	digestInfos := []*DigestInfo{
-		{TestName: testName_1, Digest: digest_1, First: timestamp_1, Last: timestamp_1},
-	}
-	assert.NoError(t, digestStore.Update(digestInfos))
-
-	di, ok, err = digestStore.Get(testName_1, digest_1)
-	assert.NoError(t, err)
-	assert.True(t, ok)
-	assert.Equal(t, timestamp_1, di.Last)
-	assert.Equal(t, timestamp_1, di.First)
-
-	// Update the digest with a commit 10 seconds later than the first one.
-	timestamp_2 := timestamp_1 + 10
-	digestInfos = []*DigestInfo{
-		{TestName: testName_1, Digest: digest_1, First: timestamp_2, Last: timestamp_2},
-	}
-
-	assert.NoError(t, digestStore.Update(digestInfos))
-
-	di, ok, err = digestStore.Get(testName_1, digest_1)
-	assert.NoError(t, err)
-	assert.True(t, ok)
-
-	assert.Equal(t, timestamp_1, di.First)
-	assert.Equal(t, timestamp_2, di.Last)
-}
diff --git a/golden/go/issuestore/issuestore.go b/golden/go/issuestore/issuestore.go
deleted file mode 100644
index 2891033..0000000
--- a/golden/go/issuestore/issuestore.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package issuestore
-
-import (
-	"path"
-
-	"github.com/boltdb/bolt"
-	"go.skia.org/infra/go/boltutil"
-	"go.skia.org/infra/go/fileutil"
-	"go.skia.org/infra/go/util"
-)
-
-// IssueStore captures the functions necessary to persist the connection between
-// Monorail issues and digests, traces, tests and ignores.
-type IssueStore interface {
-	// ByDigest returns the ids of all issues associated with the given digest.
-	ByDigest(digest string) ([]string, error) // list of issues
-
-	// ByIgnore returns the ids of all issues associated with the given ignore rule.
-	ByIgnore(ignoreID string) ([]string, error) // list of issues
-
-	// ByTrace returns the ids of all issues associated with the given trace.
-	ByTrace(traceID string) ([]string, error) // list of issues
-
-	// ByTest returns the ids of all issues associated with the given test.
-	ByTest(testName string) ([]string, error) // list of issues
-
-	// Add creates an issue annotation or adds to an existing annotation.
-	// If the issue identified by delta.IssueID exists, delta will be merged into
-	// the existing annotation.
-	Add(delta *Annotation) error
-
-	// Subtract removes from an existing issue annotation. The values in
-	// delta are subtracted from an existing annotation.
-	Subtract(delta *Annotation) error
-
-	// Get returns the annotations for the given list of issue ids.
-	Get(issueIDs []string) ([]*Annotation, error)
-
-	// List returns a list of all issues that are currently annotated, with
-	// support for paging. The first 'offset' annotations are skipped and the
-	// returned slice has at most 'size' elements. If 'size' <= 0 there is no
-	// limit on the number of annotations returned.
-	List(offset int, size int) ([]*Annotation, int, error)
-
-	// Delete the given issue annotations.
-	Delete(issueIDs []string) error
-}
-
-var annotationIndices = []string{DIGEST_INDEX, TRACE_INDEX, IGNORE_INDEX, TEST_INDEX}
-
-// Annotation captures annotations for the issue identified by IssueID.
-type Annotation struct {
-	IssueID   string   // id of the issue in Monorail
-	Digests   []string // Image digests connected to this issue
-	Traces    []string // Trace ids connected to this issue.
-	Ignores   []string // Ignore ids connected to this issue.
-	TestNames []string // Test names connected to this issue.
-}
-
-// Key, see boltutil.Record interface.
-func (a *Annotation) Key() string {
-	return a.IssueID
-}
-
-// IndexValues, see boltutil.Record interface.
-func (a *Annotation) IndexValues() map[string][]string {
-	ret := make(map[string][]string, len(annotationIndices))
-	for _, idx := range annotationIndices {
-		switch idx {
-		case DIGEST_INDEX:
-			ret[idx] = append(ret[idx], a.Digests...)
-		case TRACE_INDEX:
-			ret[idx] = append(ret[idx], a.Traces...)
-		case IGNORE_INDEX:
-			ret[idx] = append(ret[idx], a.Ignores...)
-		case TEST_INDEX:
-			ret[idx] = append(ret[idx], a.TestNames...)
-		}
-	}
-	return ret
-}
-
-// Add adds the digests, traces, ignores and tests in delta to the current
-// annotation and deduplicates in the process.
-func (r *Annotation) Add(deltaRec *Annotation) bool {
-	updated := mergeStrings(&r.Digests, deltaRec.Digests)
-	updated = mergeStrings(&r.Traces, deltaRec.Traces) || updated
-	updated = mergeStrings(&r.Ignores, deltaRec.Ignores) || updated
-	return mergeStrings(&r.TestNames, deltaRec.TestNames) || updated
-}
-
-// Subtract removes the digests, traces, ignores and tests in delta from the current annotation.
-func (r *Annotation) Subtract(deltaRec *Annotation) bool {
-	updated := removeStrings(&r.Digests, deltaRec.Digests)
-	updated = removeStrings(&r.Traces, deltaRec.Traces) || updated
-	updated = removeStrings(&r.Ignores, deltaRec.Ignores) || updated
-	return removeStrings(&r.TestNames, deltaRec.TestNames) || updated
-}
-
-// IsEmpty returns true if all annotations are empty.
-func (r *Annotation) IsEmpty() bool {
-	return (len(r.Digests) + len(r.Traces) + len(r.Ignores) + len(r.TestNames)) == 0
-}
-
-const (
-	// Bucket names in boltdb. 'INDEX' in the name indicates an index.
-	ISSUES_DB    = "issues"
-	DIGEST_INDEX = "digest"
-	TRACE_INDEX  = "trace"
-	IGNORE_INDEX = "ignore"
-	TEST_INDEX   = "test"
-)
-
-// Separator used to separate child and parent id in indices.
-const IDX_SEPARATOR = "|"
-
-// boltIssueStore implements the IssueStore interface.
-type boltIssueStore struct {
-	store *boltutil.IndexedBucket
-}
-
-// New returns a new instance of IssueStore that is stored in the given directory.
-func New(baseDir string) (IssueStore, error) {
-	baseDir, err := fileutil.EnsureDirExists(baseDir)
-	if err != nil {
-		return nil, err
-	}
-
-	db, err := bolt.Open(path.Join(baseDir, "issuestore.db"), 0600, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	config := &boltutil.Config{
-		DB:      db,
-		Name:    "issues",
-		Indices: annotationIndices,
-		Codec:   util.JSONCodec(&Annotation{}),
-	}
-
-	store, err := boltutil.NewIndexedBucket(config)
-	if err != nil {
-		return nil, err
-	}
-
-	return &boltIssueStore{
-		store: store,
-	}, nil
-}
-
-// ByDigest, see IssueStore interface.
-func (b *boltIssueStore) ByDigest(digest string) ([]string, error) {
-	return b.readFromIndex(DIGEST_INDEX, digest)
-}
-
-// ByIgnore, see IssueStore interface.
-func (b *boltIssueStore) ByIgnore(ignoreID string) ([]string, error) {
-	return b.readFromIndex(IGNORE_INDEX, ignoreID)
-}
-
-// ByTrace, see IssueStore interface.
-func (b *boltIssueStore) ByTrace(traceID string) ([]string, error) {
-	return b.readFromIndex(TRACE_INDEX, traceID)
-}
-
-// ByTest, see IssueStore interface.
-func (b *boltIssueStore) ByTest(testName string) ([]string, error) {
-	return b.readFromIndex(TEST_INDEX, testName)
-}
-
-// Get, see IssueStore interface.
-func (b *boltIssueStore) Get(issueIDs []string) ([]*Annotation, error) {
-	if len(issueIDs) == 0 {
-		return []*Annotation{}, nil
-	}
-
-	result, err := b.store.Read(issueIDs)
-	if err != nil {
-		return nil, err
-	}
-
-	ret := make([]*Annotation, len(result))
-	for i, val := range result {
-		ret[i] = val.(*Annotation)
-	}
-	return ret, nil
-}
-
-// Add, see IssueStore interface.
-func (b *boltIssueStore) Add(delta *Annotation) error {
-	if delta.IsEmpty() {
-		return nil
-	}
-
-	writeFn := func(tx *bolt.Tx, result []boltutil.Record) error {
-		if result[0] != nil {
-			// If there is no change then don't write any records.
-			if !result[0].(*Annotation).Add(delta) {
-				result[0] = nil
-			}
-		} else {
-			result[0] = delta
-		}
-		return nil
-	}
-	return b.store.Update([]boltutil.Record{delta}, writeFn)
-}
-
-// List, see IssueStore interface.
-func (b *boltIssueStore) List(offset int, size int) ([]*Annotation, int, error) {
-	result, total, err := b.store.List(offset, size)
-	if err != nil {
-		return nil, 0, err
-	}
-	ret := make([]*Annotation, len(result))
-	for i, rec := range result {
-		ret[i] = rec.(*Annotation)
-	}
-	return ret, total, nil
-}
-
-// Subtract, see IssueStore interface.
-func (b *boltIssueStore) Subtract(delta *Annotation) error {
-	writeFn := func(tx *bolt.Tx, result []boltutil.Record) error {
-		found := result[0]
-		if found != nil {
-			rec := found.(*Annotation)
-			// Subtract the delta and only take action if there was a change.
-			if rec.Subtract(delta) {
-				// If the resulting record is not empty, then we write it to disk.
-				if !rec.IsEmpty() {
-					return nil
-				}
-
-				// Delete the empty record.
-				if err := b.store.DeleteTx(tx, []string{rec.IssueID}); err != nil {
-					return err
-				}
-			}
-		}
-		result[0] = nil
-		return nil
-	}
-	return b.store.Update([]boltutil.Record{delta}, writeFn)
-}
-
-// Delete, see IssueStore interface.
-func (b *boltIssueStore) Delete(issueIDs []string) error {
-	return b.store.Delete(issueIDs)
-}
-
-// readFromIndex does a lookup in the given index for the given value and
-// returns all primary keys that match.
-func (b *boltIssueStore) readFromIndex(index, value string) ([]string, error) {
-	ret, err := b.store.ReadIndex(index, []string{value})
-	if err != nil {
-		return nil, err
-	}
-	return ret[value], nil
-}
-
-// mergeStrings merges the strings of src into tgt. true is returned if the
-// strings in tgt changed as a result of the merge.
-func mergeStrings(tgt *[]string, src []string) bool {
-	if t := util.NewStringSet(*tgt, src); len(t) != len(*tgt) {
-		*tgt = t.Keys()
-		return true
-	}
-	return false
-}
-
-// removeStrings removes all strings from tgt that also appear in src. true is returned
-// if tgt changed as part of the removal.
-func removeStrings(tgt *[]string, src []string) bool {
-	if t := util.NewStringSet(*tgt).Complement(util.NewStringSet(src)); len(t) != len(*tgt) {
-		*tgt = t.Keys()
-		return true
-	}
-	return false
-}
diff --git a/golden/go/issuestore/issuestore_test.go b/golden/go/issuestore/issuestore_test.go
deleted file mode 100644
index 34a51be..0000000
--- a/golden/go/issuestore/issuestore_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package issuestore
-
-import (
-	"fmt"
-	"math/rand"
-	"sort"
-	"strings"
-	"testing"
-
-	assert "github.com/stretchr/testify/require"
-	"go.skia.org/infra/go/testutils"
-	"go.skia.org/infra/go/testutils/unittest"
-	"go.skia.org/infra/go/util"
-)
-
-const (
-	TEST_DATA_DIR = "./testdata"
-
-	// Prefixes for different generated item types.
-	ISSUE_PREFIX   = "ISSUES_"
-	DIGEST_PREFIX  = "DIGEST_"
-	IGNORES_PREFIX = "IGNORES_"
-	TRACE_PREFIX   = "TRACE_"
-	TEST_PREFIX    = "TEST_"
-)
-
-func TestIssueStore(t *testing.T) {
-	// Medium test because it stores a bolt db to disk
-	unittest.MediumTest(t)
-	const N_ISSUES = 20
-
-	// Add a number of issues
-	issueStore, err := New(TEST_DATA_DIR)
-	assert.NoError(t, err)
-	defer testutils.RemoveAll(t, TEST_DATA_DIR)
-
-	lookup := map[string][]string{}
-	initIssues := genIssues(t, lookup, N_ISSUES, N_ISSUES/4+1, N_ISSUES/2+1, N_ISSUES/3+1, N_ISSUES/3+1)
-	issueIDs := make([]string, 0, len(initIssues))
-	for _, issue := range initIssues {
-		assert.NoError(t, issueStore.Add(issue))
-		issueIDs = append(issueIDs, issue.IssueID)
-	}
-
-	for _, issue := range initIssues {
-		found, err := issueStore.Get([]string{issue.IssueID})
-		assert.NoError(t, err)
-		assert.Equal(t, 1, len(found))
-		assert.Equal(t, issue, found[0])
-	}
-
-	found, err := issueStore.Get(issueIDs)
-	assert.NoError(t, err)
-	assert.Equal(t, initIssues, found)
-
-	testAgainstLookup(t, issueStore, lookup)
-
-	// Assert that we can read them back by issue id, digest, trace id and test name.
-	updateIssues := genIssues(t, lookup, N_ISSUES/2+1, N_ISSUES/4+1, N_ISSUES/4+1, N_ISSUES/4+1, N_ISSUES/4+1)
-	updatedIssuesIDs := []string{}
-	for idx, issue := range updateIssues {
-		assert.NoError(t, issueStore.Add(issue))
-		initIssues[idx].Add(issue)
-		updatedIssuesIDs = append(updatedIssuesIDs, issue.IssueID)
-	}
-	assert.Len(t, updatedIssuesIDs, 11)
-	// Do a spot check
-	assert.Contains(t, updatedIssuesIDs, "ISSUES_0003")
-	testAgainstLookup(t, issueStore, lookup)
-
-	// Test the list function.
-	for i := 0; i < N_ISSUES; i += 2 {
-		foundList, total, err := issueStore.List(i, 2)
-		assert.NoError(t, err)
-		assert.Equal(t, 2, len(foundList))
-		assert.Equal(t, N_ISSUES, total)
-		compareEntries(t, initIssues[i:i+1], foundList[0:1])
-		compareEntries(t, initIssues[i+1:i+2], foundList[1:2])
-	}
-
-	foundList, total, err := issueStore.List(0, N_ISSUES+5)
-	assert.NoError(t, err)
-	assert.Equal(t, N_ISSUES, len(foundList))
-	assert.Equal(t, N_ISSUES, total)
-	compareEntries(t, initIssues, foundList)
-
-	// Remove the previously added entries.
-	for idx, issue := range updateIssues {
-		assert.NoError(t, issueStore.Subtract(issue))
-		removeLookup(lookup, issue)
-		initIssues[idx].Subtract(issue)
-		found, err := issueStore.Get([]string{issue.IssueID})
-		assert.NoError(t, err)
-		compareEntries(t, initIssues[idx:idx+1], found[0:1])
-	}
-
-	testAgainstLookup(t, issueStore, lookup)
-	foundList, total, err = issueStore.List(0, -1)
-	assert.NoError(t, err)
-	assert.Equal(t, N_ISSUES, total)
-	compareEntries(t, foundList, initIssues)
-
-	// Remove all entries to check at the bottom whether indices have been removed.
-	for _, issue := range initIssues {
-		removeLookup(lookup, issue)
-	}
-
-	// Subtract all annotations of a subset of issues.
-	for _, issue := range initIssues[:len(updateIssues)] {
-		assert.NoError(t, issueStore.Subtract(issue))
-	}
-	initIssues = initIssues[len(updateIssues):]
-	foundList, total, err = issueStore.List(0, -1)
-	assert.NoError(t, err)
-	// Should be 9 now because we started with 20 and removed 11.
-	assert.Equal(t, 9, total)
-	compareEntries(t, foundList, initIssues)
-
-	// Delete all issues and make sure they are gone.
-	assert.NoError(t, issueStore.Delete(issueIDs))
-	foundList, total, err = issueStore.List(0, -1)
-	assert.NoError(t, err)
-	assert.Equal(t, []*Annotation{}, foundList)
-	assert.Equal(t, 0, total)
-	testAgainstLookup(t, issueStore, lookup)
-}
-
-func compareEntries(t assert.TestingT, exps []*Annotation, actual []*Annotation) {
-	assert.Equal(t, len(exps), len(actual))
-	for i, exp := range exps {
-		assert.Equal(t, exp.IssueID, actual[i].IssueID)
-		compareList(t, exp.Digests, actual[i].Digests)
-		compareList(t, exp.Traces, actual[i].Traces)
-		compareList(t, exp.Ignores, actual[i].Ignores)
-		compareList(t, exp.TestNames, actual[i].TestNames)
-	}
-}
-
-func compareList(t assert.TestingT, exp, actual []string) {
-	sort.Strings(exp)
-	sort.Strings(actual)
-	assert.Equal(t, exp, actual)
-}
-
-func testAgainstLookup(t assert.TestingT, issueStore IssueStore, lookup map[string][]string) {
-	for itemID, exp := range lookup {
-		var found []string
-		var err error
-		if strings.HasPrefix(itemID, DIGEST_PREFIX) {
-			found, err = issueStore.ByDigest(itemID)
-		} else if strings.HasPrefix(itemID, IGNORES_PREFIX) {
-			found, err = issueStore.ByIgnore(itemID)
-		} else if strings.HasPrefix(itemID, TRACE_PREFIX) {
-			found, err = issueStore.ByTrace(itemID)
-		} else if strings.HasPrefix(itemID, TEST_PREFIX) {
-			found, err = issueStore.ByTest(itemID)
-		} else {
-			t.FailNow()
-		}
-		assert.NoError(t, err)
-		assert.Equal(t, exp, found)
-	}
-}
-
-func genIssues(t *testing.T, lookup map[string][]string, nIssues int, nDigests int, nTraces int, nIgnores int, nTestNames int) []*Annotation {
-	// Generate a list of issues with the given number of digests, traces, ignores and test names.
-	issues := fmtStrings(ISSUE_PREFIX+"%04d", nIssues)
-	digests := fmtStrings(DIGEST_PREFIX+"%04d", 5*nDigests)
-	ignores := fmtStrings(IGNORES_PREFIX+"%04d", 3*nIgnores)
-	traces := fmtStrings(TRACE_PREFIX+"%04d", 5*nTraces)
-	testNames := fmtStrings(TEST_PREFIX+"%04d", 5*nTestNames)
-
-	ret := make([]*Annotation, nIssues)
-	for idx, issueID := range issues {
-		r := &Annotation{
-			IssueID:   issueID,
-			Digests:   drawN(digests, nDigests, lookup, issueID),
-			Traces:    drawN(traces, nTraces, lookup, issueID),
-			Ignores:   drawN(ignores, nIgnores, lookup, issueID),
-			TestNames: drawN(testNames, nTestNames, lookup, issueID),
-		}
-		assert.Equal(t, []int{nDigests, nTraces, nIgnores, nTestNames}, []int{len(r.Digests), len(r.Traces), len(r.Ignores), len(r.TestNames)})
-		addLookup(lookup, r)
-		ret[idx] = r
-	}
-	return ret
-}
-
-func addLookup(lookup map[string][]string, rec *Annotation) {
-	addLookupItem(lookup, rec.Digests, rec.IssueID)
-	addLookupItem(lookup, rec.Traces, rec.IssueID)
-	addLookupItem(lookup, rec.Ignores, rec.IssueID)
-	addLookupItem(lookup, rec.TestNames, rec.IssueID)
-}
-
-func removeLookup(lookup map[string][]string, delta *Annotation) {
-	removeLookupItem(lookup, delta.Digests, delta.IssueID)
-	removeLookupItem(lookup, delta.Traces, delta.IssueID)
-	removeLookupItem(lookup, delta.Ignores, delta.IssueID)
-	removeLookupItem(lookup, delta.TestNames, delta.IssueID)
-}
-
-func addLookupItem(lookup map[string][]string, ids []string, parentID string) {
-	for _, id := range ids {
-		lookup[id] = util.NewStringSet(lookup[id], []string{parentID}).Keys()
-		sort.Strings(lookup[id])
-	}
-}
-
-func removeLookupItem(lookup map[string][]string, ids []string, parentID string) {
-	for _, id := range ids {
-		s := util.NewStringSet(lookup[id])
-		delete(s, parentID)
-		lookup[id] = s.Keys()
-		sort.Strings(lookup[id])
-	}
-}
-
-func fmtStrings(template string, n int) []string {
-	ret := make([]string, n)
-	for i := 0; i < n; i++ {
-		ret[i] = fmt.Sprintf(template, i)
-	}
-	return ret
-}
-
-func drawN(strs []string, n int, lookup map[string][]string, ignoreParent string) []string {
-	indices := rand.Perm(len(strs))
-	ret := make([]string, 0, n)
-	for i := 0; (i < len(indices)) && (len(ret) < n); i++ {
-		str := strs[indices[i]]
-		if ignoreParent == "" || !util.In(ignoreParent, lookup[str]) {
-			ret = append(ret, strs[indices[i]])
-		}
-	}
-	return ret
-}
diff --git a/golden/go/mocks/mocks.go b/golden/go/mocks/mocks.go
index 72b81c8..84ad405 100644
--- a/golden/go/mocks/mocks.go
+++ b/golden/go/mocks/mocks.go
@@ -18,7 +18,6 @@
 	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/tiling"
 	tracedb "go.skia.org/infra/go/trace/db"
-	"go.skia.org/infra/golden/go/digeststore"
 	"go.skia.org/infra/golden/go/types"
 )
 
@@ -41,25 +40,6 @@
 	return tiling.TraceId(strings.Join(traceParts, ":"))
 }
 
-type MockDigestStore struct {
-	IssueIDs  []int
-	FirstSeen int64
-	OkValue   bool
-}
-
-func (m *MockDigestStore) Get(testName, digest string) (*digeststore.DigestInfo, bool, error) {
-	return &digeststore.DigestInfo{
-		TestName: testName,
-		Digest:   digest,
-		First:    m.FirstSeen,
-	}, m.OkValue, nil
-}
-
-func (m *MockDigestStore) Update([]*digeststore.DigestInfo) error {
-	m.OkValue = true
-	return nil
-}
-
 type MockTileBuilder struct {
 	t    assert.TestingT
 	tile *tiling.Tile
diff --git a/golden/go/tsuite/tsuite.go b/golden/go/tsuite/tsuite.go
deleted file mode 100644
index e564767..0000000
--- a/golden/go/tsuite/tsuite.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// The tsuite package contains data structures to run tests on Firebase Testlab
-// and process the results.
-package tsuite
-
-import (
-	"context"
-	"encoding/json"
-	"net/http"
-
-	"cloud.google.com/go/storage"
-	"go.skia.org/infra/go/sklog"
-	"go.skia.org/infra/go/util"
-	"google.golang.org/api/option"
-)
-
-// TODO(stephana): Expand this to read the results and metadata from
-// GCS and ingest into Gold or a separate tool.
-
-// FirebaseDevice contains the information and JSON tags for device data
-// returned by Firebase.
-type FirebaseDevice struct {
-	Brand        string   `json:"brand"`
-	Form         string   `json:"form"`
-	ID           string   `json:"id"`
-	Manufacturer string   `json:"manufacturer"`
-	Name         string   `json:"name"`
-	VersionIDs   []string `json:"supportedVersionIds"`
-}
-
-// DeviceVersions combines device information from Firebase Testlab with
-// a selected list of versions. This is used to define a subset of versions
-// used by a device.
-type DeviceVersions struct {
-	Device *FirebaseDevice
-
-	// Versions contains the version ids of interest contained in Device.
-	Versions []string
-}
-
-// TestRunMeta contains the metadata of a complete test run on Firebase.
-type TestRunMeta struct {
-	ID             string            `json:"id"`
-	TS             int64             `json:"timeStamp"`
-	Devices        []*DeviceVersions `json:"devices"`
-	IgnoredDevices []*DeviceVersions `json:"ignoredDevices"`
-	ExitCode       int               `json:"exitCode"`
-}
-
-// TODO(stephana): WriteToGCS should probably be converted to accept an
-// instance of Client from the cloud.google.com/go/storage package.
-// Add this as the package evolves.
-
-// WriteToGCS writes the metadata as JSON to the given bucket and path in
-// GCS. It assumes that the provided client has permissions to write to the
-// specified location in GCS.
-func (t *TestRunMeta) WriteToGCS(bucket, path string, client *http.Client) error {
-	storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(client))
-	if err != nil {
-		return err
-	}
-
-	w := storageClient.Bucket(bucket).Object(path).NewWriter(context.Background())
-	if err := json.NewEncoder(w).Encode(t); err != nil {
-		return err
-	}
-	defer util.Close(w)
-
-	sklog.Infof("Success: Metadata written to %s/%s", bucket, path)
-	return nil
-}