[gitstore] Expect valid Index and Branches in Put()

Callers are now responsible for providing valid Index and Branches
fields on each LongCommit passed to Put(). This relieves us of the need
to load all existing commits in Put(), and we no longer need to care
about ancestry chains in PutBranches; we just retrieve the commits
pointed to by the new branch heads and use their indexes.
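
For illustration, a rough sketch of the caller contract this implies.
The putWithBranch helper, the gs variable, and the single-branch
assumption are hypothetical and not part of this change:

    // Sketch only; assumes commits are ordered oldest-to-newest along one
    // branch's first-parent chain.
    // (imports: context, go.skia.org/infra/go/gitstore,
    //  go.skia.org/infra/go/vcsinfo)
    func putWithBranch(ctx context.Context, gs gitstore.GitStore, branch string, commits []*vcsinfo.LongCommit) error {
        if len(commits) == 0 {
            return nil
        }
        for i, c := range commits {
            c.Index = i                                // first-parent index on the branch
            c.Branches = map[string]bool{branch: true} // branches whose first-parent chain contains c
        }
        if err := gs.Put(ctx, commits); err != nil {
            return err
        }
        // PutBranches now just looks up the commits at the new heads and
        // reuses their stored Index values; no ancestry walk is needed.
        head := commits[len(commits)-1].Hash
        return gs.PutBranches(ctx, map[string]string{branch: head})
    }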

Change-Id: I203781416f4bcaf73d3077558f89464b9df6b198
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/234913
Commit-Queue: Eric Boren <borenet@google.com>
Reviewed-by: Kevin Lubick <kjlubick@google.com>
diff --git a/gitsync/go/btgit/main.go b/gitsync/go/btgit/main.go
index e23b59f..82379c4 100644
--- a/gitsync/go/btgit/main.go
+++ b/gitsync/go/btgit/main.go
@@ -24,7 +24,7 @@
 		listRepos    = flag.Bool("list_repos", false, "List all repositories and quit")
 		loadGraph    = flag.Bool("load_graph", false, "Load the entire commit graph. For performance check only.")
 		projectID    = flag.String("project", "skia-public", "ID of the GCP project")
-		branch       = flag.String("branch", "", "Name of the branch to list. Empty means all commits across all branches.")
+		branch       = flag.String("branch", gitstore.ALL_BRANCHES, "Name of the branch to list. By default, show all commits across all branches.")
 		limit        = flag.Int("limit", 100, "Number of commits to show. 0 means no limit")
 		repoURL      = flag.String("repo_url", "", "URL of the git repo.")
 		verbose      = flag.Bool("verbose", false, "Indicate whether to log the commits we find.")
diff --git a/gitsync/go/gitsync/main.go b/gitsync/go/gitsync/main.go
index 15b1c13..3875b44 100644
--- a/gitsync/go/gitsync/main.go
+++ b/gitsync/go/gitsync/main.go
@@ -28,7 +28,7 @@
 // Default config/flag values
 var defaultConf = gitSyncConfig{
 	BTInstanceID:    "production",
-	BTTableID:       "git-repos",
+	BTTableID:       "git-repos2",
 	HttpPort:        ":9091",
 	Local:           false,
 	ProjectID:       "skia-public",
diff --git a/go/git/repograph/shared_tests/shared_tests.go b/go/git/repograph/shared_tests/shared_tests.go
index 2f272c8..b6a97fc 100644
--- a/go/git/repograph/shared_tests/shared_tests.go
+++ b/go/git/repograph/shared_tests/shared_tests.go
@@ -6,7 +6,9 @@
 	"io/ioutil"
 	"os"
 	"path"
+	"reflect"
 	"sort"
+	"strings"
 	"time"
 
 	assert "github.com/stretchr/testify/require"
@@ -766,9 +768,19 @@
 			_, ok := commitMap[c.Hash]
 			assert.True(t, ok, "%s not modified", c.Hash)
 		}
-		// TODO(borenet): We'd like to assert that any Branches maps
-		// which contain the same set of branches are the exact same
-		// instance, but I don't know of a way to do that.
+		// Verify that we deduplicated the branch maps.
+		maps := map[string]uintptr{}
+		for _, c := range actual {
+			keys := util.StringSet(c.Branches).Keys()
+			sort.Strings(keys)
+			str := strings.Join(keys, ",")
+			ptr := reflect.ValueOf(c.Branches).Pointer()
+			if exist, ok := maps[str]; ok {
+				assert.Equal(t, exist, ptr)
+			} else {
+				maps[str] = ptr
+			}
+		}
 	}
 
 	// Update branch info. Ensure that all commits were updated with the
diff --git a/go/gitstore/bt_gitstore/bt_gitstore.go b/go/gitstore/bt_gitstore/bt_gitstore.go
index 407a94f..8a0e503 100644
--- a/go/gitstore/bt_gitstore/bt_gitstore.go
+++ b/go/gitstore/bt_gitstore/bt_gitstore.go
@@ -7,6 +7,7 @@
 	"bytes"
 	"context"
 	"encoding/binary"
+	"encoding/json"
 	"fmt"
 	"hash/crc32"
 	"strconv"
@@ -18,6 +19,7 @@
 	"go.skia.org/infra/go/git"
 	"go.skia.org/infra/go/gitstore"
 	"go.skia.org/infra/go/skerr"
+	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/util"
 	"go.skia.org/infra/go/vcsinfo"
 	"golang.org/x/sync/errgroup"
@@ -25,10 +27,11 @@
 
 // BigTableGitStore implements the GitStore interface based on BigTable.
 type BigTableGitStore struct {
-	RepoID  int64
-	RepoURL string
-	shards  uint32
-	table   *bigtable.Table
+	RepoID          int64
+	RepoURL         string
+	shards          uint32
+	writeGoroutines int
+	table           *bigtable.Table
 }
 
 // New returns an instance of GitStore that uses BigTable as its backend storage.
@@ -51,10 +54,15 @@
 		shards = DefaultShards
 	}
 
+	writeGoroutines := config.WriteGoroutines
+	if writeGoroutines <= 0 {
+		writeGoroutines = DefaultWriteGoroutines
+	}
 	ret := &BigTableGitStore{
-		table:   client.Open(config.TableID),
-		shards:  uint32(shards),
-		RepoURL: repoURL,
+		table:           client.Open(config.TableID),
+		shards:          uint32(shards),
+		RepoURL:         repoURL,
+		writeGoroutines: writeGoroutines,
 	}
 
 	repoInfo, err := ret.loadRepoInfo(ctx, true)
@@ -81,15 +89,16 @@
 	metaVarIDCounter = "idcounter"
 
 	// index commit
-	colHash      = "h"
-	colTimestamp = "t"
+	colHashTs = "ht"
 
-	// shortcommit
+	// long commit
 	colAuthor   = "a"
 	colSubject  = "s"
 	colParents  = "p"
 	colBody     = "b"
 	colBranches = "br"
+	colHash     = "h"
+	colIndex    = "i"
 
 	// Define the row types.
 	typIndex     = "i"
@@ -97,9 +106,6 @@
 	typCommit    = "k"
 	typMeta      = "!"
 
-	// allCommitsBranch is a pseudo branch name to index all commits in a repo.
-	allCommitsBranch = "@all-commits"
-
 	// getBatchSize is the batchsize for the Get operation. Each call to bigtable is made with maximally
 	// this number of git hashes. This is a conservative number to stay within the 1M request
 	// size limit. Since requests are sharded this will not limit throughput in practice.
@@ -108,94 +114,260 @@
 	// writeBatchSize is the number of mutations to write at once. This could be fine tuned for different
 	// row types.
 	writeBatchSize = 1000
-)
 
-var (
 	// Default number of shards used, if not shards provided in BTConfig.
 	DefaultShards = 32
+
+	// DefaultWriteGoroutines defines the maximum number of goroutines
+	// used to write to BigTable concurrently, if not provided in BTConfig.
+	// This number was shown to keep memory usage reasonably low while still
+	// providing decent throughput.
+	DefaultWriteGoroutines = 100
 )
 
+// rowMutation is a mutation for a single BT row.
+type rowMutation struct {
+	row string
+	mut *bigtable.Mutation
+}
+
 // Put implements the GitStore interface.
 func (b *BigTableGitStore) Put(ctx context.Context, commits []*vcsinfo.LongCommit) error {
-	branch := ""
-
-	if err := b.writeLongCommits(ctx, commits); err != nil {
-		return skerr.Fmt("Error writing long commits: %s", err)
+	if len(commits) == 0 {
+		return nil
 	}
 
-	// Retrieve the commits in time chronological order and set the index.
-	indexCommits, err := b.RangeByTime(ctx, vcsinfo.MinTime, vcsinfo.MaxTime, branch)
-	if err != err {
-		return skerr.Fmt("Error retrieving commits in order: %s", err)
+	// Spin up a goroutine to create mutations for commits.
+	mutations := make(chan rowMutation, writeBatchSize)
+	var egroup errgroup.Group
+	egroup.Go(func() error {
+		defer func() {
+			close(mutations)
+		}()
+		// Create IndexCommits mutations for each branch for each commit.
+		for _, c := range commits {
+			// Validation.
+			if c.Index == 0 && len(c.Parents) != 0 {
+				return skerr.Fmt("Commit %s has index zero but has at least one parent. This cannot be correct.", c.Hash)
+			}
+			if len(c.Branches) == 0 {
+				// TODO(borenet): Is there any way to check for this?
+				sklog.Warningf("Commit %s has no branch information; this is valid if it is not on the first-parent ancestry chain of any branch.", c.Hash)
+			}
+
+			// LongCommit mutation.
+			mut, err := b.mutationForLongCommit(c)
+			if err != nil {
+				return skerr.Wrapf(err, "Failed to Put commits; failed to create mutation.")
+			}
+			mutations <- mut
+
+			// Create the IndexCommit.
+			ic := &vcsinfo.IndexCommit{
+				Hash:      c.Hash,
+				Index:     c.Index,
+				Timestamp: c.Timestamp,
+			}
+			mutations <- b.mutationForIndexCommit(gitstore.ALL_BRANCHES, ic)
+			mutations <- b.mutationForTimestampCommit(gitstore.ALL_BRANCHES, ic)
+			for branch := range c.Branches {
+				if branch != gitstore.ALL_BRANCHES {
+					mutations <- b.mutationForIndexCommit(branch, ic)
+					mutations <- b.mutationForTimestampCommit(branch, ic)
+				}
+			}
+		}
+		return nil
+	})
+
+	// Spin up workers to write to BT.
+	empty := ""
+	var egroup2 errgroup.Group
+	for i := 0; i < b.writeGoroutines; i++ {
+		egroup2.Go(func() error {
+			rows := make([]string, 0, writeBatchSize)
+			muts := make([]*bigtable.Mutation, 0, writeBatchSize)
+			for rowMut := range mutations {
+				rows = append(rows, rowMut.row)
+				muts = append(muts, rowMut.mut)
+				if len(rows) == writeBatchSize {
+					if err := b.applyBulk(ctx, rows, muts); err != nil {
+						return skerr.Wrapf(err, "Failed to write commits.")
+					}
+					// Reuse the buffers. We need to clear
+					// out the values so that the underlying
+					// elements can be GC'd.
+					for i := 0; i < len(rows); i++ {
+						rows[i] = empty
+						muts[i] = nil
+					}
+					rows = rows[:0]
+					muts = muts[:0]
+				}
+			}
+			if len(rows) > 0 {
+				if err := b.applyBulk(ctx, rows, muts); err != nil {
+					return skerr.Wrapf(err, "Failed to write commits.")
+				}
+			}
+			return nil
+		})
 	}
 
-	for idx, idxCommit := range indexCommits {
-		idxCommit.Index = idx
+	// Wait for the inserts to finish.
+	insertErr := egroup2.Wait()
+	if insertErr != nil {
+		// We need to consume all of the mutations so that the first
+		// goroutine can exit.
+		for range mutations {
+		}
 	}
-	return b.writeIndexCommits(ctx, indexCommits, branch)
+	generateErr := egroup.Wait()
+	if insertErr != nil && generateErr != nil {
+		return skerr.Wrapf(generateErr, "Failed to generate BT mutations, and failed to apply with: %s", insertErr)
+	} else if insertErr != nil {
+		return skerr.Wrapf(insertErr, "Failed to apply BT mutations.")
+	} else if generateErr != nil {
+		return skerr.Wrapf(generateErr, "Failed to generate BT mutations.")
+	}
+
+	// Write the ALL_BRANCHES branch pointer.
+	lastCommit := commits[len(commits)-1]
+	ic := &vcsinfo.IndexCommit{
+		Hash:      lastCommit.Hash,
+		Index:     lastCommit.Index,
+		Timestamp: lastCommit.Timestamp,
+	}
+	return b.putBranchPointer(ctx, getRepoInfoRowName(b.RepoURL), gitstore.ALL_BRANCHES, ic)
+}
+
+// mutationForLongCommit returns a rowMutation for the given LongCommit.
+func (b *BigTableGitStore) mutationForLongCommit(commit *vcsinfo.LongCommit) (rowMutation, error) {
+	mut, err := b.getCommitMutation(commit)
+	if err != nil {
+		return rowMutation{}, skerr.Wrapf(err, "Failed to create BT mutation")
+	}
+	return rowMutation{
+		row: b.rowName("", typCommit, commit.Hash),
+		mut: mut,
+	}, nil
+}
+
+// mutationForIndexCommit returns a rowMutation for the given IndexCommit.
+func (b *BigTableGitStore) mutationForIndexCommit(branch string, commit *vcsinfo.IndexCommit) rowMutation {
+	return rowMutation{
+		row: b.rowName(branch, typIndex, sortableIndex(commit.Index)),
+		mut: b.simpleMutation(cfCommit, [][2]string{
+			{colHashTs, fmt.Sprintf("%s#%d", commit.Hash, commit.Timestamp.Unix())}, // Git has a timestamp resolution of 1s.
+		}...),
+	}
+}
+
+// mutationForTimestampCommit returns a rowMutation for the given IndexCommit
+// keyed by timestamp.
+func (b *BigTableGitStore) mutationForTimestampCommit(branch string, commit *vcsinfo.IndexCommit) rowMutation {
+	return rowMutation{
+		row: b.rowName(branch, typTimeStamp, sortableTimestamp(commit.Timestamp)),
+		mut: b.simpleMutation(cfTsCommit, [][2]string{
+			{commit.Hash, sortableIndex(commit.Index)},
+		}...),
+	}
+}
+
+// applyBulk is a helper function for b.table.ApplyBulk.
+func (b *BigTableGitStore) applyBulk(ctx context.Context, rows []string, muts []*bigtable.Mutation) error {
+	errs, err := b.table.ApplyBulk(ctx, rows, muts)
+	if err != nil {
+		return skerr.Fmt("Error writing batch: %s", err)
+	}
+	if errs != nil {
+		return skerr.Fmt("Error writing some portions of batch: %s", errs)
+	}
+	return nil
 }
 
 // Get implements the GitStore interface.
 func (b *BigTableGitStore) Get(ctx context.Context, hashes []string) ([]*vcsinfo.LongCommit, error) {
-	rowNames := make(bigtable.RowList, len(hashes))
-	hashOrder := make(map[string]int, len(hashes))
+	// hashOrder tracks the original index(es) of each hash in the passed-in
+	// slice. It is used to ensure that we return the LongCommits in the
+	// desired order, despite our receiving them from BT in arbitrary order.
+	hashOrder := make(map[string][]int, len(hashes))
 	for idx, h := range hashes {
-		rowNames[idx] = b.rowName("", typCommit, h)
-		hashOrder[h] = idx
+		hashOrder[h] = append(hashOrder[h], idx)
+	}
+	rowNames := make(bigtable.RowList, 0, len(hashOrder))
+	for h := range hashOrder {
+		rowNames = append(rowNames, b.rowName("", typCommit, h))
 	}
 
 	var egroup errgroup.Group
-	tempRet := make([]*vcsinfo.LongCommit, len(hashes))
+	tempRet := make([]*vcsinfo.LongCommit, len(rowNames))
 	prefix := cfCommit + ":"
 
-	for batchStart := 0; batchStart < len(rowNames); batchStart += getBatchSize {
-		func(bStart, bEnd int) {
-			egroup.Go(func() error {
-				bRowNames := rowNames[bStart:bEnd]
-				batchIdx := int64(bStart - 1)
+	err := util.ChunkIter(len(rowNames), getBatchSize, func(bStart, bEnd int) error {
+		egroup.Go(func() error {
+			bRowNames := rowNames[bStart:bEnd]
+			batchIdx := int64(bStart - 1)
+			err := b.table.ReadRows(ctx, bRowNames, func(row bigtable.Row) bool {
+				longCommit := vcsinfo.NewLongCommit()
+				longCommit.Hash = keyFromRowName(row.Key())
 
-				err := b.table.ReadRows(ctx, bRowNames, func(row bigtable.Row) bool {
-					longCommit := vcsinfo.NewLongCommit()
-					longCommit.Hash = keyFromRowName(row.Key())
-
-					for _, col := range row[cfCommit] {
-						switch strings.TrimPrefix(col.Column, prefix) {
-						case colHash:
-							longCommit.Timestamp = col.Timestamp.Time().UTC()
-						case colAuthor:
-							longCommit.Author = string(col.Value)
-						case colSubject:
-							longCommit.Subject = string(col.Value)
-						case colParents:
-							if len(col.Value) > 0 {
-								longCommit.Parents = strings.Split(string(col.Value), ":")
-							}
-						case colBody:
-							longCommit.Body = string(col.Value)
+				for _, col := range row[cfCommit] {
+					switch strings.TrimPrefix(col.Column, prefix) {
+					case colHash:
+						longCommit.Timestamp = col.Timestamp.Time().UTC()
+					case colAuthor:
+						longCommit.Author = string(col.Value)
+					case colSubject:
+						longCommit.Subject = string(col.Value)
+					case colParents:
+						if len(col.Value) > 0 {
+							longCommit.Parents = strings.Split(string(col.Value), ":")
 						}
+					case colBody:
+						longCommit.Body = string(col.Value)
+					case colBranches:
+						if err := json.Unmarshal(col.Value, &longCommit.Branches); err != nil {
+							// We don't want to fail forever if there's a bad value in
+							// BigTable. Log an error and move on.
+							sklog.Errorf("Failed to decode LongCommit branches: %s\nStored value: %s", err, string(col.Value))
+						}
+					case colIndex:
+						index, err := strconv.Atoi(string(col.Value))
+						if err != nil {
+							// We don't want to fail forever if there's a bad value in
+							// BigTable. Log an error and move on.
+							sklog.Errorf("Failed to decode LongCommit index: %s\nStored value: %s", err, string(col.Value))
+						}
+						longCommit.Index = index
 					}
-					targetIdx := atomic.AddInt64(&batchIdx, 1)
-					tempRet[targetIdx] = longCommit
-					return true
-				})
-				if err != nil {
-					return skerr.Fmt("Error running ReadRows: %s", err)
 				}
-				return nil
+				targetIdx := atomic.AddInt64(&batchIdx, 1)
+				tempRet[targetIdx] = longCommit
+				return true
 			})
-		}(batchStart, util.MinInt(batchStart+getBatchSize, len(rowNames)))
+			if err != nil {
+				return skerr.Fmt("Error running ReadRows: %s", err)
+			}
+			return nil
+		})
+		return nil
+	})
+	if err != nil {
+		return nil, skerr.Wrapf(err, "Failed to spin up goroutines to load commits.")
 	}
 
 	if err := egroup.Wait(); err != nil {
-		return nil, err
+		return nil, skerr.Wrapf(err, "Failed loading commits from BT.")
 	}
 
-	// Put the results into their places based of the order of the input hashes.
+	// Order the LongCommits to match the passed-in slice of hashes.
 	ret := make([]*vcsinfo.LongCommit, len(hashes))
 	for _, commit := range tempRet {
 		if commit != nil {
-			targetIdx := hashOrder[commit.Hash]
-			ret[targetIdx] = commit
+			for _, targetIdx := range hashOrder[commit.Hash] {
+				ret[targetIdx] = commit
+			}
 		}
 	}
 	return ret, nil
@@ -203,49 +375,41 @@
 
 // PutBranches implements the GitStore interface.
 func (b *BigTableGitStore) PutBranches(ctx context.Context, branches map[string]string) error {
-	repoInfo, err := b.loadRepoInfo(ctx, false)
-	if err != nil {
-		return err
-	}
-
-	// Load the commit graph.
-	graph, err := b.GetGraph(ctx)
-	if err != nil {
-		return skerr.Fmt("Error loading graph: %s", err)
-	}
-
-	// updateFromm maps branchName -> branch_pointer_to_old_head to capture the branches we  need to update
-	// and whether the branch existed before this update (the value of the map is not nil).
-	updateFrom := make(map[string]*gitstore.BranchPointer, len(branches))
-	for branchName, head := range branches {
-		// Assume we start out with a completely fresh branch
-		var oldHeadPtr *gitstore.BranchPointer = nil
-		if foundHeadPtr, ok := repoInfo.Branches[branchName]; ok {
-			// We are already done and do not need to update this branch.
-			if foundHeadPtr.Head == head {
-				continue
-			}
-
-			oldHeadNode := graph.GetNode(foundHeadPtr.Head)
-			if oldHeadNode == nil {
-				return skerr.Fmt("Unable to find previous head commit %s in graph", foundHeadPtr.Head)
-			}
-			oldHeadPtr = foundHeadPtr
+	// Get the commits pointed to by the branches.
+	hashes := make([]string, 0, len(branches))
+	for _, head := range branches {
+		if head != gitstore.DELETE_BRANCH {
+			hashes = append(hashes, head)
 		}
-		updateFrom[branchName] = oldHeadPtr
+	}
+	longCommits, err := b.Get(ctx, hashes)
+	if err != nil {
+		return skerr.Wrapf(err, "Failed to retrieve branch heads.")
+	}
+	indexCommitsByHash := make(map[string]*vcsinfo.IndexCommit, len(longCommits))
+	for idx, c := range longCommits {
+		if c == nil {
+			return skerr.Fmt("Commit %s is missing from GitStore", hashes[idx])
+		}
+		indexCommitsByHash[c.Hash] = &vcsinfo.IndexCommit{
+			Hash:      c.Hash,
+			Index:     c.Index,
+			Timestamp: c.Timestamp,
+		}
 	}
 
 	var egroup errgroup.Group
-	for branchName, oldHeadPtr := range updateFrom {
-		func(branchName string, oldHeadPtr *gitstore.BranchPointer) {
-			egroup.Go(func() error {
-				if branches[branchName] == gitstore.DELETE_BRANCH {
-					return b.deleteBranchPointer(ctx, branchName)
-				} else {
-					return b.updateBranch(ctx, branchName, branches[branchName], oldHeadPtr, graph)
-				}
-			})
-		}(branchName, oldHeadPtr)
+	for name, head := range branches {
+		// https://golang.org/doc/faq#closures_and_goroutines
+		name := name
+		head := head
+		egroup.Go(func() error {
+			if head == gitstore.DELETE_BRANCH {
+				return b.deleteBranchPointer(ctx, name)
+			} else {
+				return b.putBranchPointer(ctx, getRepoInfoRowName(b.RepoURL), name, indexCommitsByHash[head])
+			}
+		})
 	}
 	if err := egroup.Wait(); err != nil {
 		return skerr.Fmt("Error updating branches: %s", err)
@@ -259,13 +423,6 @@
 	if err != nil {
 		return nil, err
 	}
-
-	// Replace the pseudo branch for all commits with an empty branch name.
-	if found, ok := repoInfo.Branches[allCommitsBranch]; ok {
-		repoInfo.Branches[""] = found
-		delete(repoInfo.Branches, allCommitsBranch)
-	}
-
 	return repoInfo.Branches, nil
 }
 
@@ -274,28 +431,86 @@
 	startTS := sortableTimestamp(start)
 	endTS := sortableTimestamp(end)
 
+	// If a branch was supplied, retrieve the pointer.
+	var branchPtr *gitstore.BranchPointer
+	var egroup errgroup.Group
+	if branch != gitstore.ALL_BRANCHES {
+		egroup.Go(func() error {
+			branches, err := b.GetBranches(ctx)
+			if err != nil {
+				return err
+			}
+			branchPtr = branches[branch]
+			return nil
+		})
+	}
+
 	result := newSRTimestampCommits(b.shards)
+	// Note that we do NOT use a LatestN filter here, because that would
+	// result in incomplete results in the case of commits which have the
+	// same timestamp. Git has a timestamp resolution of one second, which
+	// makes this likely, especially in tests.
 	filters := []bigtable.Filter{bigtable.FamilyFilter(cfTsCommit)}
 	err := b.iterShardedRange(ctx, branch, typTimeStamp, startTS, endTS, filters, result)
 	if err != nil {
 		return nil, err
 	}
+	indexCommits, timestamps := result.Sorted()
 
-	return result.Sorted(), nil
+	// Filter out results which do not belong on the given branch.
+	if err := egroup.Wait(); err != nil {
+		return nil, skerr.Wrapf(err, "Failed to retrieve branch pointer for %s", branch)
+	}
+	if branchPtr == nil && branch != gitstore.ALL_BRANCHES {
+		// If we don't know about the requested branch, return nil even
+		// if we found IndexCommits. This is correct behavior for
+		// deleted branches, because we don't delete the IndexCommits.
+		return nil, nil
+	}
+	if branchPtr != nil {
+		filtered := make(map[int][]*vcsinfo.IndexCommit, len(indexCommits))
+		for _, ic := range indexCommits {
+			if ic.Index <= branchPtr.Index {
+				filtered[ic.Index] = append(filtered[ic.Index], ic)
+			}
+		}
+		indexCommits = make([]*vcsinfo.IndexCommit, 0, len(filtered))
+		for idx := 0; idx <= branchPtr.Index; idx++ {
+			commits, ok := filtered[idx]
+			if !ok {
+				return nil, skerr.Fmt("Missing index %d for branch %s.", idx, branch)
+			}
+			if len(commits) == 1 {
+				indexCommits = append(indexCommits, commits[0])
+			} else {
+				sklog.Warningf("History was changed for branch %s. Deduplicating by last insertion into BT.", branch)
+				var mostRecent *vcsinfo.IndexCommit
+				for _, ic := range commits {
+					if mostRecent == nil || timestamps[ic].After(timestamps[mostRecent]) {
+						mostRecent = ic
+					}
+				}
+				indexCommits = append(indexCommits, mostRecent)
+			}
+		}
+	}
+
+	return indexCommits, nil
 }
 
-// RangeByTime implements the GitStore interface.
+// RangeN implements the GitStore interface.
 func (b *BigTableGitStore) RangeN(ctx context.Context, startIndex, endIndex int, branch string) ([]*vcsinfo.IndexCommit, error) {
 	startIdx := sortableIndex(startIndex)
 	endIdx := sortableIndex(endIndex)
 
 	result := newSRIndexCommits(b.shards)
-	filters := []bigtable.Filter{bigtable.FamilyFilter(cfCommit)}
+	filters := []bigtable.Filter{bigtable.FamilyFilter(cfCommit), bigtable.LatestNFilter(1)}
 	err := b.iterShardedRange(ctx, branch, typIndex, startIdx, endIdx, filters, result)
 	if err != nil {
 		return nil, err
 	}
-	return result.Sorted(), nil
+	rv, _ := result.Sorted()
+	return rv, nil
 }
 
 func (b *BigTableGitStore) loadRepoInfo(ctx context.Context, create bool) (*gitstore.RepoInfo, error) {
@@ -340,80 +555,9 @@
 	}, nil
 }
 
-// graphColFilter defines a filter (regex) that only keeps columns we need to build the commit graph.
-// Used by GetGraph.
-var graphColFilter = fmt.Sprintf("(%s)", strings.Join([]string{colHash, colParents}, "|"))
-
-// GetGraph implements the GitStore interface.
-func (b *BigTableGitStore) GetGraph(ctx context.Context) (*gitstore.CommitGraph, error) {
-	result := newRawNodesResult(b.shards)
-	filters := []bigtable.Filter{
-		bigtable.FamilyFilter(cfCommit),
-		bigtable.ColumnFilter(graphColFilter),
-	}
-	if err := b.iterShardedRange(ctx, "", typCommit, "", "", filters, result); err != nil {
-		return nil, skerr.Fmt("Error getting sharded commits: %s", err)
-	}
-	rawGraph, timeStamps := result.Merge()
-	return gitstore.BuildGraph(rawGraph, timeStamps), nil
-}
-
-func (b *BigTableGitStore) getAsIndexCommits(ctx context.Context, ancestors []*gitstore.Node, startIdx int) ([]*vcsinfo.IndexCommit, error) {
-	ret := make([]*vcsinfo.IndexCommit, len(ancestors))
-	for idx, commitNode := range ancestors {
-		ret[idx] = &vcsinfo.IndexCommit{
-			Index:     startIdx + idx,
-			Hash:      commitNode.Hash,
-			Timestamp: commitNode.Timestamp,
-		}
-	}
-	return ret, nil
-}
-
-// updateBranch updates the indices for the named branch and stores the branch pointer. It
-// calculates the branch based on the given commit graph.
-// If there is no previous branch then oldBranchPtr should be nil.
-func (b *BigTableGitStore) updateBranch(ctx context.Context, branchName, newBranchHead string, oldBranchPtr *gitstore.BranchPointer, graph *gitstore.CommitGraph) error {
-	// Make sure the new head node is in branch.
-	headNode := graph.GetNode(newBranchHead)
-	if headNode == nil {
-		return skerr.Fmt("Head commit %s not found in commit graph", newBranchHead)
-	}
-
-	// If we have not previous branch we set the corresponding values so the logic below still works.
-	if oldBranchPtr == nil {
-		oldBranchPtr = &gitstore.BranchPointer{Head: "", Index: 0}
-	}
-
-	branchNodes := graph.DecendantChain(oldBranchPtr.Head, newBranchHead)
-	startIndex := 0
-
-	// If the hash of the first Node matches the hash of the old branchpointer we need to adjust
-	// the initial value of index.
-	if branchNodes[0].Hash == oldBranchPtr.Head {
-		startIndex = oldBranchPtr.Index
-	}
-	indexCommits, err := b.getAsIndexCommits(ctx, branchNodes, startIndex)
-	if err != nil {
-		return skerr.Fmt("Error getting index commits for branch %s: %s", branchName, err)
-	}
-
-	// Write the index commits.
-	if err := b.writeIndexCommits(ctx, indexCommits, branchName); err != nil {
-		return err
-	}
-
-	// Write the index commits of the branch sorted by timestamps.
-	return b.writeTimestampIndex(ctx, indexCommits, branchName)
-}
-
 // putBranchPointer writes the branch pointer (the HEAD of a branch) to the row that stores
 // the repo information. idxCommit is the index commit of the HEAD of the branch.
 func (b *BigTableGitStore) putBranchPointer(ctx context.Context, repoInfoRowName, branchName string, idxCommit *vcsinfo.IndexCommit) error {
-	if branchName == "" {
-		branchName = allCommitsBranch
-	}
-
 	mut := bigtable.NewMutation()
 	now := bigtable.Now()
 	mut.Set(cfBranches, branchName, now, encBranchPointer(idxCommit.Hash, idxCommit.Index))
@@ -428,99 +572,6 @@
 	return b.table.Apply(ctx, getRepoInfoRowName(b.RepoURL), mut)
 }
 
-// writeLongCommits writes the LongCommits to the store idempotently.
-func (b *BigTableGitStore) writeLongCommits(ctx context.Context, commits []*vcsinfo.LongCommit) error {
-	branch := ""
-
-	// Assemble the mutations.
-	nMutations := len(commits)
-	rowNames := make([]string, 0, nMutations)
-	mutations := make([]*bigtable.Mutation, 0, nMutations)
-
-	// Assemble the records for the Timestamp index.
-	tsIdxCommits := make([]*vcsinfo.IndexCommit, 0, nMutations)
-
-	for _, commit := range commits {
-		// Add the long commits
-		rowNames = append(rowNames, b.rowName(branch, typCommit, commit.Hash))
-		mutations = append(mutations, b.getCommitMutation(commit))
-
-		tsIdxCommits = append(tsIdxCommits, &vcsinfo.IndexCommit{
-			Hash:      commit.Hash,
-			Timestamp: commit.Timestamp,
-		})
-	}
-
-	if err := b.applyBulkBatched(ctx, rowNames, mutations, writeBatchSize); err != nil {
-		return skerr.Fmt("Error writing commits: %s", err)
-	}
-	return b.writeTimestampIndex(ctx, tsIdxCommits, branch)
-}
-
-// applyBulkBatched writes the given rowNames/mutation pairs to bigtable in batches that are
-// maximally of size 'batchSize'. The batches are written in parallel.
-func (b *BigTableGitStore) applyBulkBatched(ctx context.Context, rowNames []string, mutations []*bigtable.Mutation, batchSize int) error {
-	var egroup errgroup.Group
-	err := util.ChunkIter(len(rowNames), batchSize, func(chunkStart, chunkEnd int) error {
-		egroup.Go(func() error {
-			rowNames := rowNames[chunkStart:chunkEnd]
-			mutations := mutations[chunkStart:chunkEnd]
-			errs, err := b.table.ApplyBulk(ctx, rowNames, mutations)
-			if err != nil {
-				return skerr.Fmt("Error writing batch [%d:%d]: %s", chunkStart, chunkEnd, err)
-			}
-			if errs != nil {
-				return skerr.Fmt("Error writing some portions of batch [%d:%d]: %s", chunkStart, chunkEnd, errs)
-			}
-			return nil
-		})
-		return nil
-	})
-	if err != nil {
-		return skerr.Fmt("Error running ChunkIter: %s", err)
-	}
-	return egroup.Wait()
-}
-
-// writeIndexCommits writes the given index commits keyed by their indices for the given branch.
-func (b *BigTableGitStore) writeIndexCommits(ctx context.Context, indexCommits []*vcsinfo.IndexCommit, branch string) error {
-	idxRowNames := make([]string, 0, len(indexCommits))
-	idxMutations := make([]*bigtable.Mutation, 0, len(indexCommits))
-
-	for idx, commit := range indexCommits {
-		sIndex := sortableIndex(indexCommits[idx].Index)
-		idxRowNames = append(idxRowNames, b.rowName(branch, typIndex, sIndex))
-		idxMutations = append(idxMutations, b.simpleMutation(cfCommit, commit.Timestamp, [2]string{colHash, commit.Hash}))
-	}
-
-	if err := b.applyBulkBatched(ctx, idxRowNames, idxMutations, writeBatchSize); err != nil {
-		return skerr.Fmt("Error writing indices: %s", err)
-	}
-	return b.putBranchPointer(ctx, getRepoInfoRowName(b.RepoURL), branch, indexCommits[len(indexCommits)-1])
-}
-
-// writeTimestampIndexCommits writes the given index commits keyed by their timestamp for the
-// given branch.
-func (b *BigTableGitStore) writeTimestampIndex(ctx context.Context, indexCommits []*vcsinfo.IndexCommit, branch string) error {
-	nMutations := len(indexCommits)
-	tsRowNames := make([]string, 0, nMutations)
-	tsMutations := make([]*bigtable.Mutation, 0, nMutations)
-
-	for _, commit := range indexCommits {
-		tsRowName := b.rowName(branch, typTimeStamp, sortableTimestamp(commit.Timestamp))
-		tsRowNames = append(tsRowNames, tsRowName)
-		tsMutations = append(tsMutations, b.simpleMutation(cfTsCommit, commit.Timestamp, [][2]string{
-			{commit.Hash, sortableIndex(commit.Index)},
-		}...))
-	}
-
-	// Write the timestamped index.
-	if err := b.applyBulkBatched(ctx, tsRowNames, tsMutations, writeBatchSize); err != nil {
-		return skerr.Fmt("Error writing timestamps: %s", err)
-	}
-	return nil
-}
-
 // iterShardedRange iterates the keys in the half open interval [startKey, endKey) across all
 // shards triggering as many queries as there are shards. If endKey is empty, then startKey is
 // used to generate a prefix and a Prefix scan is performed.
@@ -530,33 +581,33 @@
 
 	// Query all shards in parallel.
 	for shard := uint32(0); shard < b.shards; shard++ {
-		func(shard uint32) {
-			egroup.Go(func() error {
-				defer result.Finish(shard)
+		// https://golang.org/doc/faq#closures_and_goroutines
+		shard := shard
+		egroup.Go(func() error {
+			defer result.Finish(shard)
 
-				var rr bigtable.RowRange
-				// Treat the startKey as part of a prefix and do a prefix scan.
-				if endKey == "" {
-					rowPrefix := b.shardedRowName(shard, branch, rowType, startKey)
-					rr = bigtable.PrefixRange(rowPrefix)
-				} else {
-					// Derive the start and end row names.
-					rStart := b.shardedRowName(shard, branch, rowType, startKey)
-					rEnd := b.shardedRowName(shard, branch, rowType, endKey)
-					rr = bigtable.NewRange(rStart, rEnd)
-				}
+			var rr bigtable.RowRange
+			// Treat the startKey as part of a prefix and do a prefix scan.
+			if endKey == "" {
+				rowPrefix := b.shardedRowName(shard, branch, rowType, startKey)
+				rr = bigtable.PrefixRange(rowPrefix)
+			} else {
+				// Derive the start and end row names.
+				rStart := b.shardedRowName(shard, branch, rowType, startKey)
+				rEnd := b.shardedRowName(shard, branch, rowType, endKey)
+				rr = bigtable.NewRange(rStart, rEnd)
+			}
 
-				var addErr error
-				err := b.table.ReadRows(ctx, rr, func(row bigtable.Row) bool {
-					addErr = result.Add(shard, row)
-					return addErr == nil
-				}, filtersToReadOptions(filters)...)
-				if err != nil {
-					return err
-				}
-				return addErr
-			})
-		}(shard)
+			var addErr error
+			err := b.table.ReadRows(ctx, rr, func(row bigtable.Row) bool {
+				addErr = result.Add(shard, row)
+				return addErr == nil
+			}, filtersToReadOptions(filters)...)
+			if err != nil {
+				return err
+			}
+			return addErr
+		})
 	}
 
 	if err := egroup.Wait(); err != nil {
@@ -567,18 +618,18 @@
 
 // simpleMutation assembles a simple mutation consisting of a column family, a timestamp and a
 // set of column/value pairs. The timestamp is applied to all column/pairs.
-func (b *BigTableGitStore) simpleMutation(cfFam string, timeStamp time.Time, colValPairs ...[2]string) *bigtable.Mutation {
-	ts := bigtable.Time(timeStamp.UTC())
+func (b *BigTableGitStore) simpleMutation(cfFam string, colValPairs ...[2]string) *bigtable.Mutation {
 	ret := bigtable.NewMutation()
 	for _, pair := range colValPairs {
-		ret.Set(cfFam, pair[0], ts, []byte(pair[1]))
+		ret.DeleteCellsInColumn(cfFam, pair[0])
+		ret.Set(cfFam, pair[0], bigtable.ServerTime, []byte(pair[1]))
 	}
 	return ret
 }
 
 // getCommitMutation gets the mutation to write a long commit. Since the timestamp is set to the
 // timestamp of the commit this is idempotent.
-func (b *BigTableGitStore) getCommitMutation(commit *vcsinfo.LongCommit) *bigtable.Mutation {
+func (b *BigTableGitStore) getCommitMutation(commit *vcsinfo.LongCommit) (*bigtable.Mutation, error) {
 	ts := bigtable.Time(commit.Timestamp.UTC())
 	ret := bigtable.NewMutation()
 	ret.Set(cfCommit, colHash, ts, []byte(commit.Hash))
@@ -586,7 +637,13 @@
 	ret.Set(cfCommit, colSubject, ts, []byte(commit.Subject))
 	ret.Set(cfCommit, colParents, ts, []byte(strings.Join(commit.Parents, ":")))
 	ret.Set(cfCommit, colBody, ts, []byte(commit.Body))
-	return ret
+	encBranches, err := json.Marshal(commit.Branches)
+	if err != nil {
+		return nil, err
+	}
+	ret.Set(cfCommit, colBranches, ts, encBranches)
+	ret.Set(cfCommit, colIndex, ts, []byte(strconv.Itoa(commit.Index)))
+	return ret, nil
 }
 
 // rowName returns that BT rowName based on the tuple: (branch,rowType,Key).
diff --git a/go/gitstore/bt_gitstore/bt_gitstore_test.go b/go/gitstore/bt_gitstore/bt_gitstore_test.go
index 37a057b..e118d5f 100644
--- a/go/gitstore/bt_gitstore/bt_gitstore_test.go
+++ b/go/gitstore/bt_gitstore/bt_gitstore_test.go
@@ -3,6 +3,7 @@
 import (
 	"context"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -16,6 +17,7 @@
 	gitstore_testutils "go.skia.org/infra/go/gitstore/testutils"
 	"go.skia.org/infra/go/testutils/unittest"
 	"go.skia.org/infra/go/timer"
+	"go.skia.org/infra/go/util"
 	"go.skia.org/infra/go/vcsinfo"
 	vcs_testutils "go.skia.org/infra/go/vcsinfo/testutils"
 )
@@ -28,6 +30,7 @@
 // If not present, syncs skia.git to /tmp/skia first.
 func TestGitStoreSkiaRepo(t *testing.T) {
 	unittest.ManualTest(t)
+	t.Skip()
 	skiaRepoDir := filepath.Join(os.TempDir(), "skia")
 	if _, err := os.Stat(skiaRepoDir); os.IsNotExist(err) {
 		_, err = git.NewRepo(context.Background(), common.REPO_SKIA, os.TempDir())
@@ -43,12 +46,16 @@
 
 	repoDir, cleanup := vcs_testutils.InitTempRepo()
 	defer cleanup()
-	testGitStore(t, localRepoURL, repoDir, true)
+	testGitStore(t, "file://"+repoDir, repoDir, true)
 }
 
 func testGitStore(t *testing.T, repoURL, repoDir string, freshLoad bool) {
+	wd, err := ioutil.TempDir("", "")
+	assert.NoError(t, err)
+	defer util.RemoveAll(wd)
+
 	// Get all commits that have been added to the gitstore.
-	_, longCommits, gitStore := gitstore_testutils.SetupAndLoadBTGitStore(t, repoURL, repoDir, freshLoad)
+	_, longCommits, gitStore := gitstore_testutils.SetupAndLoadBTGitStore(t, context.Background(), wd, repoURL, freshLoad)
 
 	// Sort long commits the way they are sorted by BigTable (by timestamp/hash)
 	sort.Slice(longCommits, func(i, j int) bool {
@@ -66,7 +73,7 @@
 	}
 
 	// Find all the commits in the repository independent of branches.
-	foundIndexCommits, foundLongCommits := getFromRange(t, gitStore, 0, len(longCommits), "")
+	foundIndexCommits, foundLongCommits := getFromRange(t, gitStore, 0, len(longCommits), gitstore.ALL_BRANCHES)
 	assert.Equal(t, len(indexCommits), len(foundIndexCommits))
 	assert.Equal(t, len(longCommits), len(foundLongCommits))
 
@@ -132,7 +139,6 @@
 	for _, commit := range foundIndexCommits {
 		hashes = append(hashes, commit.Hash)
 	}
-
 	tLongCommits := timer.New(fmt.Sprintf("Get %d LongCommits from branch %q", len(hashes), branchName))
 	foundLongCommits, err := gitStore.Get(ctx, hashes)
 	assert.NoError(t, err)
diff --git a/go/gitstore/bt_gitstore/helpers.go b/go/gitstore/bt_gitstore/helpers.go
index e972303..aed7eb7 100644
--- a/go/gitstore/bt_gitstore/helpers.go
+++ b/go/gitstore/bt_gitstore/helpers.go
@@ -48,13 +48,8 @@
 		if readRowErr != nil {
 			return false
 		}
-		// save the repo info and set the all-commits branch.
+		// save the repo info.
 		ret[repoInfo.RepoURL] = repoInfo
-		if found, ok := repoInfo.Branches[allCommitsBranch]; ok {
-			repoInfo.Branches[""] = found
-			delete(repoInfo.Branches, allCommitsBranch)
-		}
-
 		return true
 	}, bigtable.RowFilter(bigtable.LatestNFilter(1)))
 
diff --git a/go/gitstore/bt_gitstore/repo_impl_test.go b/go/gitstore/bt_gitstore/repo_impl_test.go
index 3494e18..e2d280e 100644
--- a/go/gitstore/bt_gitstore/repo_impl_test.go
+++ b/go/gitstore/bt_gitstore/repo_impl_test.go
@@ -8,6 +8,7 @@
 
 	"github.com/google/uuid"
 	assert "github.com/stretchr/testify/require"
+	"go.skia.org/infra/go/deepequal"
 	"go.skia.org/infra/go/git"
 	"go.skia.org/infra/go/git/repograph"
 	repograph_shared_tests "go.skia.org/infra/go/git/repograph/shared_tests"
@@ -36,9 +37,36 @@
 func (u *gitstoreRefresher) Refresh(commits ...*vcsinfo.LongCommit) {
 	ctx := context.Background()
 	// Add the commits.
-	assert.NoError(u.t, u.gs.Put(ctx, commits))
+	update := make(map[string]*vcsinfo.LongCommit, len(commits))
+	for _, commit := range commits {
+		c, err := u.repo.Details(ctx, commit.Hash)
+		assert.NoError(u.t, err)
+		// This is inefficient, but the test repo is small.
+		hashes, err := u.repo.RevList(ctx, "--first-parent", c.Hash)
+		assert.NoError(u.t, err)
+		c.Index = len(hashes) - 1
+		c.Branches = map[string]bool{}
+		update[c.Hash] = c
+	}
 	branches, err := u.repo.Branches(ctx)
 	assert.NoError(u.t, err)
+	for _, b := range branches {
+		hashes, err := u.repo.RevList(ctx, "--first-parent", b.Head)
+		assert.NoError(u.t, err)
+		for _, hash := range hashes {
+			c, ok := update[hash]
+			if ok {
+				c.Branches[b.Name] = true
+			}
+		}
+	}
+	putCommits := make([]*vcsinfo.LongCommit, 0, len(update))
+	putHashes := make([]string, 0, len(update))
+	for _, c := range update {
+		putCommits = append(putCommits, c)
+		putHashes = append(putHashes, c.Hash)
+	}
+	assert.NoError(u.t, u.gs.Put(ctx, putCommits))
 	putBranches := make(map[string]string, len(branches))
 	for _, branch := range branches {
 		putBranches[branch.Name] = branch.Head
@@ -46,7 +74,7 @@
 	oldBranches, err := u.gs.GetBranches(ctx)
 	assert.NoError(u.t, err)
 	for name := range oldBranches {
-		if name == "" {
+		if name == gitstore.ALL_BRANCHES {
 			continue
 		}
 		if _, ok := putBranches[name]; !ok {
@@ -69,7 +97,15 @@
 			}
 		}
 		for name := range actual {
-			if _, ok := putBranches[name]; name != "" && !ok {
+			if _, ok := putBranches[name]; name != gitstore.ALL_BRANCHES && !ok {
+				allMatch = false
+				break
+			}
+		}
+		gotCommits, err := u.gs.Get(ctx, putHashes)
+		assert.NoError(u.t, err)
+		for idx, expect := range putCommits {
+			if !deepequal.DeepEqual(expect, gotCommits[idx]) {
 				allMatch = false
 				break
 			}
diff --git a/go/gitstore/bt_gitstore/sharded_result.go b/go/gitstore/bt_gitstore/sharded_result.go
index e7acbe5..ebf2afc 100644
--- a/go/gitstore/bt_gitstore/sharded_result.go
+++ b/go/gitstore/bt_gitstore/sharded_result.go
@@ -2,6 +2,7 @@
 
 import (
 	"sort"
+	"strconv"
 	"strings"
 	"sync/atomic"
 	"time"
@@ -23,16 +24,22 @@
 
 // srIndexCommits implements the shardedResults interface for collecting results that are vcsinfo.IndexCommits
 type srIndexCommits struct {
-	errs    []*multierror.Error
-	results [][]*vcsinfo.IndexCommit
-	retSize int64
+	errs       []*multierror.Error
+	timestamps []map[*vcsinfo.IndexCommit]time.Time
+	results    [][]*vcsinfo.IndexCommit
+	retSize    int64
 }
 
 // create a new instance to collect IndexCommits.
 func newSRIndexCommits(shards uint32) *srIndexCommits {
+	timestamps := make([]map[*vcsinfo.IndexCommit]time.Time, shards)
+	for i := uint32(0); i < shards; i++ {
+		timestamps[i] = map[*vcsinfo.IndexCommit]time.Time{}
+	}
 	return &srIndexCommits{
-		results: make([][]*vcsinfo.IndexCommit, shards),
-		errs:    make([]*multierror.Error, shards),
+		results:    make([][]*vcsinfo.IndexCommit, shards),
+		timestamps: timestamps,
+		errs:       make([]*multierror.Error, shards),
 	}
 }
 
@@ -42,22 +49,28 @@
 	if idx < 0 {
 		return skerr.Fmt("Unable to parse index key %q. Invalid index", row.Key())
 	}
-
-	var hash string
-	var timeStamp bigtable.Timestamp
 	prefix := cfCommit + ":"
 	for _, col := range row[cfCommit] {
-		if strings.TrimPrefix(col.Column, prefix) == colHash {
-			hash = string(col.Value)
-			timeStamp = col.Timestamp
+		if strings.TrimPrefix(col.Column, prefix) == colHashTs {
+			split := strings.Split(string(col.Value), "#")
+			if len(split) != 2 {
+				return skerr.Fmt("Unable to parse hash#timestamp from %q", string(col.Value))
+			}
+			ts, err := strconv.Atoi(split[1])
+			if err != nil {
+				return skerr.Wrapf(err, "Unable to parse hash#timestamp from %q", string(col.Value))
+			}
+			hash := make([]byte, len(split[0]))
+			copy(hash, []byte(split[0]))
+			ic := &vcsinfo.IndexCommit{
+				Index:     idx,
+				Hash:      string(hash),
+				Timestamp: time.Unix(int64(ts), 0).UTC(), // Git has a timestamp resolution of 1s.
+			}
+			s.results[shard] = append(s.results[shard], ic)
+			s.timestamps[shard][ic] = col.Timestamp.Time().UTC()
 		}
 	}
-
-	s.results[shard] = append(s.results[shard], &vcsinfo.IndexCommit{
-		Index:     idx,
-		Hash:      hash,
-		Timestamp: timeStamp.Time().UTC(),
-	})
 	return nil
 }
 
@@ -66,21 +79,21 @@
 	atomic.AddInt64(&s.retSize, int64(len(s.results)))
 }
 
-// Sorted returns the resulting IndexCommits by Index->TimeStamp->Hash.
+// Sorted returns the resulting IndexCommits by Index->TimeStamp->Hash, and the
+// server-side update timestamps associated with each IndexCommit.
 // Using the hash ensures results with identical timestamps are sorted stably.
-func (s *srIndexCommits) Sorted() []*vcsinfo.IndexCommit {
+func (s *srIndexCommits) Sorted() ([]*vcsinfo.IndexCommit, map[*vcsinfo.IndexCommit]time.Time) {
 	// Concatenate the shard results into a single output and sort it.
-	ret := make([]*vcsinfo.IndexCommit, 0, s.retSize)
-	for _, sr := range s.results {
-		ret = append(ret, sr...)
+	ics := make([]*vcsinfo.IndexCommit, 0, s.retSize)
+	timestamps := make(map[*vcsinfo.IndexCommit]time.Time, len(ics))
+	for shard, sr := range s.results {
+		ics = append(ics, sr...)
+		for ic, t := range s.timestamps[shard] {
+			timestamps[ic] = t
+		}
 	}
-	sort.Slice(ret, func(i, j int) bool {
-		return ret[i].Index < ret[j].Index ||
-			((ret[i].Index == ret[j].Index) && ret[i].Timestamp.Before(ret[j].Timestamp)) ||
-			((ret[i].Index == ret[j].Index) && ret[i].Timestamp.Equal(ret[j].Timestamp) &&
-				(ret[i].Hash < ret[j].Hash))
-	})
-	return ret
+	sort.Sort(vcsinfo.IndexCommitSlice(ics))
+	return ics, timestamps
 }
 
 // srTimestampCommits is an adaptation of srIndexCommits that extracts a different
@@ -101,83 +114,28 @@
 	prefix := cfTsCommit + ":"
 	for _, col := range row[cfTsCommit] {
 		hash := strings.TrimPrefix(col.Column, prefix)
-		timeStamp := col.Timestamp
 
-		// Parse the index
+		// Parse the timestamp from the row key.
+		split := strings.Split(row.Key(), ":")
+		ts, err := strconv.Atoi(split[len(split)-1])
+		if err != nil {
+			return skerr.Wrapf(err, "Unable to parse timestamp from row key %q", row.Key())
+		}
+
+		// Parse the index.
 		idxStr := string(col.Value)
 		idx := parseIndex(idxStr)
 		if idx < 0 {
 			return skerr.Fmt("Unable to parse index key %q. Invalid index", idxStr)
 		}
 
-		s.results[shard] = append(s.results[shard], &vcsinfo.IndexCommit{
+		ic := &vcsinfo.IndexCommit{
 			Hash:      hash,
-			Timestamp: timeStamp.Time().UTC(),
+			Timestamp: time.Unix(int64(ts), 0).UTC(), // Git has a timestamp resolution of 1s.
 			Index:     idx,
-		})
-	}
-	return nil
-}
-
-// rawNodesResults implements the shardedResults interface for rows necessary to build the
-// commit graph. It collects slices of strings, where the first string is the commit hash and
-// all subsequent strings are its parents.
-type rawNodesResult struct {
-	errs       []*multierror.Error
-	results    [][][]string
-	timeStamps [][]time.Time
-	retSize    int64
-}
-
-// newRawNodesResult creates a new
-func newRawNodesResult(shards uint32) *rawNodesResult {
-	return &rawNodesResult{
-		results:    make([][][]string, shards),
-		timeStamps: make([][]time.Time, shards),
-		errs:       make([]*multierror.Error, shards),
-	}
-}
-
-// rawNodeColPrefix is the prefix of column names.
-const rawNodeColPrefix = cfCommit + ":"
-
-// Add implements the shardedResults interface.
-func (r *rawNodesResult) Add(shard uint32, row bigtable.Row) error {
-	var commitHash string
-	var parents []string
-	var timeStamp bigtable.Timestamp
-	for _, col := range row[cfCommit] {
-		switch strings.TrimPrefix(col.Column, rawNodeColPrefix) {
-		case colHash:
-			commitHash = string(col.Value)
-			timeStamp = col.Timestamp
-		case colParents:
-			if len(col.Value) > 0 {
-				parents = strings.Split(string(col.Value), ":")
-			}
 		}
+		s.results[shard] = append(s.results[shard], ic)
+		s.timestamps[shard][ic] = col.Timestamp.Time().UTC()
 	}
-	hp := make([]string, 0, 1+len(parents))
-	hp = append(hp, commitHash)
-	hp = append(hp, parents...)
-	r.results[shard] = append(r.results[shard], hp)
-	r.timeStamps[shard] = append(r.timeStamps[shard], timeStamp.Time())
 	return nil
 }
-
-// Add implements the shardedResults interface.
-func (r *rawNodesResult) Finish(shard uint32) {
-	atomic.AddInt64(&r.retSize, int64(len(r.results)))
-}
-
-// Merge merges the results of all shards into one slice of string slices. These are unordered
-// since structure will be imposed by building a CommitGraph from it.
-func (r *rawNodesResult) Merge() ([][]string, []time.Time) {
-	ret := make([][]string, 0, r.retSize)
-	timeStamps := make([]time.Time, 0, r.retSize)
-	for idx, shardResults := range r.results {
-		ret = append(ret, shardResults...)
-		timeStamps = append(timeStamps, r.timeStamps[idx]...)
-	}
-	return ret, timeStamps
-}
diff --git a/go/gitstore/bt_gitstore/types.go b/go/gitstore/bt_gitstore/types.go
index 0acdb28..08c9b2d 100644
--- a/go/gitstore/bt_gitstore/types.go
+++ b/go/gitstore/bt_gitstore/types.go
@@ -2,8 +2,9 @@
 
 // BTConfig contains the BigTable configuration to define where the repo should be stored.
 type BTConfig struct {
-	ProjectID  string
-	InstanceID string
-	TableID    string
-	Shards     int
+	ProjectID       string
+	InstanceID      string
+	TableID         string
+	Shards          int
+	WriteGoroutines int
 }
diff --git a/go/gitstore/graph.go b/go/gitstore/graph.go
deleted file mode 100644
index f13b13f..0000000
--- a/go/gitstore/graph.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package gitstore
-
-import (
-	"time"
-)
-
-const (
-	// initialGraphSize is the assumed starting number of commits in a repository. Just so we
-	// don't start with an empty data structure when we building or traversing the graph.
-	initialGraphSize = 100000
-)
-
-// BuildGraph takes a rawGraph (a slice where each element contains a commit hash followed by its
-// parents) and returns an instance of CommitGraph.
-// TODO(kjlubick,borenet): can this be replaced by go/git/repograph.Graph ?
-func BuildGraph(rawGraph [][]string, timeStamps []time.Time) *CommitGraph {
-	nodeMap := make(map[string]*Node, len(rawGraph))
-	for idx, rawNode := range rawGraph {
-		hash := rawNode[0]
-		nodeMap[hash] = &Node{
-			Hash:      hash,
-			Parents:   make([]*Node, len(rawNode)-1),
-			Timestamp: timeStamps[idx],
-		}
-	}
-
-	for _, rawNode := range rawGraph {
-		for idx, p := range rawNode[1:] {
-			nodeMap[rawNode[0]].Parents[idx] = nodeMap[p]
-		}
-	}
-
-	return &CommitGraph{
-		Nodes: nodeMap,
-	}
-}
-
-// CommitGraph contains commits as Nodes that are connected and thus can be traversed.
-// Given a graph a client can retrieve a specific node and traverse the graph like this:
-//    // First-parent traversal
-//    node := graph.GetNode(someHash)
-//    for node != nil {
-//        // so something with the commit
-//        node = node.Parents[0]
-//    }
-//
-type CommitGraph struct {
-	Nodes map[string]*Node
-}
-
-// GetNode returns the node in the graph that corresponds to the given hash or nil
-func (c *CommitGraph) GetNode(hash string) *Node {
-	return c.Nodes[hash]
-}
-
-// Node is a node in the commit graph that contains the commit hash, a timestamp and pointers to
-// its parent nodes. The first parent is the immediate parent in the same branch (like in Git).
-type Node struct {
-	Hash      string
-	Timestamp time.Time
-	Parents   []*Node
-}
-
-// DescendantChain returns all nodes in the commit graph in the range of
-// (firstAncestor, lastDescendant) where the parameters are both commit hashes.
-// 'firstAncestor' can be "" in which case it will return all ancestors of 'lastDescendant'.
-// 'lastDescendant' must not be empty and must exist in graph or this will panic.
-func (g *CommitGraph) DecendantChain(firstAncestor, lastDescendant string) []*Node {
-	curr := g.Nodes[lastDescendant]
-	ret := make([]*Node, 0, len(g.Nodes))
-	for curr != nil {
-		ret = append(ret, curr)
-		if (len(curr.Parents) == 0) || (curr.Hash == firstAncestor) {
-			break
-		}
-		curr = curr.Parents[0]
-	}
-
-	// Reverse the result
-	for idx := 0; idx < len(ret)/2; idx++ {
-		rightIdx := len(ret) - 1 - idx
-		ret[idx], ret[rightIdx] = ret[rightIdx], ret[idx]
-	}
-	return ret
-}
diff --git a/go/gitstore/mocks/GitStore.go b/go/gitstore/mocks/GitStore.go
index 9e11d1f..03904c1 100644
--- a/go/gitstore/mocks/GitStore.go
+++ b/go/gitstore/mocks/GitStore.go
@@ -64,29 +64,6 @@
 	return r0, r1
 }
 
-// GetGraph provides a mock function with given fields: ctx
-func (_m *GitStore) GetGraph(ctx context.Context) (*gitstore.CommitGraph, error) {
-	ret := _m.Called(ctx)
-
-	var r0 *gitstore.CommitGraph
-	if rf, ok := ret.Get(0).(func(context.Context) *gitstore.CommitGraph); ok {
-		r0 = rf(ctx)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*gitstore.CommitGraph)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
-		r1 = rf(ctx)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
 // Put provides a mock function with given fields: ctx, commits
 func (_m *GitStore) Put(ctx context.Context, commits []*vcsinfo.LongCommit) error {
 	ret := _m.Called(ctx, commits)
diff --git a/go/gitstore/repo_impl.go b/go/gitstore/repo_impl.go
index e66b08d..6accb6b 100644
--- a/go/gitstore/repo_impl.go
+++ b/go/gitstore/repo_impl.go
@@ -49,7 +49,7 @@
 	}
 	branches := make([]*git.Branch, 0, len(branchPtrs))
 	for name, ptr := range branchPtrs {
-		if name != "" {
+		if name != ALL_BRANCHES {
 			branches = append(branches, &git.Branch{
 				Name: name,
 				Head: ptr.Head,
@@ -60,7 +60,7 @@
 	from := g.lastUpdate.Add(-10 * time.Minute)
 	now := time.Now()
 	to := now.Add(time.Second)
-	indexCommits, err := g.gs.RangeByTime(ctx, from, to, "")
+	indexCommits, err := g.gs.RangeByTime(ctx, from, to, ALL_BRANCHES)
 	if err != nil {
 		return skerr.Wrapf(err, "Failed to read IndexCommits from GitStore")
 	}
@@ -92,7 +92,7 @@
 	// Update() should have pre-fetched all of the commits for us, so we
 	// shouldn't have hit this code. Log a warning and fall back to
 	// retrieving the commit from GitStore.
-	sklog.Warningf("Commit %q not found in results; performing explicit lookup.", hash)
+	sklog.Warningf("Commit %q not found in cache; performing explicit lookup.", hash)
 	got, err := g.gs.Get(ctx, []string{hash})
 	if err != nil {
 		return nil, skerr.Wrapf(err, "Failed to read commit %s from GitStore", hash)
diff --git a/go/gitstore/repo_impl_test.go b/go/gitstore/repo_impl_test.go
index 2d1233f..d32d248 100644
--- a/go/gitstore/repo_impl_test.go
+++ b/go/gitstore/repo_impl_test.go
@@ -72,7 +72,7 @@
 
 // See documentation for GitStore interface.
 func (gs *testGitStore) RangeByTime(ctx context.Context, start, end time.Time, branch string) ([]*vcsinfo.IndexCommit, error) {
-	if branch != "" {
+	if branch != ALL_BRANCHES {
 		return nil, skerr.Fmt("RangeByTime not implemented for single branches in testGitStore.")
 	}
 	rv := make([]*vcsinfo.IndexCommit, 0, len(gs.commits))
@@ -87,11 +87,6 @@
 	return rv, nil
 }
 
-// See documentation for GitStore interface.
-func (gs *testGitStore) GetGraph(ctx context.Context) (*CommitGraph, error) {
-	return nil, skerr.Fmt("GetGraph not implemented for testGitStore.")
-}
-
 // gitstoreRefresher is an implementation of repograph_shared_tests.RepoImplRefresher
 // used for testing a GitStore.
 type gitstoreRefresher struct {
@@ -121,7 +116,7 @@
 	oldBranches, err := u.gs.GetBranches(ctx)
 	assert.NoError(u.t, err)
 	for name := range oldBranches {
-		if name == "" {
+		if name == ALL_BRANCHES {
 			continue
 		}
 		if _, ok := putBranches[name]; !ok {
diff --git a/go/gitstore/testutils/bt_testutils.go b/go/gitstore/testutils/bt_testutils.go
index 3460796..dc277a9 100644
--- a/go/gitstore/testutils/bt_testutils.go
+++ b/go/gitstore/testutils/bt_testutils.go
@@ -3,21 +3,21 @@
 import (
 	"context"
 	"fmt"
-	"time"
+	"sort"
 
-	"github.com/stretchr/testify/assert"
+	assert "github.com/stretchr/testify/require"
 	"go.skia.org/infra/go/bt"
-	"go.skia.org/infra/go/git/gitinfo"
+	"go.skia.org/infra/go/git/repograph"
 	"go.skia.org/infra/go/gitstore"
 	"go.skia.org/infra/go/gitstore/bt_gitstore"
-	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/sktest"
 	"go.skia.org/infra/go/timer"
+	"go.skia.org/infra/go/util"
 	"go.skia.org/infra/go/vcsinfo"
 )
 
 const (
-	concurrentWrites = 1000
+	batchSize = 1000
 )
 
 var (
@@ -28,9 +28,8 @@
 	}
 )
 
-// SetupAndLoadBTGitStore loads the Git repo in repoDir into the Gitstore. It assumes that the
-// repo is checked out. repoURL is only used for creating the GitStore.
-func SetupAndLoadBTGitStore(t sktest.TestingT, repoURL, repoDir string, load bool) ([]*vcsinfo.IndexCommit, []*vcsinfo.LongCommit, *bt_gitstore.BigTableGitStore) {
+// SetupAndLoadBTGitStore loads the Git repo at repoURL into the GitStore.
+func SetupAndLoadBTGitStore(t sktest.TestingT, ctx context.Context, workdir, repoURL string, load bool) ([]*vcsinfo.IndexCommit, []*vcsinfo.LongCommit, *bt_gitstore.BigTableGitStore) {
 	if load {
 		// Delete the tables.
 		assert.NoError(t, bt.DeleteTables(btConf.ProjectID, btConf.InstanceID, btConf.TableID))
@@ -38,38 +37,52 @@
 	}
 
 	// Get a new gitstore.
-	gitStore, err := bt_gitstore.New(context.TODO(), btConf, repoURL)
+	gitStore, err := bt_gitstore.New(ctx, btConf, repoURL)
 	assert.NoError(t, err)
 
-	// Get the commits of the last ~20 years and load them into the GitStore
-	timeDelta := time.Hour * 24 * 365 * 20
+	// Get all commits and load them into the GitStore.
 	tLoad := timer.New("Loading all commits")
-	indexCommits, longCommits := loadGitRepo(t, repoDir, gitStore, timeDelta, load)
+	graph, err := repograph.NewLocalGraph(ctx, repoURL, workdir)
+	assert.NoError(t, err)
+	assert.NoError(t, graph.Update(ctx))
+	graph.UpdateBranchInfo()
+	indexCommits, longCommits := loadGitRepo(t, ctx, graph, gitStore, load)
 	tLoad.Stop()
 
 	return indexCommits, longCommits, gitStore
 }
 
-type commitInfo struct {
-	commits []*vcsinfo.LongCommit
-	indices []int
-}
+func loadGitRepo(t sktest.TestingT, ctx context.Context, graph *repograph.Graph, gitStore gitstore.GitStore, load bool) ([]*vcsinfo.IndexCommit, []*vcsinfo.LongCommit) {
+	branchList := graph.BranchHeads()
+	branches := make(map[string]string, len(branchList))
+	for _, branch := range branchList {
+		branches[branch.Name] = branch.Head
+	}
+	commitsMap := graph.GetAll()
+	commits := make([]*repograph.Commit, 0, len(commitsMap))
+	for _, c := range commitsMap {
+		commits = append(commits, c)
+	}
+	sort.Sort(repograph.CommitSlice(commits))
+	indexCommits := make([]*vcsinfo.IndexCommit, 0, len(commits))
+	longCommits := make([]*vcsinfo.LongCommit, 0, len(commits))
+	for i := len(commits) - 1; i >= 0; i-- {
+		c := commits[i]
+		indexCommits = append(indexCommits, &vcsinfo.IndexCommit{
+			Hash:      c.Hash,
+			Index:     len(indexCommits),
+			Timestamp: c.Timestamp,
+		})
+		longCommits = append(longCommits, c.LongCommit)
+	}
 
-func loadGitRepo(t sktest.TestingT, repoDir string, gitStore gitstore.GitStore, timeDelta time.Duration, load bool) ([]*vcsinfo.IndexCommit, []*vcsinfo.LongCommit) {
-	ctx := context.TODO()
-	commitCh := make(chan *commitInfo, 10)
-	indexCommits, branches := iterateCommits(t, repoDir, concurrentWrites, commitCh, timeDelta)
-	longCommits := make([]*vcsinfo.LongCommit, 0, len(indexCommits))
-
-	for ci := range commitCh {
-		assert.True(t, len(ci.commits) > 0)
-		longCommits = append(longCommits, ci.commits...)
-		if load {
-			// Add the commits.
-			putT := timer.New(fmt.Sprintf("Put %d commits.", len(ci.commits)))
-			assert.NoError(t, gitStore.Put(ctx, ci.commits))
-			putT.Stop()
-		}
+	if load && len(longCommits) > 0 {
+		// Add the commits.
+		assert.NoError(t, util.ChunkIter(len(longCommits), batchSize, func(start, end int) error {
+			putT := timer.New(fmt.Sprintf("Put %d commits.", end-start))
+			defer putT.Stop()
+			return gitStore.Put(ctx, longCommits[start:end])
+		}))
 	}
 
 	for name, head := range branches {
@@ -77,65 +90,11 @@
 		assert.NoError(t, err)
 		if details[0] == nil {
 			delete(branches, name)
-		} else {
-			sklog.Infof("Found branches: %40s  :  %s", name, head)
 		}
 	}
 
-	if load {
+	if load && len(branches) > 0 {
 		assert.NoError(t, gitStore.PutBranches(ctx, branches))
 	}
 	return indexCommits, longCommits
 }
-
-// iterateCommits returns batches of commits via a channel. It returns all IndexCommits within
-// the given timeDelta.
-func iterateCommits(t sktest.TestingT, repoDir string, maxCount int, targetCh chan<- *commitInfo, timeDelta time.Duration) ([]*vcsinfo.IndexCommit, map[string]string) {
-	gitInfo, err := gitinfo.NewGitInfo(context.TODO(), repoDir, false, true)
-	assert.NoError(t, err)
-
-	start := time.Now().Add(-timeDelta)
-	indexCommits := gitInfo.Range(start, time.Now().Add(time.Hour))
-	sklog.Infof("Index commits: %d", len(indexCommits))
-
-	gitBranches, err := gitInfo.GetBranches(context.TODO())
-	assert.NoError(t, err)
-
-	// Keep track of the branches.
-	branches := map[string]string{}
-	for _, gb := range gitBranches {
-		branches[gb.Name] = gb.Head
-	}
-
-	go func() {
-		ctx := context.TODO()
-		longCommits := make([]*vcsinfo.LongCommit, 0, maxCount)
-		indices := make([]int, 0, maxCount)
-		retIdx := 0
-		batchTimer := timer.New("Fetching commits starting with 0")
-		for idx, indexCommit := range indexCommits {
-			commitDetails, err := gitInfo.Details(ctx, indexCommit.Hash, false)
-			if err != nil {
-				sklog.Fatalf("Error fetching commits: %s", err)
-			}
-			longCommits = append(longCommits, commitDetails)
-			indices = append(indices, indexCommit.Index)
-			if len(longCommits) >= maxCount || idx == (len(indexCommits)-1) {
-				batchTimer.Stop()
-				targetCh <- &commitInfo{
-					commits: longCommits,
-					indices: indices,
-				}
-				longCommits = make([]*vcsinfo.LongCommit, 0, maxCount)
-				indices = make([]int, 0, maxCount)
-				retIdx = 0
-				batchTimer = timer.New(fmt.Sprintf("Fetching commits starting with %d", idx))
-			} else {
-				retIdx++
-			}
-		}
-		batchTimer.Stop()
-		close(targetCh)
-	}()
-	return indexCommits, branches
-}
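
A minimal sketch of a caller under the new helper signature, which now takes a context, a working directory for the local checkout, and the repo URL instead of a pre-existing repoDir. The test name and repo URL below are placeholders; a real test would pass "file://" plus a local test repo, as setupVCSLocalRepo does further down.

package testutils_example

import (
	"context"
	"io/ioutil"
	"testing"

	assert "github.com/stretchr/testify/require"
	gs_testutils "go.skia.org/infra/go/gitstore/testutils"
	"go.skia.org/infra/go/util"
)

// TestLoadIntoGitStore is a hypothetical test demonstrating the new call shape.
func TestLoadIntoGitStore(t *testing.T) {
	ctx := context.Background()
	// The helper checks the repo out under this working directory.
	wd, err := ioutil.TempDir("", "")
	assert.NoError(t, err)
	defer util.RemoveAll(wd)

	// "file:///path/to/test/repo" is a placeholder URL.
	indexCommits, longCommits, btgs := gs_testutils.SetupAndLoadBTGitStore(
		t, ctx, wd, "file:///path/to/test/repo", true /* load */)
	assert.Equal(t, len(indexCommits), len(longCommits))
	assert.NotNil(t, btgs)
}
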
diff --git a/go/gitstore/types.go b/go/gitstore/types.go
index 7cce3a3..a916da7 100644
--- a/go/gitstore/types.go
+++ b/go/gitstore/types.go
@@ -8,6 +8,10 @@
 )
 
 const (
+	// ALL_BRANCHES is a placeholder which can be used to retrieve IndexCommits
+	// for every branch, as opposed to just one.
+	ALL_BRANCHES = "@all-commits"
+
 	// DELETE_BRANCH is a placeholder which can be used as a value in the
 	// branch map passed to GitStore.PutBranches to signify that the branch
 	// should be deleted.
@@ -19,7 +23,9 @@
 // implementation.
 type GitStore interface {
 	// Put stores the given commits. They can be retrieved in order of timestamps by using
-	// RangeByTime or RangeN (no topological ordering).
+	// RangeByTime or RangeN (no topological ordering). The Index and Branches fields
+	// on the commits must be correct, or the results of RangeN and RangeByTime will be
+	// incorrect.
 	Put(ctx context.Context, commits []*vcsinfo.LongCommit) error
 
 	// Get retrieves the commits identified by 'hashes'. The return value will always have the
@@ -29,13 +35,10 @@
 	// if the given hashes do not exist or are invalid.
 	Get(ctx context.Context, hashes []string) ([]*vcsinfo.LongCommit, error)
 
-	// PutBranches updates branches in the repository. It writes indices for the branches so they
-	// can be retrieved via RangeN and RangeByTime. These are ordered in toplogical order with only
-	// first-parents included.
-	// 'branches' maps branchName -> commit_hash to indicate the head of a branch. The store then
-	// calculates the commits of the branch and updates the indices accordingly. Branches which
-	// already exist in the GitStore are not removed if not present in 'branches'; if DELETE_BRANCH
-	// string is used as the head instead of a commit hash, then the branch is removed.
+	// PutBranches updates the given branch heads in the GitStore. The 'branches' parameter
+	// maps branch name to commit hash to indicate the head of a branch. All of the referenced
+	// commits must already exist in the GitStore. If the DELETE_BRANCH string is used instead
+	// of a commit hash, then the branch is removed.
 	PutBranches(ctx context.Context, branches map[string]string) error
 
 	// GetBranches returns the current branches in the store. It maps[branchName]->BranchPointer.
@@ -43,19 +46,22 @@
 	// usually the total number of commits in the branch minus 1.
 	GetBranches(ctx context.Context) (map[string]*BranchPointer, error)
 
-	// RangeN returns all commits in the half open index range [startIndex, endIndex).
-	// Thus not including endIndex. It returns the commits in the given branch sorted in ascending
-	// order by Index and the commits are topologically sorted only including first-parent commits.
+	// RangeN returns all commits in the half open index range [startIndex, endIndex), thus not
+	// including endIndex. It returns the commits on the given branch, sorted in ascending
+	// order by Index; only commits on the first-parent ancestry chain are included, per the
+	// definition of vcsinfo.IndexCommit. This does not make sense for branch == ALL_BRANCHES,
+	// because different lines of history may use the same indexes. Therefore, the results of
+	// RangeN for ALL_BRANCHES may not be complete or correct.
 	RangeN(ctx context.Context, startIndex, endIndex int, branch string) ([]*vcsinfo.IndexCommit, error)
 
-	// RangeByTime returns all commits in the half open time range [start, end). Thus not
-	// including commits at 'end' time.
+	// RangeByTime returns all commits in the half open time range [start, end), thus not
+	// including commits at 'end' time. Set branch = ALL_BRANCHES to retrieve all commits
+	// for every branch within the specified range.
 	// Caveat: The returned results will match the requested range, but will be sorted by Index.
-	// So if the timestamps within a commit are not in order they will be unordered in the results.
+	// So if the timestamps of the commits within a branch are not in order, they will be
+	// unordered in the results. In the case of branch == ALL_BRANCHES, some indexes may be
+	// repeated, because different lines of history may use the same indexes.
 	RangeByTime(ctx context.Context, start, end time.Time, branch string) ([]*vcsinfo.IndexCommit, error)
-
-	// GetGraph returns the commit graph of the entire repository.
-	GetGraph(ctx context.Context) (*CommitGraph, error)
 }
 
 // GitStoreBased is an interface that tags an object as being based on GitStore and the
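
To make the tightened contract concrete, here is a minimal usage sketch, assuming the commits already carry valid Index and Branches values (e.g. produced by a repograph walk as in bt_testutils above). The helper name and the branch name "master" are illustrative only.

package gitstore_example

import (
	"context"
	"time"

	"go.skia.org/infra/go/gitstore"
	"go.skia.org/infra/go/vcsinfo"
)

// storeAndQuery is a hypothetical helper showing the expected call sequence
// against the updated interface.
func storeAndQuery(ctx context.Context, gs gitstore.GitStore, commits []*vcsinfo.LongCommit, heads map[string]string) ([]*vcsinfo.IndexCommit, error) {
	// Put no longer derives Index/Branches itself; the caller must supply them.
	if err := gs.Put(ctx, commits); err != nil {
		return nil, err
	}
	// Every head referenced here must already have been stored via Put.
	if err := gs.PutBranches(ctx, heads); err != nil {
		return nil, err
	}
	// Index-based ranges are meaningful per branch; "master" is illustrative.
	if _, err := gs.RangeN(ctx, 0, 100, "master"); err != nil {
		return nil, err
	}
	// Time-based ranges accept ALL_BRANCHES, with the caveat that indexes may
	// repeat across different lines of history.
	return gs.RangeByTime(ctx, time.Time{}, time.Now(), gitstore.ALL_BRANCHES)
}
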
diff --git a/go/vcsinfo/bt_vcs/bt_vcs.go b/go/vcsinfo/bt_vcs/bt_vcs.go
index e4584c2..e0a014d 100644
--- a/go/vcsinfo/bt_vcs/bt_vcs.go
+++ b/go/vcsinfo/bt_vcs/bt_vcs.go
@@ -55,7 +55,7 @@
 
 // NewVCS returns an instance of vcsinfo.VCS that is backed by the given GitStore and uses the
 // gitiles.Repo to retrieve files. Each instance provides an interface to one branch.
-// If defaultBranch is "" all commits in the repository are considered.
+// If defaultBranch is gitstore.ALL_BRANCHES, all commits in the repository are considered.
 // The instance of gitiles.Repo is only used to fetch files.
 func New(ctx context.Context, gitStore gitstore.GitStore, defaultBranch string, repo *gitiles.Repo) (*BigTableVCS, error) {
 	if gitStore == nil {
@@ -91,7 +91,7 @@
 	// Check if we need to pull across all branches.
 	targetBranch := b.defaultBranch
 	if allBranches {
-		targetBranch = ""
+		targetBranch = gitstore.ALL_BRANCHES
 	}
 
 	// Simulate a pull by fetching the latest head of the target branch.
@@ -288,7 +288,7 @@
 	var mutex sync.Mutex
 	var egroup errgroup.Group
 	for branchName := range allBranches {
-		if branchName != "" {
+		if branchName != gitstore.ALL_BRANCHES {
 			func(branchName string) {
 				egroup.Go(func() error {
 					// Since we cannot look up a commit in a branch directly we query for all commits that
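
As a small sketch of the call-site change: the "all branches" view is now requested with gitstore.ALL_BRANCHES instead of the empty string. The wrapper below is hypothetical; passing nil for the gitiles.Repo simply disables file fetching, as setupVCSLocalRepo does in the test below.

package btvcs_example

import (
	"context"

	"go.skia.org/infra/go/gitstore"
	"go.skia.org/infra/go/vcsinfo/bt_vcs"
)

// newAllBranchesVCS is a hypothetical wrapper that builds a VCS view spanning
// every branch in the GitStore.
func newAllBranchesVCS(ctx context.Context, gs gitstore.GitStore) (*bt_vcs.BigTableVCS, error) {
	// nil gitiles.Repo: file fetching is unavailable, which is fine for tests.
	return bt_vcs.New(ctx, gs, gitstore.ALL_BRANCHES, nil)
}
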
diff --git a/go/vcsinfo/bt_vcs/bt_vcs_test.go b/go/vcsinfo/bt_vcs/bt_vcs_test.go
index 6e2bb82..a3018d4 100644
--- a/go/vcsinfo/bt_vcs/bt_vcs_test.go
+++ b/go/vcsinfo/bt_vcs/bt_vcs_test.go
@@ -2,6 +2,7 @@
 
 import (
 	"context"
+	"io/ioutil"
 	"testing"
 	"time"
 
@@ -12,6 +13,7 @@
 	"go.skia.org/infra/go/gitstore/mocks"
 	gs_testutils "go.skia.org/infra/go/gitstore/testutils"
 	"go.skia.org/infra/go/testutils/unittest"
+	"go.skia.org/infra/go/util"
 	"go.skia.org/infra/go/vcsinfo"
 	vcs_testutils "go.skia.org/infra/go/vcsinfo/testutils"
 )
@@ -37,14 +39,14 @@
 
 func TestBranchInfo(t *testing.T) {
 	unittest.LargeTest(t)
-	vcs, gitStore, cleanup := setupVCSLocalRepo(t, "")
+	vcs, gitStore, cleanup := setupVCSLocalRepo(t, gitstore.ALL_BRANCHES)
 	defer cleanup()
 
 	branchPointers, err := gitStore.GetBranches(context.Background())
 	assert.NoError(t, err)
 	branches := []string{}
 	for branchName := range branchPointers {
-		if branchName != "" {
+		if branchName != gitstore.ALL_BRANCHES {
 			branches = append(branches, branchName)
 		}
 	}
@@ -260,10 +262,16 @@
 // setupVCSLocalRepo loads the test repo into a new GitStore and returns an instance of vcsinfo.VCS.
 func setupVCSLocalRepo(t *testing.T, branch string) (vcsinfo.VCS, gitstore.GitStore, func()) {
 	repoDir, cleanup := vcs_testutils.InitTempRepo()
-	_, _, btgs := gs_testutils.SetupAndLoadBTGitStore(t, localRepoURL, repoDir, true)
-	vcs, err := New(context.Background(), btgs, branch, nil)
+	wd, err := ioutil.TempDir("", "")
 	assert.NoError(t, err)
-	return vcs, btgs, cleanup
+	ctx := context.Background()
+	_, _, btgs := gs_testutils.SetupAndLoadBTGitStore(t, ctx, wd, "file://"+repoDir, true)
+	vcs, err := New(ctx, btgs, branch, nil)
+	assert.NoError(t, err)
+	return vcs, btgs, func() {
+		util.RemoveAll(wd)
+		cleanup()
+	}
 }
 
 func startWithEmptyCache(mg *mocks.GitStore) {
diff --git a/go/vcsinfo/types.go b/go/vcsinfo/types.go
index 857848e..105b059 100644
--- a/go/vcsinfo/types.go
+++ b/go/vcsinfo/types.go
@@ -59,6 +59,19 @@
 func (s LongCommitSlice) Less(i, j int) bool { return s[i].Timestamp.After(s[j].Timestamp) }
 func (s LongCommitSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 
+// IndexCommitSlice represents a slice of IndexCommit objects used for sorting
+// commits by index, then by timestamp, then by hash.
+type IndexCommitSlice []*IndexCommit
+
+func (s IndexCommitSlice) Len() int { return len(s) }
+func (s IndexCommitSlice) Less(i, j int) bool {
+	return s[i].Index < s[j].Index ||
+		((s[i].Index == s[j].Index) && s[i].Timestamp.Before(s[j].Timestamp)) ||
+		((s[i].Index == s[j].Index) && s[i].Timestamp.Equal(s[j].Timestamp) &&
+			(s[i].Hash < s[j].Hash))
+}
+func (s IndexCommitSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
 var NoSecondaryRepo = errors.New("No secondary repo configured for this vcsinfo")
 
 // VCS is a generic interface to the information contained in a version
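
A minimal sketch of the new slice type's intended use; the function name is illustrative, and the tie to ALL_BRANCHES queries is an inference from the surrounding changes.

package vcsinfo_example

import (
	"sort"

	"go.skia.org/infra/go/vcsinfo"
)

// sortIndexCommits orders commits by Index, then Timestamp, then Hash, giving
// a fully-determined ordering even when the same index appears more than once.
func sortIndexCommits(commits []*vcsinfo.IndexCommit) {
	sort.Sort(vcsinfo.IndexCommitSlice(commits))
}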