Switch perf to vcsinfo.VCS

In preparation for migrating to BigTable-based Git metadata: callers now take
the vcsinfo.VCS interface instead of a concrete *gitinfo.GitInfo.
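
As a minimal sketch of the pattern this CL applies (illustrative only; the
helper names headHash and fileChangeTimestamps are hypothetical and not part
of this change): callers depend on the vcsinfo.VCS interface for lookups such
as LastNIndex and ByIndex, while the few remaining raw "git log" call sites
type-assert back to the concrete *gitinfo.GitInfo until a BigTable-backed
implementation replaces them.

    // Illustrative sketch only; helper names are hypothetical, not part of this CL.
    package example

    import (
    	"context"
    	"fmt"
    	"strings"

    	"go.skia.org/infra/go/git/gitinfo"
    	"go.skia.org/infra/go/vcsinfo"
    )

    // headHash resolves the hash of the most recent commit using only the
    // vcsinfo.VCS interface, so any implementation (git- or BigTable-backed)
    // will work.
    func headHash(ctx context.Context, vcs vcsinfo.VCS) (string, error) {
    	last := vcs.LastNIndex(1)
    	if len(last) != 1 {
    		return "", fmt.Errorf("LastNIndex(1) returned no commits")
    	}
    	details, err := vcs.ByIndex(ctx, last[0].Index)
    	if err != nil {
    		return "", err
    	}
    	return details.Hash, nil
    }

    // fileChangeTimestamps shows the temporary escape hatch this CL keeps:
    // raw "git log" queries still need the concrete *gitinfo.GitInfo, reached
    // via a type assertion until a BigTable-backed VCS replaces them.
    func fileChangeTimestamps(ctx context.Context, vcs vcsinfo.VCS, begin, end, filename string) ([]string, error) {
    	g, ok := vcs.(*gitinfo.GitInfo)
    	if !ok {
    		return nil, fmt.Errorf("raw git log queries need *gitinfo.GitInfo")
    	}
    	log, err := g.LogFine(ctx, begin+"^", end, "--format=format:%ct", "--", filename)
    	if err != nil {
    		return nil, err
    	}
    	return strings.Split(log, "\n"), nil
    }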

Bug: skia:
Change-Id: Idcd826f6aea0e9f5fc00f6b9c8a9f7de78193612
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/199246
Commit-Queue: Stephan Altmueller <stephana@google.com>
Reviewed-by: Joe Gregorio <jcgregorio@google.com>
diff --git a/perf/go/cid/cid.go b/perf/go/cid/cid.go
index ba8f8c1..40aa649 100644
--- a/perf/go/cid/cid.go
+++ b/perf/go/cid/cid.go
@@ -99,7 +99,7 @@
 
 // CommitIDLookup allows getting CommitDetails from CommitIDs.
 type CommitIDLookup struct {
-	git *gitinfo.GitInfo
+	vcs vcsinfo.VCS
 
 	// mutex protects access to cache.
 	mutex sync.Mutex
@@ -116,7 +116,7 @@
 //
 // index is the index of the last commit id, or -1 if we don't know which
 // commit id we are on.
-func parseLogLine(ctx context.Context, s string, index *int, git *gitinfo.GitInfo) (*cacheEntry, error) {
+func parseLogLine(ctx context.Context, s string, index *int, vcs vcsinfo.VCS) (*cacheEntry, error) {
 	parts := strings.SplitN(s, " ", 4)
 	if len(parts) != 4 {
 		return nil, fmt.Errorf("Failed to parse parts of %q: %#v", s, parts)
@@ -130,7 +130,7 @@
 		return nil, fmt.Errorf("Can't parse timestamp %q: %s", ts, err)
 	}
 	if *index == -1 {
-		*index, err = git.IndexOf(ctx, hash)
+		*index, err = vcs.IndexOf(ctx, hash)
 		if err != nil {
 			return nil, fmt.Errorf("Failed to get index of %q: %s", hash, err)
 		}
@@ -151,9 +151,11 @@
 	defer timer.New("cid.warmCache time").Stop()
 	now := time.Now()
 
+	// TODO(jcgregorio) Remove the entire cache once we switch to a BigTable-backed vcsinfo.
+
 	// Extract ts, hash, author email, and subject from the git log.
 	since := now.Add(-365 * 24 * time.Hour).Format("2006-01-02")
-	log, err := c.git.LogArgs(ctx, "--since="+since, "--format=format:%ct %H %ae %s")
+	log, err := c.vcs.(*gitinfo.GitInfo).LogArgs(ctx, "--since="+since, "--format=format:%ct %H %ae %s")
 	if err != nil {
 		sklog.Errorf("Could not get log for --since=%q: %s", since, err)
 		return
@@ -164,7 +166,7 @@
 	var index int = -1
 	// Parse.
 	for _, s := range lines {
-		entry, err := parseLogLine(ctx, s, &index, c.git)
+		entry, err := parseLogLine(ctx, s, &index, c.vcs)
 		if err != nil {
 			sklog.Errorf("Failed to parse git log line %q: %s", s, err)
 			return
@@ -173,9 +175,9 @@
 	}
 }
 
-func New(ctx context.Context, git *gitinfo.GitInfo, gitRepoURL string) *CommitIDLookup {
+func New(ctx context.Context, vcs vcsinfo.VCS, gitRepoURL string) *CommitIDLookup {
 	cidl := &CommitIDLookup{
-		git:        git,
+		vcs:        vcs,
 		cache:      map[int]*cacheEntry{},
 		gitRepoURL: gitRepoURL,
 	}
@@ -202,7 +204,7 @@
 					Timestamp: entry.ts,
 				}
 			} else {
-				lc, err := c.git.ByIndex(ctx, cid.Offset)
+				lc, err := c.vcs.ByIndex(ctx, cid.Offset)
 				if err != nil {
 					return nil, fmt.Errorf("Failed to find match for cid %#v: %s", *cid, err)
 				}
diff --git a/perf/go/dataframe/async.go b/perf/go/dataframe/async.go
index a544cea..4a86167 100644
--- a/perf/go/dataframe/async.go
+++ b/perf/go/dataframe/async.go
@@ -19,6 +19,7 @@
 	"go.skia.org/infra/go/query"
 	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/util"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/go/vec32"
 	"go.skia.org/infra/perf/go/shortcut2"
 	"go.skia.org/infra/perf/go/types"
@@ -83,9 +84,9 @@
 	// request is read-only, it should not be modified.
 	request *FrameRequest
 
-	// git is for Git info. The value of the 'git' variable should not be
-	//   changed, but git is Go routine safe.
-	git *gitinfo.GitInfo
+	// vcs is for Git info. The value of the 'vcs' variable should not be
+	//   changed, but vcs is goroutine safe.
+	vcs vcsinfo.VCS
 
 	// dfBuilder builds DataFrame's.
 	dfBuilder DataFrameBuilder
@@ -106,7 +107,7 @@
 		numKeys = 1
 	}
 	ret := &FrameRequestProcess{
-		git:           fr.git,
+		vcs:           fr.vcs,
 		request:       req,
 		lastUpdate:    time.Now(),
 		state:         PROCESS_RUNNING,
@@ -124,7 +125,7 @@
 type RunningFrameRequests struct {
 	mutex sync.Mutex
 
-	git *gitinfo.GitInfo
+	vcs vcsinfo.VCS
 
 	dfBuilder DataFrameBuilder
 
@@ -133,9 +134,9 @@
 	inProcess map[string]*FrameRequestProcess
 }
 
-func NewRunningFrameRequests(git *gitinfo.GitInfo, dfBuilder DataFrameBuilder) *RunningFrameRequests {
+func NewRunningFrameRequests(vcs vcsinfo.VCS, dfBuilder DataFrameBuilder) *RunningFrameRequests {
 	fr := &RunningFrameRequests{
-		git:       git,
+		vcs:       vcs,
 		dfBuilder: dfBuilder,
 
 		inProcess: map[string]*FrameRequestProcess{},
@@ -290,10 +291,10 @@
 	}
 
 	if len(df.Header) == 0 {
-		df = NewHeaderOnly(p.git, begin, end, true)
+		df = NewHeaderOnly(p.vcs, begin, end, true)
 	}
 
-	resp, err := ResponseFromDataFrame(context.Background(), df, p.git, true, p.request.TZ)
+	resp, err := ResponseFromDataFrame(context.Background(), df, p.vcs, true, p.request.TZ)
 	if err != nil {
 		p.reportError(err, "Failed to get ticks or skps.")
 		return
@@ -307,11 +308,13 @@
 // getCommitTimesForFile returns a slice of Unix timestamps in seconds that are
 // the times that the given file changed in git between the given 'begin' and
 // 'end' hashes (inclusive).
-func getCommitTimesForFile(ctx context.Context, begin, end string, filename string, git *gitinfo.GitInfo) []int64 {
+func getCommitTimesForFile(ctx context.Context, begin, end string, filename string, vcs vcsinfo.VCS) []int64 {
 	ret := []int64{}
 
+	// TODO(jcgregorio): Replace with calls to the Gerrit API; this is only used by the Skia instance of perf.
+
 	// Now query for all the changes to the skp version over the given range of commits.
-	log, err := git.LogFine(ctx, begin+"^", end, "--format=format:%ct", "--", filename)
+	log, err := vcs.(*gitinfo.GitInfo).LogFine(ctx, begin+"^", end, "--format=format:%ct", "--", filename)
 	if err != nil {
 		sklog.Errorf("Could not get skp log for %s..%s -- %q: %s", begin, end, filename, err)
 		return ret
@@ -330,23 +333,23 @@
 
 // getSkps returns the indices where the SKPs have been updated given
 // the ColumnHeaders.
-func getSkps(ctx context.Context, headers []*ColumnHeader, git *gitinfo.GitInfo) ([]int, error) {
+func getSkps(ctx context.Context, headers []*ColumnHeader, vcs vcsinfo.VCS) ([]int, error) {
 	// We have Offsets, which need to be converted to git hashes.
-	ci, err := git.ByIndex(ctx, int(headers[0].Offset))
+	ci, err := vcs.ByIndex(ctx, int(headers[0].Offset))
 	if err != nil {
 		return nil, fmt.Errorf("Could not find commit for index %d: %s", headers[0].Offset, err)
 	}
 	begin := ci.Hash
-	ci, err = git.ByIndex(ctx, int(headers[len(headers)-1].Offset))
+	ci, err = vcs.ByIndex(ctx, int(headers[len(headers)-1].Offset))
 	if err != nil {
 		return nil, fmt.Errorf("Could not find commit for index %d: %s", headers[len(headers)-1].Offset, err)
 	}
 	end := ci.Hash
 
 	// Now query for all the changes to the skp version over the given range of commits.
-	ts := getCommitTimesForFile(ctx, begin, end, "infra/bots/assets/skp/VERSION", git)
+	ts := getCommitTimesForFile(ctx, begin, end, "infra/bots/assets/skp/VERSION", vcs)
 	// Add in the changes to the old skp version over the given range of commits.
-	ts = append(ts, getCommitTimesForFile(ctx, begin, end, "SKP_VERSION", git)...)
+	ts = append(ts, getCommitTimesForFile(ctx, begin, end, "SKP_VERSION", vcs)...)
 
 	// Sort because they are in reverse order.
 	sort.Sort(util.Int64Slice(ts))
@@ -377,7 +380,7 @@
 // If truncate is true then the number of traces returned is limited.
 //
 // tz is the timezone, and can be the empty string if the default (Eastern) timezone is acceptable.
-func ResponseFromDataFrame(ctx context.Context, df *DataFrame, git *gitinfo.GitInfo, truncate bool, tz string) (*FrameResponse, error) {
+func ResponseFromDataFrame(ctx context.Context, df *DataFrame, vcs vcsinfo.VCS, truncate bool, tz string) (*FrameResponse, error) {
 	if len(df.Header) == 0 {
 		return nil, fmt.Errorf("No commits matched that time range.")
 	}
@@ -389,7 +392,7 @@
 	ticks := human.FlotTickMarks(ts, tz)
 
 	// Determine where SKP changes occurred.
-	skps, err := getSkps(ctx, df.Header, git)
+	skps, err := getSkps(ctx, df.Header, vcs)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to load skps: %s", err)
 	}
diff --git a/perf/go/dataframe/fresh.go b/perf/go/dataframe/fresh.go
index 594ba5e..f88fc74 100644
--- a/perf/go/dataframe/fresh.go
+++ b/perf/go/dataframe/fresh.go
@@ -6,9 +6,9 @@
 	"sync"
 	"time"
 
-	"go.skia.org/infra/go/git/gitinfo"
 	"go.skia.org/infra/go/metrics2"
 	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/go/vcsinfo"
 )
 
 // Refresher keeps a fresh DataFrame.
@@ -19,7 +19,7 @@
 	numTiles  int
 	period    time.Duration
 	dfBuilder DataFrameBuilder
-	git       *gitinfo.GitInfo
+	vcs       vcsinfo.VCS
 
 	mutex sync.Mutex // protects df.
 	df    *DataFrame
@@ -31,12 +31,12 @@
 // A non-nil error will be returned if the initial DataFrame cannot be
 // populated. I.e. if NewRefresher returns w/o error than the caller
 // can be assured that Get() will return a non-nil DataFrame.
-func NewRefresher(git *gitinfo.GitInfo, dfBuilder DataFrameBuilder, period time.Duration, numTiles int) (*Refresher, error) {
+func NewRefresher(vcs vcsinfo.VCS, dfBuilder DataFrameBuilder, period time.Duration, numTiles int) (*Refresher, error) {
 	ret := &Refresher{
 		dfBuilder: dfBuilder,
 		period:    period,
 		numTiles:  numTiles,
-		git:       git,
+		vcs:       vcs,
 	}
 	if err := ret.oneStep(); err != nil {
 		return nil, fmt.Errorf("Failed to build the initial DataFrame: %s", err)
@@ -46,7 +46,7 @@
 }
 
 func (f *Refresher) oneStep() error {
-	if err := f.git.Update(context.Background(), true, false); err != nil {
+	if err := f.vcs.Update(context.Background(), true, false); err != nil {
 		return err
 	}
 	newDf, err := f.dfBuilder.NewKeysOnly(f.numTiles)
diff --git a/perf/go/dryrun/dryrun.go b/perf/go/dryrun/dryrun.go
index 0a21697..acce789 100644
--- a/perf/go/dryrun/dryrun.go
+++ b/perf/go/dryrun/dryrun.go
@@ -12,10 +12,10 @@
 	"time"
 
 	"github.com/gorilla/mux"
-	"go.skia.org/infra/go/git/gitinfo"
 	"go.skia.org/infra/go/httputils"
 	"go.skia.org/infra/go/metrics2"
 	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/perf/go/alerts"
 	"go.skia.org/infra/perf/go/cid"
 	"go.skia.org/infra/perf/go/dataframe"
@@ -65,18 +65,18 @@
 type Requests struct {
 	cidl           *cid.CommitIDLookup
 	dfBuilder      dataframe.DataFrameBuilder
-	git            *gitinfo.GitInfo
+	vcs            vcsinfo.VCS
 	paramsProvider regression.ParamsetProvider // TODO build the paramset from dfBuilder.
 	mutex          sync.Mutex
 	inFlight       map[string]*Running
 }
 
-func New(cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, paramsProvider regression.ParamsetProvider, git *gitinfo.GitInfo) *Requests {
+func New(cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, paramsProvider regression.ParamsetProvider, vcs vcsinfo.VCS) *Requests {
 	ret := &Requests{
 		cidl:           cidl,
 		dfBuilder:      dfBuilder,
 		paramsProvider: paramsProvider,
-		git:            git,
+		vcs:            vcs,
 		inFlight:       map[string]*Running{},
 	}
 	// Start a go routine to clean up old dry runs.
@@ -168,7 +168,7 @@
 				}
 			}
 			end := time.Unix(int64(req.Domain.End), 0)
-			regression.RegressionsForAlert(ctx, &req.Config, d.paramsProvider(), cb, int(req.Domain.NumCommits), end, d.git, d.cidl, d.dfBuilder, nil)
+			regression.RegressionsForAlert(ctx, &req.Config, d.paramsProvider(), cb, int(req.Domain.NumCommits), end, d.vcs, d.cidl, d.dfBuilder, nil)
 			running.mutex.Lock()
 			defer running.mutex.Unlock()
 			running.Finished = true
diff --git a/perf/go/regression/alert.go b/perf/go/regression/alert.go
index 65375ea..08e69a1 100644
--- a/perf/go/regression/alert.go
+++ b/perf/go/regression/alert.go
@@ -4,10 +4,10 @@
 	"context"
 	"time"
 
-	"go.skia.org/infra/go/git/gitinfo"
 	"go.skia.org/infra/go/metrics2"
 	"go.skia.org/infra/go/paramtools"
 	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/perf/go/alerts"
 	"go.skia.org/infra/perf/go/cid"
 	"go.skia.org/infra/perf/go/dataframe"
@@ -16,7 +16,7 @@
 // RegressionsForAlert looks for regressions to the given alert over the last
 // 'numContinuous' commits with data and periodically calls
 // clusterResponseProcessor with the results of checking each commit.
-func RegressionsForAlert(ctx context.Context, cfg *alerts.Config, ps paramtools.ParamSet, clusterResponseProcessor ClusterResponseProcessor, numContinuous int, end time.Time, git *gitinfo.GitInfo, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, stepProvider StepProvider) {
+func RegressionsForAlert(ctx context.Context, cfg *alerts.Config, ps paramtools.ParamSet, clusterResponseProcessor ClusterResponseProcessor, numContinuous int, end time.Time, vcs vcsinfo.VCS, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, stepProvider StepProvider) {
 	queriesCounter := metrics2.GetCounter("perf_clustering_queries", nil)
 	sklog.Infof("About to cluster for: %#v", *cfg)
 	queries, err := cfg.QueriesFromParamset(ps)
@@ -44,7 +44,7 @@
 			N:           int32(numContinuous),
 			End:         end,
 		}
-		_, err := Run(ctx, req, git, cidl, dfBuilder, clusterResponseProcessor)
+		_, err := Run(ctx, req, vcs, cidl, dfBuilder, clusterResponseProcessor)
 		if err != nil {
 			sklog.Warningf("Failed while clustering %v %s", *req, err)
 			continue
diff --git a/perf/go/regression/async.go b/perf/go/regression/async.go
index b91fdfa..3ffc285 100644
--- a/perf/go/regression/async.go
+++ b/perf/go/regression/async.go
@@ -9,8 +9,8 @@
 	"sync"
 	"time"
 
-	"go.skia.org/infra/go/git/gitinfo"
 	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/go/vec32"
 	"go.skia.org/infra/perf/go/cid"
 	"go.skia.org/infra/perf/go/clustering2"
@@ -88,7 +88,7 @@
 type ClusterRequestProcess struct {
 	// These members are read-only, should not be modified.
 	request                  *ClusterRequest
-	git                      *gitinfo.GitInfo
+	vcs                      vcsinfo.VCS
 	iter                     DataFrameIterator
 	clusterResponseProcessor ClusterResponseProcessor
 
@@ -100,10 +100,10 @@
 	message    string             // Describes the current state of the process.
 }
 
-func newProcess(ctx context.Context, req *ClusterRequest, git *gitinfo.GitInfo, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, clusterResponseProcessor ClusterResponseProcessor) (*ClusterRequestProcess, error) {
+func newProcess(ctx context.Context, req *ClusterRequest, vcs vcsinfo.VCS, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, clusterResponseProcessor ClusterResponseProcessor) (*ClusterRequestProcess, error) {
 	ret := &ClusterRequestProcess{
 		request:                  req,
-		git:                      git,
+		vcs:                      vcs,
 		clusterResponseProcessor: clusterResponseProcessor,
 		response:                 []*ClusterResponse{},
 		lastUpdate:               time.Now(),
@@ -112,7 +112,7 @@
 	}
 	if req.Type == CLUSTERING_REQUEST_TYPE_SINGLE {
 		// TODO(jcgregorio) This is awkward and should go away in a future CL.
-		ret.iter = NewSingleDataFrameIterator(ret.progress, cidl, git, req, dfBuilder)
+		ret.iter = NewSingleDataFrameIterator(ret.progress, cidl, vcs, req, dfBuilder)
 	} else {
 		// Create a single large dataframe then chop it into 2*radius+1 length sub-dataframes in the iterator.
 		iter, err := NewDataFrameIterator(ctx, ret.progress, req, dfBuilder)
@@ -125,8 +125,8 @@
 	return ret, nil
 }
 
-func newRunningProcess(ctx context.Context, req *ClusterRequest, git *gitinfo.GitInfo, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, clusterResponseProcessor ClusterResponseProcessor) (*ClusterRequestProcess, error) {
-	ret, err := newProcess(ctx, req, git, cidl, dfBuilder, clusterResponseProcessor)
+func newRunningProcess(ctx context.Context, req *ClusterRequest, vcs vcsinfo.VCS, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, clusterResponseProcessor ClusterResponseProcessor) (*ClusterRequestProcess, error) {
+	ret, err := newProcess(ctx, req, vcs, cidl, dfBuilder, clusterResponseProcessor)
 	if err != nil {
 		return nil, err
 	}
@@ -139,7 +139,7 @@
 // Once a ClusterRequestProcess is complete the results will be kept in memory
 // for MAX_FINISHED_PROCESS_AGE before being deleted.
 type RunningClusterRequests struct {
-	git                *gitinfo.GitInfo
+	vcs                vcsinfo.VCS
 	cidl               *cid.CommitIDLookup
 	defaultInteresting float32 // The threshold to control if a cluster is considered interesting.
 	dfBuilder          dataframe.DataFrameBuilder
@@ -151,9 +151,9 @@
 }
 
 // NewRunningClusterRequests return a new RunningClusterRequests.
-func NewRunningClusterRequests(git *gitinfo.GitInfo, cidl *cid.CommitIDLookup, interesting float32, dfBuilder dataframe.DataFrameBuilder) *RunningClusterRequests {
+func NewRunningClusterRequests(vcs vcsinfo.VCS, cidl *cid.CommitIDLookup, interesting float32, dfBuilder dataframe.DataFrameBuilder) *RunningClusterRequests {
 	fr := &RunningClusterRequests{
-		git:                git,
+		vcs:                vcs,
 		cidl:               cidl,
 		inProcess:          map[string]*ClusterRequestProcess{},
 		defaultInteresting: interesting,
@@ -203,7 +203,7 @@
 	}
 	clusterResponseProcessor := func(resps []*ClusterResponse) {}
 	if _, ok := fr.inProcess[id]; !ok {
-		proc, err := newRunningProcess(ctx, req, fr.git, fr.cidl, fr.dfBuilder, clusterResponseProcessor)
+		proc, err := newRunningProcess(ctx, req, fr.vcs, fr.cidl, fr.dfBuilder, clusterResponseProcessor)
 		if err != nil {
 			return "", err
 		}
@@ -386,7 +386,7 @@
 		}
 
 		df.TraceSet = types.TraceSet{}
-		frame, err := dataframe.ResponseFromDataFrame(ctx, df, p.git, false, p.request.TZ)
+		frame, err := dataframe.ResponseFromDataFrame(ctx, df, p.vcs, false, p.request.TZ)
 		if err != nil {
 			p.reportError(err, "Failed to convert DataFrame to FrameResponse.")
 			return
diff --git a/perf/go/regression/continuous.go b/perf/go/regression/continuous.go
index 4a179c6..d8887a3 100644
--- a/perf/go/regression/continuous.go
+++ b/perf/go/regression/continuous.go
@@ -7,11 +7,11 @@
 	"sync"
 	"time"
 
-	"go.skia.org/infra/go/git/gitinfo"
 	"go.skia.org/infra/go/metrics2"
 	"go.skia.org/infra/go/paramtools"
 	"go.skia.org/infra/go/query"
 	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/perf/go/alerts"
 	"go.skia.org/infra/perf/go/cid"
 	"go.skia.org/infra/perf/go/dataframe"
@@ -39,7 +39,7 @@
 // Continuous is used to run clustering on the last numCommits commits and
 // look for regressions.
 type Continuous struct {
-	git            *gitinfo.GitInfo
+	vcs            vcsinfo.VCS
 	cidl           *cid.CommitIDLookup
 	store          *Store
 	numCommits     int // Number of recent commits to do clustering over.
@@ -58,9 +58,9 @@
 //   provider - Produces the slice of alerts.Config's that determine the clustering to perform.
 //   numCommits - The number of commits to run the clustering over.
 //   radius - The number of commits on each side of a commit to include when clustering.
-func NewContinuous(git *gitinfo.GitInfo, cidl *cid.CommitIDLookup, provider ConfigProvider, store *Store, numCommits int, radius int, notifier *notify.Notifier, paramsProvider ParamsetProvider, dfBuilder dataframe.DataFrameBuilder) *Continuous {
+func NewContinuous(vcs vcsinfo.VCS, cidl *cid.CommitIDLookup, provider ConfigProvider, store *Store, numCommits int, radius int, notifier *notify.Notifier, paramsProvider ParamsetProvider, dfBuilder dataframe.DataFrameBuilder) *Continuous {
 	return &Continuous{
-		git:            git,
+		vcs:            vcs,
 		cidl:           cidl,
 		store:          store,
 		numCommits:     numCommits,
@@ -220,7 +220,7 @@
 			if cfg.Radius == 0 {
 				cfg.Radius = c.radius
 			}
-			RegressionsForAlert(ctx, cfg, c.paramsProvider(), clusterResponseProcessor, c.numCommits, time.Time{}, c.git, c.cidl, c.dfBuilder, c.setCurrentStep)
+			RegressionsForAlert(ctx, cfg, c.paramsProvider(), clusterResponseProcessor, c.numCommits, time.Time{}, c.vcs, c.cidl, c.dfBuilder, c.setCurrentStep)
 			configsCounter.Inc(1)
 		}
 		clusteringLatency.Stop()
diff --git a/perf/go/regression/sync.go b/perf/go/regression/sync.go
index 70ea8c6..00e6dbf 100644
--- a/perf/go/regression/sync.go
+++ b/perf/go/regression/sync.go
@@ -4,7 +4,7 @@
 	"context"
 	"fmt"
 
-	"go.skia.org/infra/go/git/gitinfo"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/perf/go/cid"
 	"go.skia.org/infra/perf/go/dataframe"
 )
@@ -13,8 +13,8 @@
 type ClusterResponseProcessor func([]*ClusterResponse)
 
 // Run takes a ClusterRequest and runs it to completion before returning the results.
-func Run(ctx context.Context, req *ClusterRequest, git *gitinfo.GitInfo, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, clusterResponseProcessor ClusterResponseProcessor) ([]*ClusterResponse, error) {
-	proc, err := newProcess(ctx, req, git, cidl, dfBuilder, clusterResponseProcessor)
+func Run(ctx context.Context, req *ClusterRequest, vcs vcsinfo.VCS, cidl *cid.CommitIDLookup, dfBuilder dataframe.DataFrameBuilder, clusterResponseProcessor ClusterResponseProcessor) ([]*ClusterResponse, error) {
+	proc, err := newProcess(ctx, req, vcs, cidl, dfBuilder, clusterResponseProcessor)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to start new clustering process: %s", err)
 	}
diff --git a/perf/go/skiaperf/main.go b/perf/go/skiaperf/main.go
index 2f32aab..f62b818 100644
--- a/perf/go/skiaperf/main.go
+++ b/perf/go/skiaperf/main.go
@@ -37,6 +37,7 @@
 	"go.skia.org/infra/go/sharedconfig"
 	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/util"
+	"go.skia.org/infra/go/vcsinfo"
 	"go.skia.org/infra/perf/go/activitylog"
 	"go.skia.org/infra/perf/go/alertfilter"
 	"go.skia.org/infra/perf/go/alerts"
@@ -75,7 +76,7 @@
 var (
 	activityHandlerPath = regexp.MustCompile(`/activitylog/([0-9]*)$`)
 
-	git *gitinfo.GitInfo = nil
+	vcs vcsinfo.VCS
 
 	cidl *cid.CommitIDLookup = nil
 )
@@ -262,7 +263,7 @@
 	*gitRepoUrl = btConfig.GitUrl
 
 	sklog.Info("About to clone repo.")
-	git, err = gitinfo.CloneOrUpdate(ctx, *gitRepoUrl, *gitRepoDir, false)
+	vcs, err = gitinfo.CloneOrUpdate(ctx, *gitRepoUrl, *gitRepoDir, false)
 	if err != nil {
 		sklog.Fatal(err)
 	}
@@ -273,13 +274,13 @@
 	if err != nil {
 		sklog.Fatalf("Failed to open trace store: %s", err)
 	}
-	dfBuilder = dfbuilder.NewDataFrameBuilderFromBTTS(git, traceStore)
+	dfBuilder = dfbuilder.NewDataFrameBuilderFromBTTS(vcs, traceStore)
 
 	sklog.Info("About to build cidl.")
-	cidl = cid.New(ctx, git, *gitRepoUrl)
+	cidl = cid.New(ctx, vcs, *gitRepoUrl)
 
 	sklog.Info("About to build dataframe refresher.")
-	freshDataFrame, err = dataframe.NewRefresher(git, dfBuilder, 15*time.Minute, *numTilesRefresher)
+	freshDataFrame, err = dataframe.NewRefresher(vcs, dfBuilder, 15*time.Minute, *numTilesRefresher)
 	if err != nil {
 		sklog.Fatalf("Failed to build the dataframe Refresher: %s", err)
 	}
@@ -299,19 +300,19 @@
 		notifier = notify.New(notify.NoEmail{}, *subdomain)
 	}
 
-	frameRequests = dataframe.NewRunningFrameRequests(git, dfBuilder)
-	clusterRequests = regression.NewRunningClusterRequests(git, cidl, float32(*interesting), dfBuilder)
+	frameRequests = dataframe.NewRunningFrameRequests(vcs, dfBuilder)
+	clusterRequests = regression.NewRunningClusterRequests(vcs, cidl, float32(*interesting), dfBuilder)
 	regStore = regression.NewStore()
 	configProvider = newAlertsConfigProvider(clusterAlgo)
 	paramsProvider := newParamsetProvider(freshDataFrame)
-	dryrunRequests = dryrun.New(cidl, dfBuilder, paramsProvider, git)
+	dryrunRequests = dryrun.New(cidl, dfBuilder, paramsProvider, vcs)
 
 	if *doClustering {
 		go func() {
 			for i := 0; i < *numContinuousParallel; i++ {
 				// Start running continuous clustering looking for regressions.
 				time.Sleep(time.Minute)
-				c := regression.NewContinuous(git, cidl, configProvider, regStore, *numContinuous, *radius, notifier, paramsProvider, dfBuilder)
+				c := regression.NewContinuous(vcs, cidl, configProvider, regStore, *numContinuous, *radius, notifier, paramsProvider, dfBuilder)
 				continuous = append(continuous, c)
 				go c.Run(context.Background())
 			}
@@ -438,7 +439,7 @@
 			end = rr.End
 		}
 	}
-	df := dataframe.NewHeaderOnly(git, time.Unix(begin, 0), time.Unix(end, 0), false)
+	df := dataframe.NewHeaderOnly(vcs, time.Unix(begin, 0), time.Unix(end, 0), false)
 
 	found := false
 	cids := []*cid.CommitID{}
@@ -780,21 +781,18 @@
 	query := r.Form
 	hash := mux.Vars(r)["hash"]
 	dest := mux.Vars(r)["dest"]
-	index, err := git.IndexOf(ctx, hash)
+	index, err := vcs.IndexOf(ctx, hash)
 	if err != nil {
 		httputils.ReportError(w, r, err, "Could not look up git hash.")
 		return
 	}
-	last := git.LastN(ctx, 1)
+	last := vcs.LastNIndex(1)
 	if len(last) != 1 {
-		httputils.ReportError(w, r, fmt.Errorf("gitinfo.LastN(1) returned 0 hashes."), "Failed to find last hash.")
+		httputils.ReportError(w, r, fmt.Errorf("VCS.LastNIndex(1) returned 0 commits."), "Failed to find last hash.")
 		return
 	}
-	lastIndex, err := git.IndexOf(ctx, last[0])
-	if err != nil {
-		httputils.ReportError(w, r, err, "Could not look up last git hash.")
-		return
-	}
+	lastIndex := last[0].Index
+
 	delta := config.GOTO_RANGE
 	// If redirecting to the Triage page then always show just a single commit.
 	if dest == "t" {
@@ -1063,7 +1061,7 @@
 	// Get a list of commits for the range.
 	var ids []*cid.CommitID
 	if rr.Subset == regression.ALL_SUBSET {
-		indexCommits := git.Range(time.Unix(rr.Begin, 0), time.Unix(rr.End, 0))
+		indexCommits := vcs.Range(time.Unix(rr.Begin, 0), time.Unix(rr.End, 0))
 		ids = make([]*cid.CommitID, 0, len(indexCommits))
 		for _, indexCommit := range indexCommits {
 			ids = append(ids, &cid.CommitID{
@@ -1265,7 +1263,7 @@
 		return
 	}
 	sklog.Infof("ShiftRequest: %#v", &sr)
-	commits := git.Range(time.Unix(sr.Begin, 0), time.Unix(sr.End, 0))
+	commits := vcs.Range(time.Unix(sr.Begin, 0), time.Unix(sr.End, 0))
 	if len(commits) == 0 {
 		httputils.ReportError(w, r, fmt.Errorf("No commits found in range."), "No commits found in range.")
 		return
@@ -1274,16 +1272,16 @@
 	if sr.RequestType == dataframe.REQUEST_COMPACT {
 		numCommits -= sr.BeginOffset
 	}
-	beginCommit, err := git.ByIndex(ctx, commits[0].Index+sr.BeginOffset)
+	beginCommit, err := vcs.ByIndex(ctx, commits[0].Index+sr.BeginOffset)
 	if err != nil {
 		httputils.ReportError(w, r, err, "Scrolled too far.")
 		return
 	}
 	var endCommitTs time.Time
-	endCommit, err := git.ByIndex(ctx, commits[len(commits)-1].Index+sr.EndOffset)
+	endCommit, err := vcs.ByIndex(ctx, commits[len(commits)-1].Index+sr.EndOffset)
 	if err != nil {
 		// We went too far, so just use the last index.
-		commits := git.LastNIndex(1)
+		commits := vcs.LastNIndex(1)
 		if len(commits) == 0 {
 			httputils.ReportError(w, r, err, "Scrolled too far.")
 			return