[goldctl] Add fetching hashes and expectations file from Gold
- Add fetching of hashes and expectations file to goldctl
- Refactor endpoints to fetch expectations on backend
- Factor out Baseliner into a separate type in preparation of
more changes to manage expectations on the backend
Bug: skia:8293
Change-Id: Icdb72f61dda50de410c213fe2ad6cec3766460ec
Reviewed-on: https://skia-review.googlesource.com/c/173918
Commit-Queue: Stephan Altmueller <stephana@google.com>
Reviewed-by: Kevin Lubick <kjlubick@google.com>
diff --git a/gold-client/cmd/goldctl/cmd_imgtest.go b/gold-client/cmd/goldctl/cmd_imgtest.go
index a1273f8..45b6418 100644
--- a/gold-client/cmd/goldctl/cmd_imgtest.go
+++ b/gold-client/cmd/goldctl/cmd_imgtest.go
@@ -18,7 +18,7 @@
flagIssueID string
flagPatchsetID string
flagJobID string
- flagInstandID string
+ flagInstanceID string
flagWorkDir string
flagPassFailStep bool
flagFailureFile string
@@ -95,7 +95,7 @@
}
func (i *imgTestEnv) addCommonFlags(cmd *cobra.Command, optional bool) {
- cmd.Flags().StringVarP(&i.flagInstandID, "instance", "", "", "ID of the Gold instance.")
+ cmd.Flags().StringVarP(&i.flagInstanceID, "instance", "", "", "ID of the Gold instance.")
cmd.Flags().StringVarP(&i.flagWorkDir, "work-dir", "", "", "Temporary work directory")
cmd.Flags().BoolVarP(&i.flagPassFailStep, "passfail", "", false, "Whether the 'add' call returns a pass/fail for each test.")
@@ -139,9 +139,7 @@
BuildBucketID: jobID,
}
- up, err := goldclient.NewUploadResults(gr, i.flagInstandID, i.flagPassFailStep, i.flagWorkDir)
- ifErrLogExit(cmd, err)
- goldClient, err := goldclient.NewCloudClient(up)
+ goldClient, err := goldclient.NewCloudClient(i.flagWorkDir, i.flagInstanceID, i.flagPassFailStep, gr)
ifErrLogExit(cmd, err)
pass, err := goldClient.Test(i.flagTestName, i.flagPNGFile)
diff --git a/gold-client/go/goldclient/goldclient.go b/gold-client/go/goldclient/goldclient.go
index 4dd6ef8..cd0fc6e 100644
--- a/gold-client/go/goldclient/goldclient.go
+++ b/gold-client/go/goldclient/goldclient.go
@@ -1,185 +1,218 @@
package goldclient
import (
+ "bufio"
"bytes"
"crypto/md5"
"encoding/json"
"fmt"
"image/png"
"io/ioutil"
+ "net/http"
"os"
- "os/exec"
"path/filepath"
+ "regexp"
"strconv"
+ "strings"
"time"
- "go.skia.org/infra/go/sklog"
+ "go.skia.org/infra/go/fileutil"
+ "go.skia.org/infra/go/httputils"
+ "go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/util"
"go.skia.org/infra/golden/go/diff"
"go.skia.org/infra/golden/go/jsonio"
"go.skia.org/infra/golden/go/types"
+ "go.skia.org/infra/golden/go/web"
)
const (
resultPrefix = "dm-json-v1"
imagePrefix = "dm-images-v1"
- resultFileNameTmpl = "dm-%s.json"
- tempFileName = "dm.json"
+ knownHashesURLPath = "_/hashes"
+
+ resultStateFile = "result-state.json"
+ jsonTempFileName = "dm.json"
+ fetchTempFileName = "hashes.txt"
)
+// md5Regexp is used to check whether strings are MD5 hashes.
+var md5Regexp = regexp.MustCompile(`^[a-f0-9]{32}$`)
+
+// GoldClient is the uniform interface to communicate with the Gold service.
type GoldClient interface {
- SetConfig(config interface{}) error
+ // Test adds a test result to the current testrun. If the GoldClient is configured to
+ // return PASS/FAIL for each test, the returned boolean indicates whether the test passed
+ // comparison with the expectations. An error is only returned if there was a technical problem
+ // in processing the test.
Test(name string, imgFileName string) (bool, error)
}
-type UploadResults struct {
- // Extend the GoldResults struct with some meta data about uploading.
- results *jsonio.GoldResults
-
- perTestPassFail bool
- instanceID string
- workDir string
-}
-
-func NewUploadResults(results *jsonio.GoldResults, instanceID string, perTestPassFail bool, workDir string) (*UploadResults, error) {
- ret := &UploadResults{
- results: results,
- perTestPassFail: perTestPassFail,
- instanceID: instanceID,
- workDir: workDir,
- }
-
- return ret, nil
-}
-
-func (u *UploadResults) merge(right *UploadResults) {
- if u.results == nil {
- u.results = right.results
- }
- if u.instanceID == "" {
- u.instanceID = right.instanceID
- }
- if !u.perTestPassFail {
- u.perTestPassFail = right.perTestPassFail
- }
-}
-
-func (u *UploadResults) getResultFilePath() string {
- now := time.Now().UTC()
- year, month, day := now.Date()
- hour := now.Hour()
- fileName := fmt.Sprintf(resultFileNameTmpl, strconv.FormatInt(now.UnixNano()/int64(time.Millisecond), 10))
- path := fmt.Sprintf("%s/%04d/%02d/%02d/%02d/%s", resultPrefix, year, month, day, hour, fileName)
-
- if u.results.Issue > 0 {
- path = "trybot/" + path
- }
-
- return path
-}
-
-func (u *UploadResults) getImagePath(imgHash string) string {
- return fmt.Sprintf("%s/%s.png", imagePrefix, imgHash)
-}
-
-// Implement the GoldClient interface for a remote Gold server.
+// cloudClient implements the GoldClient interface for the remote Gold service.
type cloudClient struct {
- uploadResults *UploadResults
- ready bool
- goldURL string
- bucket string
- knownHashes util.StringSet
- expectations types.TestExp
+ // workDir is a temporary directory that has to exist between related calls
+ workDir string
+
+ // resultState keeps track of all the information to generate and upload a valid result.
+ resultState *resultState
+
+ // freshState indicates that the instance was created from scratch, not loaded from disk.
+ freshState bool
+
+ // ready caches the result of the isReady call so we avoid duplicate work.
+ ready bool
+ httpClient *http.Client
}
-func NewCloudClient(results *UploadResults) (GoldClient, error) {
- ret := &cloudClient{
- uploadResults: &UploadResults{},
+// NewCloudClient returns an implementation of the GoldClient that relies on the Gold service.
+// Arguments:
+// - workDir : is a temporary work directory that needs to be available during the entire
+// testrun (over multiple calls to 'Test')
+// - instanceID: is the id of the Gold instance.
+// - passFailStep: indicates whether each individual call to Test needs to return pass/fail.
+// - goldResults: A populated instance of jsonio.GoldResults that contains configuration
+// shared by all tests.
+//
+// If a new instance is created for each call to Test, the arguments of the first call are
+// preserved. They are cached in a JSON file in the work directory.
+func NewCloudClient(workDir, instanceID string, passFailStep bool, goldResult *jsonio.GoldResults) (GoldClient, error) {
+ // Make sure the workdir was given and exists.
+ if workDir == "" {
+ return nil, skerr.Fmt("No 'workDir' provided to NewCloudClient")
}
- if err := ret.SetConfig(results); err != nil {
- return nil, sklog.FmtErrorf("Error initializing result in Cloud GoldClient: %s", err)
+ workDir, err := filepath.Abs(workDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(stephana): When we add authentication via a service account this needs to
+ // be triggered by an argument to this function or a config flag of some sort.
+
+ // Make sure 'gsutil' is on the PATH.
+ if !gsutilAvailable() {
+ return nil, skerr.Fmt("Unable to find 'gsutil' on the PATH")
+ }
+
+ if !fileutil.FileExists(workDir) {
+ return nil, fmt.Errorf("Workdir path %q does not exist", workDir)
+ }
+
+ ret := &cloudClient{
+ workDir: workDir,
+ httpClient: httputils.DefaultClientConfig().Client(),
+ }
+
+ if err := ret.initResultState(instanceID, passFailStep, goldResult); err != nil {
+ return nil, skerr.Fmt("Error initializing result in cloud GoldClient: %s", err)
}
return ret, nil
}
-func (c *cloudClient) SetConfig(config interface{}) error {
- // If we are ready, there is nothing todo here.
+// Test implements the GoldClient interface.
+func (c *cloudClient) Test(name string, imgFileName string) (bool, error) {
+ passed, err := c.addTest(name, imgFileName)
+
+ // If there was no error and this is a new instance then save the resultState for the next call.
+ if err == nil && c.freshState {
+ if err := c.resultState.save(c.getResultStateFile()); err != nil {
+ return false, err
+ }
+ }
+ return passed, err
+}
+
+// addTest adds a test to results. If perTestPassFail is true it will also upload the result.
+func (c *cloudClient) addTest(name string, imgFileName string) (bool, error) {
+ if err := c.isReady(); err != nil {
+ return false, skerr.Fmt("Unable to process test result. Cloud Gold Client not ready: %s", err)
+ }
+
+ // Load the PNG from disk and hash it.
+ _, imgHash, err := loadAndHashImage(imgFileName)
+ if err != nil {
+ return false, err
+ }
+
+ // Check against known hashes and upload if needed.
+ if !c.resultState.KnownHashes[imgHash] {
+ gcsImagePath := c.resultState.getGCSImagePath(imgHash)
+ if err := gsutilCopy(imgFileName, prefixGCS(gcsImagePath)); err != nil {
+ return false, skerr.Fmt("Error uploading image: %s", err)
+ }
+ }
+
+ // Add the result of this test.
+ c.addResult(name, imgHash)
+
+ // At this point the result should be correct for uploading.
+ if _, err := c.resultState.GoldResults.Validate(false); err != nil {
+ return false, err
+ }
+
+ // If we do per test pass/fail then upload the result and compare it to the baseline.
+ if c.resultState.PerTestPassFail {
+ localFileName := filepath.Join(c.workDir, jsonTempFileName)
+ if err := gsUtilUploadJson(c.resultState.GoldResults, localFileName, c.resultState.getResultFilePath()); err != nil {
+ return false, err
+ }
+ return c.resultState.Expectations[name][imgHash] == types.POSITIVE, nil
+ }
+
+ // If we don't do per-test pass/fail then return true.
+ return true, nil
+}
+
+// initResultState assembles the information that needs to be uploaded based on previous calls
+// to the function and new arguments.
+func (c *cloudClient) initResultState(instanceID string, passFailStep bool, goldResult *jsonio.GoldResults) error {
+ // Load the state from the workdir.
+ var err error
+ c.resultState, err = loadStateFromJson(c.getResultStateFile())
+ if err != nil {
+ return err
+ }
+
+ // If we are ready that means we have loaded the resultState from the temporary directory.
+ if err := c.isReady(); err == nil {
+ return nil
+ }
+
+ // Create a new instance of result state. Setting freshState to true indicates that this needs
+ // to be stored to disk once a test has been added successfully.
+ c.resultState, err = newResultState(goldResult, passFailStep, instanceID, c.workDir, c.httpClient)
+ if err != nil {
+ return err
+ }
+
+ c.freshState = true
+ return nil
+}
+
+// isReady returns nil if the instance is ready to accept test results (all necessary info
+// has been configured), and a descriptive error otherwise.
+func (c *cloudClient) isReady() error {
if c.ready {
return nil
}
- // Make sure we have an instand of UploadResults.
- resultConf, ok := config.(*UploadResults)
- if !ok {
- return sklog.FmtErrorf("Provided config is not an instance of *UploadResults")
- }
- c.uploadResults.merge(resultConf)
-
- // From the instance ID load Derive the Gold URL and the bucket from the instance ID.
- if err := c.processInstanceID(resultConf.instanceID); err != nil {
- return err
+ // if resultState hasn't been set yet, then we are simply not ready.
+ if c.resultState == nil {
+ return skerr.Fmt("No result state object available")
}
- // TODO: Make sure the GoldResult instance is set up correctly.
- if _, err := c.uploadResults.results.Validate(true); err != nil {
- return sklog.FmtErrorf("Invalid GoldResults set. Missing fields: %s", err)
+ // Check if the GoldResults instance is complete once results are added.
+ if _, err := c.resultState.GoldResults.Validate(true); err != nil {
+ return skerr.Fmt("Gold results fields invalid: %s", err)
}
c.ready = true
return nil
}
-func (c *cloudClient) processInstanceID(instanceID string) error {
- // TODO(stephana): Move the URLs and deriving the bucket to a central place in the backend
- // or get rid of the bucket entirely and expose an upload URL (requires authentication)
-
- // Derive and set the GoldURL and the upload bucket.
- c.goldURL = fmt.Sprintf("https://%s-gold.skia.org", instanceID)
- c.bucket = fmt.Sprintf("skia-gold-%s", instanceID)
-
- // TODO(stephana): Fetch the known hashes (may be empty, but should not fail).
- c.knownHashes = util.StringSet{}
-
- // TODO(stephana): Fetch the baseline (may be empty but should not fail).
- c.expectations = types.TestExp{}
-
- return nil
-}
-
-func (c *cloudClient) Test(name string, imgFileName string) (bool, error) {
- if !c.ready {
- return false, sklog.FmtErrorf("Unable to process test result. Cloud Gold Client uninitialized. Call SetConfig before this call.")
- }
-
- // Load the PNG from disk and hash it.
- _, imgHash, err := loadAndHashFile(imgFileName)
- if err != nil {
- return false, err
- }
-
- // Check against known hashes and upload if needed.
- if !c.knownHashes[imgHash] {
- if err := c.gsUtilUploadImage(imgFileName, imgHash); err != nil {
- return false, sklog.FmtErrorf("Error uploading image: %s", err)
- }
- }
-
- // Upload the result of this test.
- c.addResult(name, imgHash)
- if err := c.gsUtilUploadResultsFile(); err != nil {
- return false, sklog.FmtErrorf("Error uploading result file: %s", err)
- }
-
- // If we do per test pass/fail then compare to the baseline and return accordingly
- if c.uploadResults.perTestPassFail {
- // Check if this is positive in the expectations.
- // TODO(stephana): Better define semantics of expecations.
- return c.expectations[name][imgHash] == types.POSITIVE, nil
- }
-
- // If we don't do per-test pass/fail then return true.
- return true, nil
+// getResultStateFile returns the name of the temporary file where the state is cached as JSON
+func (c *cloudClient) getResultStateFile() string {
+ return filepath.Join(c.workDir, resultStateFile)
}
func (c *cloudClient) addResult(name, imgHash string) {
@@ -188,62 +221,20 @@
Digest: imgHash,
Key: map[string]string{types.PRIMARY_KEY_FIELD: name},
- // TODO(stephana): check if the backend still relies on this. s
+ // TODO(stephana): check if the backend still relies on this.
Options: map[string]string{"ext": "png"},
}
// TODO(stephana): Make the corpus field an option.
- if _, ok := c.uploadResults.results.Key[types.CORPUS_FIELD]; !ok {
- newResult.Key[types.CORPUS_FIELD] = c.uploadResults.instanceID
+ if _, ok := c.resultState.GoldResults.Key[types.CORPUS_FIELD]; !ok {
+ newResult.Key[types.CORPUS_FIELD] = c.resultState.InstanceID
}
- c.uploadResults.results.Results = append(c.uploadResults.results.Results, newResult)
+ c.resultState.GoldResults.Results = append(c.resultState.GoldResults.Results, newResult)
}
-func (c *cloudClient) gsUtilUploadResultsFile() error {
- localFileName := filepath.Join(c.uploadResults.workDir, tempFileName)
- jsonBytes, err := json.MarshalIndent(c.uploadResults.results, "", " ")
- if err != nil {
- return err
- }
-
- if err := ioutil.WriteFile(localFileName, jsonBytes, 0600); err != nil {
- return err
- }
-
- // Get the path to upload to
- objPath := c.uploadResults.getResultFilePath()
- return c.gsUtilCopy(localFileName, objPath)
-}
-
-func (c *cloudClient) gsUtilCopy(srcFile, targetPath string) error {
- srcFile, err := filepath.Abs(srcFile)
- if err != nil {
- return err
- }
-
- // runCmd := &exec.Command{
- // Name: "gsutil",
- // Args: []string{"cp", srcFile, fmt.Sprintf("gs://%s/%s", c.bucket, targetPath)},
- // Timeout: 5 * time.Second,
- // CombinedOutput: &combinedBuf,
- // Verbose: exec.Silent,
- // }
-
- runCmd := exec.Command("gsutil", "cp", srcFile, fmt.Sprintf("gs://%s/%s", c.bucket, targetPath))
- outBytes, err := runCmd.CombinedOutput()
- if err != nil {
- return sklog.FmtErrorf("Error running gsutil. Got output \n%s\n and error: %s", outBytes, err)
- }
-
- return nil
-}
-
-func (c *cloudClient) gsUtilUploadImage(imagePath string, imgHash string) error {
- objPath := c.uploadResults.getImagePath(imgHash)
- return c.gsUtilCopy(imagePath, objPath)
-}
-
-func loadAndHashFile(fileName string) ([]byte, string, error) {
+// loadAndHashImage loads an image from disk and hashes the internal Pixel buffer. It returns
+// the bytes of the encoded image and the MD5 hash as hex encoded string.
+func loadAndHashImage(fileName string) ([]byte, string, error) {
// Load the image
reader, err := os.Open(fileName)
if err != nil {
@@ -253,14 +244,195 @@
imgBytes, err := ioutil.ReadAll(reader)
if err != nil {
- return nil, "", sklog.FmtErrorf("Error loading file %s: %s", fileName, err)
+ return nil, "", skerr.Fmt("Error loading file %s: %s", fileName, err)
}
img, err := png.Decode(bytes.NewBuffer(imgBytes))
if err != nil {
- return nil, "", sklog.FmtErrorf("Error decoding PNG in file %s: %s", fileName, err)
+ return nil, "", skerr.Fmt("Error decoding PNG in file %s: %s", fileName, err)
}
nrgbaImg := diff.GetNRGBA(img)
md5Hash := fmt.Sprintf("%x", md5.Sum(nrgbaImg.Pix))
return imgBytes, md5Hash, nil
}
+
+// resultState is an internal container for all information to upload results
+// to Gold, including the jsonio.GoldResult structure itself.
+type resultState struct {
+ // Extend the GoldResults struct with some meta data about uploading.
+ GoldResults *jsonio.GoldResults
+ PerTestPassFail bool
+ InstanceID string
+ GoldURL string
+ Bucket string
+ KnownHashes util.StringSet
+ Expectations types.TestExp
+
+ // not saved as state
+ httpClient *http.Client
+ workDir string
+}
+
+// newResultState creates a new instance of resultState and downloads the relevant files from Gold.
+func newResultState(goldResult *jsonio.GoldResults, passFailStep bool, instanceID, workDir string, httpClient *http.Client) (*resultState, error) {
+
+ // TODO(stephana): Move deriving the URLs and the bucket to a central place in the backend
+ // or get rid of the bucket entirely and expose an upload URL (requires authentication)
+
+ ret := &resultState{
+ GoldResults: goldResult,
+ PerTestPassFail: passFailStep,
+ InstanceID: instanceID,
+ GoldURL: fmt.Sprintf("https://%s-gold.skia.org", instanceID),
+ Bucket: fmt.Sprintf("skia-gold-%s", instanceID),
+ workDir: workDir,
+ }
+
+ if err := ret.loadKnownHashes(); err != nil {
+ return nil, err
+ }
+
+ // TODO(stephana): Fetch the baseline (may be empty but should not fail).
+ if err := ret.loadExpectations(); err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+// loadStateFromJson loads a serialization of a resultState instance that was previously written
+// via the save method.
+func loadStateFromJson(fileName string) (*resultState, error) {
+ // If the state is not on disk, we return nil, indicating that a new resultState has to be created
+ if !fileutil.FileExists(fileName) {
+ return nil, nil
+ }
+
+ jsonBytes, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &resultState{}
+ if err := json.Unmarshal(jsonBytes, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// save serializes this instance to JSON and writes it to the given file.
+func (r *resultState) save(fileName string) error {
+ jsonBytes, err := json.Marshal(r)
+ if err != nil {
+ return skerr.Fmt("Error serializing resultState to JSON: %s", err)
+ }
+
+ if err := ioutil.WriteFile(fileName, jsonBytes, 0644); err != nil {
+ return skerr.Fmt("Error writing resultState to %s: %s", fileName, err)
+ }
+ return nil
+}
+
+// loadKnownHashes loads the list of known hashes from the Gold instance.
+func (r *resultState) loadKnownHashes() error {
+ r.KnownHashes = util.StringSet{}
+
+ // Fetch the known hashes via http
+ hashesURL := fmt.Sprintf("%s/%s", r.GoldURL, knownHashesURLPath)
+ resp, err := r.httpClient.Get(hashesURL)
+ if err != nil {
+ return skerr.Fmt("Error retrieving known hashes file: %s", err)
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil
+ }
+
+ // Retrieve the body and parse the list of known hashes.
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return skerr.Fmt("Error reading body of HTTP response: %s", err)
+ }
+ if err := resp.Body.Close(); err != nil {
+ return skerr.Fmt("Error closing HTTP response: %s", err)
+ }
+
+ scanner := bufio.NewScanner(bytes.NewBuffer(body))
+ for scanner.Scan() {
+ // Ignore empty lines and lines that are not valid MD5 hashes
+ line := bytes.TrimSpace(scanner.Bytes())
+ if len(line) > 0 && md5Regexp.Match(line) {
+ r.KnownHashes[string(line)] = true
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return skerr.Fmt("Error scanning response of HTTP request: %s", err)
+ }
+ return nil
+}
+
+// loadExpectations fetches the expectations from Gold to compare to tests.
+func (r *resultState) loadExpectations() error {
+ var urlPath string
+ if r.GoldResults.Issue > 0 {
+ issueID := strconv.FormatInt(r.GoldResults.Issue, 10)
+ urlPath = strings.Replace(web.EXPECATIONS_ISSUE_ROUTE, "{issue_id}", issueID, 1)
+ } else {
+ urlPath = strings.Replace(web.EXPECATIONS_ROUTE, "{commit_hash}", r.GoldResults.GitHash, 1)
+ }
+ url := fmt.Sprintf("%s/%s", r.GoldURL, urlPath)
+
+ resp, err := r.httpClient.Get(url)
+ if err != nil {
+ return err
+ }
+ jsonBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return skerr.Fmt("Error reading body of request to %s: %s", url, err)
+ }
+ if err := resp.Body.Close(); err != nil {
+ return skerr.Fmt("Error closing response from request to %s: %s", url, err)
+ }
+
+ return json.Unmarshal(jsonBytes, &r.Expectations)
+}
+
+// getResultFilePath returns the path in GCS where the result file should be stored.
+//
+// The path follows the path described here:
+// https://github.com/google/skia-buildbot/blob/master/golden/docs/INGESTION.md
+// The file name of the path also contains a timestamp to make it unique since all
+// calls within the same test run are written to the same output path.
+func (r *resultState) getResultFilePath() string {
+ now := time.Now().UTC()
+ year, month, day := now.Date()
+ hour := now.Hour()
+
+ // Assemble a path that looks like this:
+ // <path_prefix>/YYYY/MM/DD/HH/<git_hash>/<build_id>/<time_stamp>/<per_run_file_name>.json
+ // The first segements up to 'HH' are required so the Gold ingester can scan these prefixes for
+ // new files. The later segments are necessary to make the path unique within the runs of one
+ // hour and increase readability of the paths for troubleshooting.
+ // It is vital that the time segments of the path are based on UTC location.
+ fileName := fmt.Sprintf("dm-%d.json", now.UnixNano())
+ segments := []interface{}{
+ resultPrefix,
+ year,
+ month,
+ day,
+ hour,
+ r.GoldResults.GitHash,
+ r.GoldResults.BuildBucketID,
+ time.Now().Unix(),
+ fileName}
+ path := fmt.Sprintf("%s/%04d/%02d/%02d/%02d/%s/%d/%d/%s", segments...)
+
+ if r.GoldResults.Issue > 0 {
+ path = "trybot/" + path
+ }
+ return fmt.Sprintf("%s/%s", r.Bucket, path)
+}
+
+// getGCSImagePath returns the path in GCS where the image with the given hash should be stored.
+func (r *resultState) getGCSImagePath(imgHash string) string {
+ return fmt.Sprintf("%s/%s/%s.png", r.Bucket, imagePrefix, imgHash)
+}
diff --git a/gold-client/go/goldclient/gsutil_util.go b/gold-client/go/goldclient/gsutil_util.go
new file mode 100644
index 0000000..31e32ed
--- /dev/null
+++ b/gold-client/go/goldclient/gsutil_util.go
@@ -0,0 +1,49 @@
+package goldclient
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os/exec"
+
+ "go.skia.org/infra/go/skerr"
+)
+
+// gsutilAvailable returns true if the 'gsutil' command could be found on the PATH
+func gsutilAvailable() bool {
+ _, err := exec.LookPath("gsutil")
+ return err == nil
+}
+
+// gsUtilUploadJson serializes the given data to JSON and writes the result to the given
+// tempFileName, then it copies the file to the given path in GCS. gcsObjPath is assumed
+// to have the form: <bucket_name>/path/to/object
+func gsUtilUploadJson(data interface{}, tempFileName, gcsObjPath string) error {
+ jsonBytes, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(tempFileName, jsonBytes, 0644); err != nil {
+ return err
+ }
+
+ // Upload the written file.
+ return gsutilCopy(tempFileName, prefixGCS(gcsObjPath))
+}
+
+// prefixGCS adds the "gs://" prefix to the given GCS path.
+func prefixGCS(gcsPath string) string {
+ return fmt.Sprintf("gs://%s", gcsPath)
+}
+
+// gsutilCopy shells out to gsutil to copy the given src to the given target. A path
+// starting with "gs://" is assumed to be in GCS.
+func gsutilCopy(src, dst string) error {
+ runCmd := exec.Command("gsutil", "cp", src, dst)
+ outBytes, err := runCmd.CombinedOutput()
+ if err != nil {
+ return skerr.Fmt("Error running gsutil. Got output \n%s\n and error: %s", outBytes, err)
+ }
+ return nil
+}
diff --git a/golden/cmd/baseline_server/main.go b/golden/cmd/baseline_server/main.go
index 4adb590..063967e 100644
--- a/golden/cmd/baseline_server/main.go
+++ b/golden/cmd/baseline_server/main.go
@@ -60,8 +60,8 @@
router := mux.NewRouter()
// Retrieving that baseline for master and an Gerrit issue are handled the same way
- router.HandleFunc(web.BASELINE_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
- router.HandleFunc(web.BASELINE_ISSUE_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
+ router.HandleFunc(web.EXPECATIONS_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
+ router.HandleFunc(web.EXPECATIONS_ISSUE_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
// Start the server
sklog.Infof("Serving on http://127.0.0.1" + *port)
diff --git a/golden/cmd/skiacorrectness/main.go b/golden/cmd/skiacorrectness/main.go
index 0bb5ea6..6640e8d 100644
--- a/golden/cmd/skiacorrectness/main.go
+++ b/golden/cmd/skiacorrectness/main.go
@@ -349,8 +349,13 @@
GerritAPI: gerritAPI,
GStorageClient: gsClient,
Git: git,
+ IsAuthoritative: *authoritative,
+ SiteURL: *siteURL,
}
+ // Initialize the Baseliner instance from the values set above.
+ storages.InitBaseliner()
+
// Load the whitelist if there is one and disable querying for issues.
if *pubWhiteList != "" && *pubWhiteList != WHITELIST_ALL {
if err := storages.LoadWhiteList(*pubWhiteList); err != nil {
@@ -446,8 +451,8 @@
router.HandleFunc("/json/tryjob/{id}", handlers.JsonTryjobSummaryHandler).Methods("GET")
// Retrieving that baseline for master and an Gerrit issue are handled the same way
- router.HandleFunc(web.BASELINE_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
- router.HandleFunc(web.BASELINE_ISSUE_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
+ router.HandleFunc(web.EXPECATIONS_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
+ router.HandleFunc(web.EXPECATIONS_ISSUE_ROUTE, handlers.JsonBaselineHandler).Methods("GET")
router.HandleFunc("/json/refresh/{id}", handlers.JsonRefreshIssue).Methods("GET")
// Only expose these endpoints if login is enforced across the app or this an open site.
diff --git a/golden/go/indexer/indexer.go b/golden/go/indexer/indexer.go
index 874378e..8903357 100644
--- a/golden/go/indexer/indexer.go
+++ b/golden/go/indexer/indexer.go
@@ -332,7 +332,7 @@
// writeIssueBaseline handles changes to baselines for Gerrit issues and dumps
// the updated baseline to disk.
func (ixr *Indexer) writeIssueBaseline(evData interface{}) {
- if !ixr.storages.CanWriteBaseline() {
+ if !ixr.storages.Baseliner.CanWriteBaseline() {
return
}
@@ -343,7 +343,7 @@
}
idx := ixr.GetIndex()
- if err := ixr.storages.PushIssueBaseline(issueID, idx.GetTile(false), idx.tallies); err != nil {
+ if err := ixr.storages.Baseliner.PushIssueBaseline(issueID, idx.GetTile(false), idx.tallies); err != nil {
sklog.Errorf("Unable to push baseline for issue %d to GCS: %s", issueID, err)
return
}
@@ -438,13 +438,13 @@
func writeMasterBaseline(state interface{}) error {
idx := state.(*SearchIndex)
- if !idx.storages.CanWriteBaseline() {
+ if !idx.storages.Baseliner.CanWriteBaseline() {
return nil
}
// Write the baseline asynchronously.
go func() {
- if err := idx.storages.PushMasterBaseline(idx.GetTile(false)); err != nil {
+ if err := idx.storages.Baseliner.PushMasterBaselines(idx.GetTile(false)); err != nil {
sklog.Errorf("Error pushing master baseline to GCS: %s", err)
}
}()
diff --git a/golden/go/indexer/indexer_test.go b/golden/go/indexer/indexer_test.go
index 8ab3e70..edb739e 100644
--- a/golden/go/indexer/indexer_test.go
+++ b/golden/go/indexer/indexer_test.go
@@ -87,6 +87,8 @@
GStorageClient: gsClient,
}
+ storages.InitBaseliner()
+
ixr, err := New(storages, time.Minute)
assert.NoError(t, err)
diff --git a/golden/go/jsonio/jsonio.go b/golden/go/jsonio/jsonio.go
index 44e6f24..921ec82 100644
--- a/golden/go/jsonio/jsonio.go
+++ b/golden/go/jsonio/jsonio.go
@@ -145,6 +145,11 @@
// error messages (one for each field) and the returned error contains a
// concatenation of these error messages.
func (g *GoldResults) Validate(ignoreResults bool) ([]string, error) {
+ if g == nil {
+ msg := "Received nil pointer for GoldResult"
+ return []string{msg}, fmt.Errorf(msg)
+ }
+
jn := goldResultsJsonMap
errMsg := []string{}
diff --git a/golden/go/search/utils_test.go b/golden/go/search/utils_test.go
index 92fbc49..53398d0 100644
--- a/golden/go/search/utils_test.go
+++ b/golden/go/search/utils_test.go
@@ -157,6 +157,7 @@
DiffStore: mocks.NewMockDiffStore(),
EventBus: eventBus,
}
+ storages.InitBaseliner()
ixr, err := indexer.New(storages, 10*time.Minute)
assert.NoError(t, err)
diff --git a/golden/go/storage/baseliner.go b/golden/go/storage/baseliner.go
new file mode 100644
index 0000000..e34e77e
--- /dev/null
+++ b/golden/go/storage/baseliner.go
@@ -0,0 +1,132 @@
+package storage
+
+import (
+ "go.skia.org/infra/go/sklog"
+ "go.skia.org/infra/go/tiling"
+ "go.skia.org/infra/golden/go/baseline"
+ "go.skia.org/infra/golden/go/expstorage"
+ "go.skia.org/infra/golden/go/tally"
+ "go.skia.org/infra/golden/go/tryjobstore"
+ "go.skia.org/infra/golden/go/types"
+ "golang.org/x/sync/errgroup"
+)
+
+// TODO(stephana): Baseliner needs to be merged into the baseline package and
+// the nomenclature should either change to Expectations or make it clearer that
+// baselines are synonymous to expectations.
+// This needs to be extended to store per-commit expectations.
+
+// Baseliner is a helper type that provides functions to write baselines (expectations) to
+// GCS and retrieve them. Other packages use it to continuously write expectations to GCS
+// as they become available.
+type Baseliner struct {
+ gStorageClient *GStorageClient
+ expectationsStore expstorage.ExpectationsStore
+ issueExpStoreFactory expstorage.IssueExpStoreFactory
+ tryjobStore tryjobstore.TryjobStore
+}
+
+// NewBaseliner creates a new instance of Baseliner.
+func NewBaseliner(gStorageClient *GStorageClient, expectationsStore expstorage.ExpectationsStore, issueExpStoreFactory expstorage.IssueExpStoreFactory, tryjobStore tryjobstore.TryjobStore) *Baseliner {
+ return &Baseliner{
+ gStorageClient: gStorageClient,
+ expectationsStore: expectationsStore,
+ issueExpStoreFactory: issueExpStoreFactory,
+ tryjobStore: tryjobStore,
+ }
+}
+
+// CanWriteBaseline returns true if this instance was configured to write baseline files.
+func (b *Baseliner) CanWriteBaseline() bool {
+ return (b.gStorageClient != nil) && (b.gStorageClient.options.BaselineGSPath != "")
+}
+
+// PushMasterBaselines writes the baselines for the master branch to GCS.
+func (b *Baseliner) PushMasterBaselines(tile *tiling.Tile) error {
+ if !b.CanWriteBaseline() {
+ return sklog.FmtErrorf("Trying to write baseline while GCS path is not configured.")
+ }
+
+ _, baseLine, err := b.getMasterBaseline(tile)
+ if err != nil {
+ return sklog.FmtErrorf("Error retrieving master baseline: %s", err)
+ }
+
+ // Write the baseline to GCS.
+ outputPath, err := b.gStorageClient.WriteBaseLine(baseLine)
+ if err != nil {
+ return sklog.FmtErrorf("Error writing baseline to GCS: %s", err)
+ }
+ sklog.Infof("Baseline for master written to %s.", outputPath)
+ return nil
+}
+
+// PushIssueBaseline writes the baseline for a Gerrit issue to GCS.
+func (b *Baseliner) PushIssueBaseline(issueID int64, tile *tiling.Tile, tallies *tally.Tallies) error {
+ if !b.CanWriteBaseline() {
+ return sklog.FmtErrorf("Trying to write baseline while GCS path is not configured.")
+ }
+
+ issueExpStore := b.issueExpStoreFactory(issueID)
+ exp, err := issueExpStore.Get()
+ if err != nil {
+ return sklog.FmtErrorf("Unable to get issue expecations: %s", err)
+ }
+
+ tryjobs, tryjobResults, err := b.tryjobStore.GetTryjobs(issueID, nil, true, true)
+ if err != nil {
+ return sklog.FmtErrorf("Unable to get TryjobResults")
+ }
+ talliesByTest := tallies.ByTest()
+ baseLine := baseline.GetBaselineForIssue(issueID, tryjobs, tryjobResults, exp, tile.Commits, talliesByTest)
+
+ // Write the baseline to GCS.
+ outputPath, err := b.gStorageClient.WriteBaseLine(baseLine)
+ if err != nil {
+ return sklog.FmtErrorf("Error writing baseline to GCS: %s", err)
+ }
+ sklog.Infof("Baseline for issue %d written to %s.", issueID, outputPath)
+ return nil
+}
+
+// FetchBaseline fetches the complete baseline for the given Gerrit issue by
+// loading the master baseline and the issue baseline from GCS and combining
+// them. If either of them doesn't exist an empty baseline is assumed.
+func (b *Baseliner) FetchBaseline(commitHash string, issueID int64, patchsetID int64) (*baseline.CommitableBaseLine, error) {
+ var masterBaseline *baseline.CommitableBaseLine
+ var issueBaseline *baseline.CommitableBaseLine
+
+ var egroup errgroup.Group
+ egroup.Go(func() error {
+ var err error
+ masterBaseline, err = b.gStorageClient.ReadBaseline(commitHash, 0)
+ return err
+ })
+
+ if issueID > 0 {
+ egroup.Go(func() error {
+ var err error
+ issueBaseline, err = b.gStorageClient.ReadBaseline(commitHash, issueID)
+ return err
+ })
+ }
+
+ if err := egroup.Wait(); err != nil {
+ return nil, err
+ }
+
+ if issueBaseline != nil {
+ masterBaseline.Baseline.Update(issueBaseline.Baseline)
+ }
+ return masterBaseline, nil
+}
+
+// getMasterBaseline retrieves the master baseline based on the given tile.
+func (b *Baseliner) getMasterBaseline(tile *tiling.Tile) (types.Expectations, *baseline.CommitableBaseLine, error) {
+ exps, err := b.expectationsStore.Get()
+ if err != nil {
+ return nil, nil, sklog.FmtErrorf("Unable to retrieve expectations: %s", err)
+ }
+
+ return exps, baseline.GetBaselineForMaster(exps, tile), nil
+}
diff --git a/golden/go/storage/gsclient.go b/golden/go/storage/gsclient.go
index 94f06e8..7b25493 100644
--- a/golden/go/storage/gsclient.go
+++ b/golden/go/storage/gsclient.go
@@ -68,13 +68,25 @@
return nil
}
- outPath := g.getBaselinePath(baseLine.Issue)
+ outPath := g.getBaselinePath(baseLine.EndCommit.Hash, baseLine.Issue)
+ return "gs://" + outPath, g.writeToPath(outPath, "application/json", writeFn)
+}
+
+func (g *GStorageClient) WriteBaseLineForCommit(baseLine *baseline.CommitableBaseLine) (string, error) {
+ writeFn := func(w *gstorage.Writer) error {
+ if err := json.NewEncoder(w).Encode(baseLine); err != nil {
+ return fmt.Errorf("Error encoding baseline to JSON: %s", err)
+ }
+ return nil
+ }
+
+ outPath := g.getBaselinePath(baseLine.EndCommit.Hash, baseLine.Issue)
return "gs://" + outPath, g.writeToPath(outPath, "application/json", writeFn)
}
// ReadBaseline returns the baseline for the given issue from GCS.
-func (g *GStorageClient) ReadBaseline(issueID int64) (*baseline.CommitableBaseLine, error) {
- baselinePath := g.getBaselinePath(issueID)
+func (g *GStorageClient) ReadBaseline(commitHash string, issueID int64) (*baseline.CommitableBaseLine, error) {
+ baselinePath := g.getBaselinePath(commitHash, issueID)
bucketName, storagePath := gcs.SplitGSPath(baselinePath)
ctx := context.Background()
@@ -104,11 +116,15 @@
-// getBaselinePath returns the baseline path in GCS for the given issueID.
-// If issueID <= 0 it returns the path for the master baseline.
+// getBaselinePath returns the baseline path in GCS for the given commitHash and issueID.
+// If issueID <= 0 it returns the master baseline path (per-commit when commitHash is set).
-func (g *GStorageClient) getBaselinePath(issueID int64) string {
+func (g *GStorageClient) getBaselinePath(commitHash string, issueID int64) string {
// Change the output file based on whether it's the master branch or a Gerrit issue.
- outPath := "master.json"
+ var outPath string
if issueID > 0 {
outPath = fmt.Sprintf("issue_%d.json", issueID)
+ } else if commitHash != "" {
+ outPath = fmt.Sprintf("master_%s.json", commitHash)
+ } else {
+ outPath = "master.json"
}
return g.options.BaselineGSPath + "/" + outPath
}
diff --git a/golden/go/storage/storage.go b/golden/go/storage/storage.go
index c3ba95a..bf15203 100644
--- a/golden/go/storage/storage.go
+++ b/golden/go/storage/storage.go
@@ -22,7 +22,6 @@
"go.skia.org/infra/golden/go/digeststore"
"go.skia.org/infra/golden/go/expstorage"
"go.skia.org/infra/golden/go/ignore"
- "go.skia.org/infra/golden/go/tally"
"go.skia.org/infra/golden/go/tryjobs"
"go.skia.org/infra/golden/go/tryjobstore"
"go.skia.org/infra/golden/go/types"
@@ -41,10 +40,13 @@
EventBus eventbus.EventBus
TryjobStore tryjobstore.TryjobStore
TryjobMonitor *tryjobs.TryjobMonitor
- GerritAPI *gerrit.Gerrit
+ GerritAPI gerrit.GerritInterface
GStorageClient *GStorageClient
+ Baseliner *Baseliner
Git *gitinfo.GitInfo
WhiteListQuery paramtools.ParamSet
+ IsAuthoritative bool
+ SiteURL string
// NCommits is the number of commits we should consider. If NCommits is
// 0 or smaller all commits in the last tile will be considered.
@@ -58,99 +60,12 @@
mutex sync.Mutex
}
-// CanWriteBaseline returns true if this instance was configured to write baseline files.
-func (s *Storage) CanWriteBaseline() bool {
- return (s.GStorageClient != nil) && (s.GStorageClient.options.BaselineGSPath != "")
-}
+// TODO(stephana): Baseliner will eventually factored into the baseline package and
+// InitBaseliner should go away.
-// PushMasterBaseline writes the baseline for the master branch to GCS.
-func (s *Storage) PushMasterBaseline(tile *tiling.Tile) error {
- if !s.CanWriteBaseline() {
- return sklog.FmtErrorf("Trying to write baseline while GCS path is not configured.")
- }
-
- _, baseLine, err := s.getMasterBaseline(tile)
- if err != nil {
- return sklog.FmtErrorf("Error retrieving master baseline: %s", err)
- }
-
- // Write the baseline to GCS.
- outputPath, err := s.GStorageClient.WriteBaseLine(baseLine)
- if err != nil {
- return sklog.FmtErrorf("Error writing baseline to GCS: %s", err)
- }
- sklog.Infof("Baseline for master written to %s.", outputPath)
- return nil
-}
-
-// getMasterBaseline retrieves the master baseline based on the given tile.
-func (s *Storage) getMasterBaseline(tile *tiling.Tile) (types.Expectations, *baseline.CommitableBaseLine, error) {
- exps, err := s.ExpectationsStore.Get()
- if err != nil {
- return nil, nil, sklog.FmtErrorf("Unable to retrieve expectations: %s", err)
- }
-
- return exps, baseline.GetBaselineForMaster(exps, tile), nil
-}
-
-// PushIssueBaseline writes the baseline for a Gerrit issue to GCS.
-func (s *Storage) PushIssueBaseline(issueID int64, tile *tiling.Tile, tallies *tally.Tallies) error {
- if !s.CanWriteBaseline() {
- return sklog.FmtErrorf("Trying to write baseline while GCS path is not configured.")
- }
-
- issueExpStore := s.IssueExpStoreFactory(issueID)
- exp, err := issueExpStore.Get()
- if err != nil {
- return sklog.FmtErrorf("Unable to get issue expecations: %s", err)
- }
-
- tryjobs, tryjobResults, err := s.TryjobStore.GetTryjobs(issueID, nil, true, true)
- if err != nil {
- return sklog.FmtErrorf("Unable to get TryjobResults")
- }
- talliesByTest := tallies.ByTest()
- baseLine := baseline.GetBaselineForIssue(issueID, tryjobs, tryjobResults, exp, tile.Commits, talliesByTest)
-
- // Write the baseline to GCS.
- outputPath, err := s.GStorageClient.WriteBaseLine(baseLine)
- if err != nil {
- return sklog.FmtErrorf("Error writing baseline to GCS: %s", err)
- }
- sklog.Infof("Baseline for issue %d written to %s.", issueID, outputPath)
- return nil
-}
-
-// FetchBaseline fetches the complete baseline for the given Gerrit issue by
-// loading the master baseline and the issue baseline from GCS and combining
-// them. If either of them doesn't exist an empty baseline is assumed.
-func (s *Storage) FetchBaseline(issueID int64) (*baseline.CommitableBaseLine, error) {
- var masterBaseline *baseline.CommitableBaseLine
- var issueBaseline *baseline.CommitableBaseLine
-
- var egroup errgroup.Group
- egroup.Go(func() error {
- var err error
- masterBaseline, err = s.GStorageClient.ReadBaseline(0)
- return err
- })
-
- if issueID > 0 {
- egroup.Go(func() error {
- var err error
- issueBaseline, err = s.GStorageClient.ReadBaseline(issueID)
- return err
- })
- }
-
- if err := egroup.Wait(); err != nil {
- return nil, err
- }
-
- if issueBaseline != nil {
- masterBaseline.Baseline.Update(issueBaseline.Baseline)
- }
- return masterBaseline, nil
+// InitBaseliner initializes the Baseliner instance from values already set on the storage instance.
+func (s *Storage) InitBaseliner() {
+ s.Baseliner = NewBaseliner(s.GStorageClient, s.ExpectationsStore, s.IssueExpStoreFactory, s.TryjobStore)
}
// LoadWhiteList loads the given JSON5 file that defines that query to
@@ -345,6 +260,10 @@
return digestInfo, nil
}
+func (s *Storage) GetExpectationsForCommit(parentCommit string) (types.Expectations, error) {
+ return nil, sklog.FmtErrorf("Not implemented yet !")
+}
+
// getWhiteListedTile creates a new tile from the given tile that contains
// only traces that match the whitelist that was loaded earlier.
func (s *Storage) getWhiteListedTile(tile *tiling.Tile) *tiling.Tile {
diff --git a/golden/go/storage/storage_test.go b/golden/go/storage/storage_test.go
index d31ecfc..35e2b2a 100644
--- a/golden/go/storage/storage_test.go
+++ b/golden/go/storage/storage_test.go
@@ -8,6 +8,7 @@
assert "github.com/stretchr/testify/require"
"go.skia.org/infra/go/testutils"
+ "go.skia.org/infra/go/tiling"
"go.skia.org/infra/golden/go/baseline"
"go.skia.org/infra/golden/go/types"
)
@@ -23,9 +24,20 @@
var (
issueID = int64(5678)
+ startCommit = &tiling.Commit{
+ CommitTime: time.Now().Add(-time.Hour * 10).Unix(),
+ Hash: "abb84b151a49eca5a6e107c51a1f1b7da73454bf",
+ Author: "Jon Doe",
+ }
+ endCommit = &tiling.Commit{
+ CommitTime: time.Now().Unix(),
+ Hash: "51465f0ed60ce2cacff3653c7d1d70317679fc06",
+ Author: "Jane Doe",
+ }
+
masterBaseline = &baseline.CommitableBaseLine{
- StartCommit: nil,
- EndCommit: nil,
+ StartCommit: startCommit,
+ EndCommit: endCommit,
Baseline: types.TestExp{
"test-1": map[string]types.Label{"d1": types.POSITIVE},
},
@@ -33,8 +45,8 @@
}
issueBaseline = &baseline.CommitableBaseLine{
- StartCommit: nil,
- EndCommit: nil,
+ StartCommit: endCommit,
+ EndCommit: endCommit,
Baseline: types.TestExp{
"test-3": map[string]types.Label{"d2": types.POSITIVE},
},
@@ -86,7 +98,7 @@
assert.NoError(t, err)
removePaths = append(removePaths, strings.TrimPrefix(path, "gs://"))
- foundBaseline, err := gsClient.ReadBaseline(0)
+ foundBaseline, err := gsClient.ReadBaseline(endCommit.Hash, 0)
assert.NoError(t, err)
assert.Equal(t, masterBaseline, foundBaseline)
@@ -95,20 +107,21 @@
assert.NoError(t, err)
removePaths = append(removePaths, strings.TrimPrefix(path, "gs://"))
- foundBaseline, err = gsClient.ReadBaseline(issueID)
+ foundBaseline, err = gsClient.ReadBaseline("", issueID)
assert.NoError(t, err)
assert.Equal(t, issueBaseline, foundBaseline)
// Fetch the combined baselines
storages := &Storage{
GStorageClient: gsClient,
+ Baseliner: NewBaseliner(gsClient, nil, nil, nil),
}
combined := &baseline.CommitableBaseLine{}
*combined = *masterBaseline
combined.Baseline = masterBaseline.Baseline.DeepCopy()
combined.Baseline.Update(issueBaseline.Baseline)
- foundBaseline, err = storages.FetchBaseline(issueID)
+ foundBaseline, err = storages.Baseliner.FetchBaseline(endCommit.Hash, issueID, 0)
assert.NoError(t, err)
assert.Equal(t, combined, foundBaseline)
}
@@ -127,12 +140,12 @@
// Read the master baseline that has not been written
emptyBaseline := &baseline.CommitableBaseLine{Baseline: types.TestExp{}}
- foundBaseline, err := gsClient.ReadBaseline(0)
+ foundBaseline, err := gsClient.ReadBaseline("", 0)
assert.NoError(t, err)
assert.Equal(t, emptyBaseline, foundBaseline)
// Test reading a non-existing baseline for an issue
- foundBaseline, err = gsClient.ReadBaseline(5344)
+ foundBaseline, err = gsClient.ReadBaseline("", 5344)
assert.NoError(t, err)
assert.Equal(t, emptyBaseline, foundBaseline)
@@ -143,8 +156,9 @@
// Fetch the combined baselines when there are no baselines for the issue
storages := &Storage{
GStorageClient: gsClient,
+ Baseliner: NewBaseliner(gsClient, nil, nil, nil),
}
- foundBaseline, err = storages.FetchBaseline(5344)
+ foundBaseline, err = storages.Baseliner.FetchBaseline(endCommit.Hash, 5344, 0)
assert.NoError(t, err)
assert.Equal(t, masterBaseline, foundBaseline)
}
diff --git a/golden/go/web/web.go b/golden/go/web/web.go
index 4c299e8..62d3599 100644
--- a/golden/go/web/web.go
+++ b/golden/go/web/web.go
@@ -16,6 +16,7 @@
"go.skia.org/infra/go/human"
"go.skia.org/infra/go/issues"
"go.skia.org/infra/go/login"
+ "go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
"go.skia.org/infra/go/tiling"
"go.skia.org/infra/go/util"
@@ -35,11 +36,11 @@
// Define common routes used by multiple servers
const (
- // BASELINE_ROUTE serves the baseline of the master branch
- BASELINE_ROUTE = "/json/baseline"
+ // EXPECATIONS_ROUTE serves the expectations of the master branch
+ EXPECATIONS_ROUTE = "/json/expecations/commit/{commit_hash}"
- // BASELINE_ISSUE_ROUTE serves the baseline for the Gerrit CL identified by 'id'
+ // EXPECATIONS_ISSUE_ROUTE serves the expectations for the Gerrit CL identified by 'issue_id'
- BASELINE_ISSUE_ROUTE = "/json/baseline/{id}"
+ EXPECATIONS_ISSUE_ROUTE = "/json/expecations/issue/{issue_id}"
)
const (
@@ -1017,18 +1018,26 @@
// baseline and the baseline defined for the issue (usually based on tryjob
// results).
func (wh *WebHandlers) JsonBaselineHandler(w http.ResponseWriter, r *http.Request) {
+ commitHash := ""
issueID := int64(0)
var err error
- issueIDStr, ok := mux.Vars(r)["id"]
- if ok {
+ if issueIDStr, ok := mux.Vars(r)["issue_id"]; ok {
issueID, err = strconv.ParseInt(issueIDStr, 10, 64)
if err != nil {
httputils.ReportError(w, r, err, "Issue ID must be valid integer.")
return
}
+ } else {
+ // Since this was not called for an issue, we need to extract a Git hash.
+ var ok bool
+ if commitHash, ok = mux.Vars(r)["commit_hash"]; !ok {
+ msg := "No commit hash provided to fetch expectations"
+ httputils.ReportError(w, r, skerr.Fmt(msg), msg)
+ return
+ }
}
- baseline, err := wh.Storages.FetchBaseline(issueID)
+ baseline, err := wh.Storages.Baseliner.FetchBaseline(commitHash, issueID, 0)
if err != nil {
httputils.ReportError(w, r, err, "Fetching baselines failed.")
return