[CT] Use git from CIPD
Bug: skia:9538
Change-Id: I2d0e33db5e0d598635c2bff6065047f7561d1758
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/253456
Commit-Queue: Ravi Mistry <rmistry@google.com>
Reviewed-by: Eric Boren <borenet@google.com>
Reviewed-by: Ben Wagner aka dogben <benjaminwagner@google.com>
diff --git a/ct/go/master_scripts/capture_archives_on_workers/main.go b/ct/go/master_scripts/capture_archives_on_workers/main.go
index 4c9f48f..a887029 100644
--- a/ct/go/master_scripts/capture_archives_on_workers/main.go
+++ b/ct/go/master_scripts/capture_archives_on_workers/main.go
@@ -14,6 +14,8 @@
"go.skia.org/infra/ct/go/master_scripts/master_common"
"go.skia.org/infra/ct/go/util"
+ "go.skia.org/infra/go/git"
+ "go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
skutil "go.skia.org/infra/go/util"
)
@@ -50,7 +52,11 @@
skutil.LogErr(gs.DeleteRemoteDir(gsBaseDir))
// Find which chromium hash the workers should use.
- chromiumHash, err := util.GetChromiumHash(ctx)
+ gitExec, err := git.Executable(ctx)
+ if err != nil {
+ return skerr.Wrap(err)
+ }
+ chromiumHash, err := util.GetChromiumHash(ctx, gitExec)
if err != nil {
return fmt.Errorf("Could not find the latest chromium hash: %s", err)
}
diff --git a/ct/go/master_scripts/metrics_analysis_on_workers/main.go b/ct/go/master_scripts/metrics_analysis_on_workers/main.go
index 1fb10c1..d1ca265 100644
--- a/ct/go/master_scripts/metrics_analysis_on_workers/main.go
+++ b/ct/go/master_scripts/metrics_analysis_on_workers/main.go
@@ -22,6 +22,8 @@
"go.skia.org/infra/ct/go/master_scripts/master_common"
"go.skia.org/infra/ct/go/util"
"go.skia.org/infra/go/fileutil"
+ "go.skia.org/infra/go/git"
+ "go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
skutil "go.skia.org/infra/go/util"
)
@@ -116,7 +118,11 @@
}
// Find which chromium hash the workers should use.
- chromiumHash, err := util.GetChromiumHash(ctx)
+ gitExec, err := git.Executable(ctx)
+ if err != nil {
+ return skerr.Wrap(err)
+ }
+ chromiumHash, err := util.GetChromiumHash(ctx, gitExec)
if err != nil {
return fmt.Errorf("Could not find the latest chromium hash: %s", err)
}
diff --git a/ct/go/master_scripts/run_chromium_analysis_on_workers/main.go b/ct/go/master_scripts/run_chromium_analysis_on_workers/main.go
index bc6d268..918d6cf 100644
--- a/ct/go/master_scripts/run_chromium_analysis_on_workers/main.go
+++ b/ct/go/master_scripts/run_chromium_analysis_on_workers/main.go
@@ -18,6 +18,7 @@
"go.skia.org/infra/ct/go/master_scripts/master_common"
"go.skia.org/infra/ct/go/util"
"go.skia.org/infra/go/auth"
+ "go.skia.org/infra/go/git"
"go.skia.org/infra/go/gitauth"
"go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
@@ -60,6 +61,12 @@
return fmt.Errorf("Could not instantiate gsutil object: %s", err)
}
+ // Find git exec.
+ gitExec, err := git.Executable(ctx)
+ if err != nil {
+ return skerr.Wrap(err)
+ }
+
// Cleanup dirs after run completes.
defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runID))
// Finish with glog flush and how long the task took.
@@ -113,7 +120,7 @@
// Find which chromium hash the builds should use.
if *chromiumHash == "" {
- *chromiumHash, err = util.GetChromiumHash(ctx)
+ *chromiumHash, err = util.GetChromiumHash(ctx, gitExec)
if err != nil {
return fmt.Errorf("Could not find the latest chromium hash: %s", err)
}
@@ -211,7 +218,7 @@
return fmt.Errorf("Failed to create git cookie updater: %s", err)
}
- if err := util.AddCTRunDataToPerf(ctx, *groupName, *runID, outputCSVLocalPath, gs); err != nil {
+ if err := util.AddCTRunDataToPerf(ctx, *groupName, *runID, outputCSVLocalPath, gitExec, gs); err != nil {
return fmt.Errorf("Could not add CT run data to ct-perf.skia.org: %s", err)
}
}
diff --git a/ct/go/master_scripts/run_chromium_perf_on_workers/main.go b/ct/go/master_scripts/run_chromium_perf_on_workers/main.go
index 17957f5..7d13114 100644
--- a/ct/go/master_scripts/run_chromium_perf_on_workers/main.go
+++ b/ct/go/master_scripts/run_chromium_perf_on_workers/main.go
@@ -18,6 +18,7 @@
"go.skia.org/infra/ct/go/master_scripts/master_common"
"go.skia.org/infra/ct/go/util"
"go.skia.org/infra/go/auth"
+ "go.skia.org/infra/go/git"
"go.skia.org/infra/go/gitauth"
"go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
@@ -86,6 +87,12 @@
}
remoteOutputDir := util.GetPerfRemoteDir(*runID)
+ // Find git exec.
+ gitExec, err := git.Executable(ctx)
+ if err != nil {
+ return skerr.Wrap(err)
+ }
+
// TODO(rmistry): Fix the below.
//
// This is silly, we do the following here:
@@ -144,7 +151,7 @@
// Find which chromium hash the builds should use.
if *chromiumHash == "" {
- *chromiumHash, err = util.GetChromiumHash(ctx)
+ *chromiumHash, err = util.GetChromiumHash(ctx, gitExec)
if err != nil {
return fmt.Errorf("Could not find the latest chromium hash: %s", err)
}
@@ -327,7 +334,7 @@
return fmt.Errorf("Failed to create git cookie updater: %s", err)
}
- if err := util.AddCTRunDataToPerf(ctx, *groupName, *runID, withPatchCSVLocalPath, gs); err != nil {
+ if err := util.AddCTRunDataToPerf(ctx, *groupName, *runID, withPatchCSVLocalPath, gitExec, gs); err != nil {
return fmt.Errorf("Could not add CT run data to ct-perf.skia.org: %s", err)
}
}
diff --git a/ct/go/util/chromium_builds.go b/ct/go/util/chromium_builds.go
index 29a5229..c605cd2 100644
--- a/ct/go/util/chromium_builds.go
+++ b/ct/go/util/chromium_builds.go
@@ -49,13 +49,14 @@
// runID is the unique id of the current run (typically requester + timestamp).
// chromiumHash is the hash the checkout should be synced to.
// pathToPyFiles is the local path to CT's python scripts. Eg: sync_skia_in_chrome.py.
+// gitExec is the local path to the git binary.
// applyPatches if true looks for Chromium/Skia/V8/Catapult patches in the temp dir.
-func CreateTelemetryIsolates(ctx context.Context, runID, chromiumHash, pathToPyFiles string, applyPatches bool) error {
+func CreateTelemetryIsolates(ctx context.Context, runID, chromiumHash, pathToPyFiles, gitExec string, applyPatches bool) error {
chromiumBuildDir, _ := filepath.Split(ChromiumSrcDir)
util.MkdirAll(chromiumBuildDir, 0700)
// Make sure we are starting from a clean slate before the sync.
- if err := ResetChromiumCheckout(ctx, ChromiumSrcDir); err != nil {
+ if err := ResetChromiumCheckout(ctx, ChromiumSrcDir, gitExec); err != nil {
return fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
}
@@ -81,7 +82,7 @@
}
if applyPatches {
- if err := applyRepoPatches(ctx, ChromiumSrcDir, runID); err != nil {
+ if err := applyRepoPatches(ctx, ChromiumSrcDir, runID, gitExec); err != nil {
return fmt.Errorf("Could not apply patches in the chromium checkout in %s: %s", chromiumBuildDir, err)
}
}
@@ -122,10 +123,12 @@
// Chromium's Tot hash is used.
// skiaHash is the hash the checkout should be synced to. If not specified then
// Skia's LKGR hash is used (the hash in Chromium's DEPS file).
+// pathToPyFiles is the local path to CT's python scripts. Eg: sync_skia_in_chrome.py.
+// gitExec is the local path to the git binary.
// applyPatches if true looks for Chromium/Skia/V8/Catapult patches in the temp dir and
// runs once with the patch applied and once without the patch applied.
// uploadSingleBuild if true does not upload a 2nd build of Chromium.
-func CreateChromiumBuildOnSwarming(ctx context.Context, runID, targetPlatform, chromiumHash, skiaHash, pathToPyFiles string, applyPatches, uploadSingleBuild bool) (string, string, error) {
+func CreateChromiumBuildOnSwarming(ctx context.Context, runID, targetPlatform, chromiumHash, skiaHash, pathToPyFiles, gitExec string, applyPatches, uploadSingleBuild bool) (string, string, error) {
chromiumBuildDir, _ := filepath.Split(ChromiumSrcDir)
// Determine which fetch target to use.
var fetchTarget string
@@ -141,14 +144,14 @@
// Find which Chromium commit hash should be used.
var err error
if chromiumHash == "" {
- chromiumHash, err = GetChromiumHash(ctx)
+ chromiumHash, err = GetChromiumHash(ctx, gitExec)
if err != nil {
return "", "", fmt.Errorf("Error while finding Chromium's Hash: %s", err)
}
}
// Make sure we are starting from a clean slate before the sync.
- if err := ResetChromiumCheckout(ctx, filepath.Join(chromiumBuildDir, "src")); err != nil {
+ if err := ResetChromiumCheckout(ctx, filepath.Join(chromiumBuildDir, "src"), gitExec); err != nil {
return "", "", fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
}
@@ -178,7 +181,7 @@
googleStorageDirName := ChromiumBuildDir(chromiumHash, skiaHash, runID)
if applyPatches {
- if err := applyRepoPatches(ctx, filepath.Join(chromiumBuildDir, "src"), runID); err != nil {
+ if err := applyRepoPatches(ctx, filepath.Join(chromiumBuildDir, "src"), runID, gitExec); err != nil {
return "", "", fmt.Errorf("Could not apply patches in the chromium checkout in %s: %s", chromiumBuildDir, err)
}
// Add "try" prefix and "withpatch" suffix.
@@ -205,11 +208,11 @@
// will be created without applying any patches except the chromium_base_build patch if specified.
if !uploadSingleBuild {
// Make sure we are starting from a clean slate.
- if err := ResetChromiumCheckout(ctx, filepath.Join(chromiumBuildDir, "src")); err != nil {
+ if err := ResetChromiumCheckout(ctx, filepath.Join(chromiumBuildDir, "src"), gitExec); err != nil {
return "", "", fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
}
if applyPatches {
- if err := applyBaseBuildRepoPatches(ctx, filepath.Join(chromiumBuildDir, "src"), runID); err != nil {
+ if err := applyBaseBuildRepoPatches(ctx, filepath.Join(chromiumBuildDir, "src"), runID, gitExec); err != nil {
return "", "", fmt.Errorf("Could not apply patches in the chromium checkout in %s: %s", chromiumBuildDir, err)
}
}
@@ -227,10 +230,10 @@
}
// GetChromiumHash uses ls-remote to find and return Chromium's Tot commit hash.
-func GetChromiumHash(ctx context.Context) (string, error) {
+func GetChromiumHash(ctx context.Context, gitExec string) (string, error) {
stdoutBuf := bytes.Buffer{}
totArgs := []string{"ls-remote", "https://chromium.googlesource.com/chromium/src.git", "--verify", "refs/heads/master"}
- if err := ExecuteCmd(ctx, BINARY_GIT, totArgs, []string{}, GIT_LS_REMOTE_TIMEOUT, &stdoutBuf, nil); err != nil {
+ if err := ExecuteCmd(ctx, gitExec, totArgs, []string{}, GIT_LS_REMOTE_TIMEOUT, &stdoutBuf, nil); err != nil {
return "", fmt.Errorf("Error while finding Chromium's ToT: %s", err)
}
tokens := strings.Split(stdoutBuf.String(), "\t")
@@ -314,7 +317,7 @@
return commitHash[0:TRUNCATED_HASH_LENGTH]
}
-func ResetChromiumCheckout(ctx context.Context, chromiumSrcDir string) error {
+func ResetChromiumCheckout(ctx context.Context, chromiumSrcDir, gitExec string) error {
// Clean up any left over lock files from sync errors of previous runs.
err := os.Remove(filepath.Join(chromiumSrcDir, ".git", "index.lock"))
if err != nil {
@@ -322,36 +325,36 @@
}
sklog.Info("Resetting Skia")
skiaDir := filepath.Join(chromiumSrcDir, "third_party", "skia")
- if err := ResetCheckout(ctx, skiaDir, "HEAD", "master"); err != nil {
+ if err := ResetCheckout(ctx, skiaDir, "HEAD", "master", gitExec); err != nil {
return fmt.Errorf("Could not reset Skia's checkout in %s: %s", skiaDir, err)
}
sklog.Info("Resetting V8")
v8Dir := filepath.Join(chromiumSrcDir, "v8")
// Detach the v8 checkout because of the problem described in
// https://bugs.chromium.org/p/chromium/issues/detail?id=584742#c8
- if err := ResetCheckout(ctx, v8Dir, "HEAD", "--detach"); err != nil {
+ if err := ResetCheckout(ctx, v8Dir, "HEAD", "--detach", gitExec); err != nil {
return fmt.Errorf("Could not reset V8's checkout in %s: %s", v8Dir, err)
}
sklog.Info("Resetting Catapult")
catapultDir := filepath.Join(chromiumSrcDir, RelativeCatapultSrcDir)
- if err := ResetCheckout(ctx, catapultDir, "HEAD", "master"); err != nil {
+ if err := ResetCheckout(ctx, catapultDir, "HEAD", "master", gitExec); err != nil {
return fmt.Errorf("Could not reset Catapult's checkout in %s: %s", catapultDir, err)
}
sklog.Info("Resetting Chromium")
- if err := ResetCheckout(ctx, chromiumSrcDir, "HEAD", "master"); err != nil {
+ if err := ResetCheckout(ctx, chromiumSrcDir, "HEAD", "master", gitExec); err != nil {
return fmt.Errorf("Could not reset Chromium's checkout in %s: %s", chromiumSrcDir, err)
}
return nil
}
-func applyBaseBuildRepoPatches(ctx context.Context, chromiumSrcDir, runID string) error {
+func applyBaseBuildRepoPatches(ctx context.Context, chromiumSrcDir, runID, gitExec string) error {
// Apply Chromium patch for the base build if it exists.
chromiumPatch := filepath.Join(os.TempDir(), runID+".chromium_base_build.patch")
if _, err := os.Stat(chromiumPatch); err == nil {
chromiumPatchFile, _ := os.Open(chromiumPatch)
chromiumPatchFileInfo, _ := chromiumPatchFile.Stat()
if chromiumPatchFileInfo.Size() > 10 {
- if err := ApplyPatch(ctx, chromiumPatch, chromiumSrcDir); err != nil {
+ if err := ApplyPatch(ctx, chromiumPatch, chromiumSrcDir, gitExec); err != nil {
return fmt.Errorf("Could not apply Chromium's patch for the base build in %s: %s", chromiumSrcDir, err)
}
}
@@ -359,7 +362,7 @@
return nil
}
-func applyRepoPatches(ctx context.Context, chromiumSrcDir, runID string) error {
+func applyRepoPatches(ctx context.Context, chromiumSrcDir, runID, gitExec string) error {
// Apply Skia patch if it exists.
skiaDir := filepath.Join(chromiumSrcDir, "third_party", "skia")
skiaPatch := filepath.Join(os.TempDir(), runID+".skia.patch")
@@ -367,7 +370,7 @@
skiaPatchFile, _ := os.Open(skiaPatch)
skiaPatchFileInfo, _ := skiaPatchFile.Stat()
if skiaPatchFileInfo.Size() > 10 {
- if err := ApplyPatch(ctx, skiaPatch, skiaDir); err != nil {
+ if err := ApplyPatch(ctx, skiaPatch, skiaDir, gitExec); err != nil {
return fmt.Errorf("Could not apply Skia's patch in %s: %s", skiaDir, err)
}
}
@@ -379,7 +382,7 @@
v8PatchFile, _ := os.Open(v8Patch)
v8PatchFileInfo, _ := v8PatchFile.Stat()
if v8PatchFileInfo.Size() > 10 {
- if err := ApplyPatch(ctx, v8Patch, v8Dir); err != nil {
+ if err := ApplyPatch(ctx, v8Patch, v8Dir, gitExec); err != nil {
return fmt.Errorf("Could not apply V8's patch in %s: %s", v8Dir, err)
}
}
@@ -391,7 +394,7 @@
catapultPatchFile, _ := os.Open(catapultPatch)
catapultPatchFileInfo, _ := catapultPatchFile.Stat()
if catapultPatchFileInfo.Size() > 10 {
- if err := ApplyPatch(ctx, catapultPatch, catapultDir); err != nil {
+ if err := ApplyPatch(ctx, catapultPatch, catapultDir, gitExec); err != nil {
return fmt.Errorf("Could not apply Catapult's patch in %s: %s", catapultDir, err)
}
}
@@ -402,7 +405,7 @@
chromiumPatchFile, _ := os.Open(chromiumPatch)
chromiumPatchFileInfo, _ := chromiumPatchFile.Stat()
if chromiumPatchFileInfo.Size() > 10 {
- if err := ApplyPatch(ctx, chromiumPatch, chromiumSrcDir); err != nil {
+ if err := ApplyPatch(ctx, chromiumPatch, chromiumSrcDir, gitExec); err != nil {
return fmt.Errorf("Could not apply Chromium's patch in %s: %s", chromiumSrcDir, err)
}
}
diff --git a/ct/go/util/constants.go b/ct/go/util/constants.go
index fa40edd..cb5a283 100644
--- a/ct/go/util/constants.go
+++ b/ct/go/util/constants.go
@@ -52,7 +52,6 @@
BINARY_LUA_PICTURES = "lua_pictures"
BINARY_SKPINFO = "skpinfo"
BINARY_ADB = "adb"
- BINARY_GIT = "git"
BINARY_MAIL = "mail"
BINARY_LUA = "lua"
diff --git a/ct/go/util/ct_perf.go b/ct/go/util/ct_perf.go
index fe444ff..2d31cc1 100644
--- a/ct/go/util/ct_perf.go
+++ b/ct/go/util/ct_perf.go
@@ -80,7 +80,7 @@
// }
// }
//
-func AddCTRunDataToPerf(ctx context.Context, groupName, runID, pathToCSVResults string, gs *GcsUtil) error {
+func AddCTRunDataToPerf(ctx context.Context, groupName, runID, pathToCSVResults, gitExec string, gs *GcsUtil) error {
// Set uniqueID and create the workdir.
uniqueID := fmt.Sprintf("%s-%d", runID, time.Now().Unix())
workdir := path.Join(CTPerfWorkDir, uniqueID)
@@ -96,7 +96,7 @@
if err != nil {
return skerr.Fmt("Could not create %s checkout in %s: %s", CT_PERF_REPO, tmpDir, err)
}
- hash, err := commitToSyntheticRepo(ctx, groupName, uniqueID, checkout)
+ hash, err := commitToSyntheticRepo(ctx, groupName, uniqueID, gitExec, checkout)
if err != nil {
return skerr.Fmt("Could not commit to %s: %s", CT_PERF_REPO, err)
}
@@ -131,7 +131,7 @@
// commitToSyntheticRepo creates a file with the same name as the uniqueID and commits
// it into the specified repo. Returns the full hash of the commit.
-func commitToSyntheticRepo(ctx context.Context, groupName, uniqueID string, checkout *git.Checkout) (string, error) {
+func commitToSyntheticRepo(ctx context.Context, groupName, uniqueID, gitExec string, checkout *git.Checkout) (string, error) {
// Create a new file using the uniqueID and commit it to the synthetic repo.
if err := ioutil.WriteFile(filepath.Join(checkout.Dir(), uniqueID), []byte(uniqueID), 0644); err != nil {
return "", skerr.Fmt("Failed to write %s: %s", uniqueID, err)
@@ -141,7 +141,7 @@
}
output := bytes.Buffer{}
cmd := exec.Command{
- Name: "git",
+ Name: gitExec,
Args: []string{"commit", "-m", fmt.Sprintf("Commit for %s by %s", groupName, uniqueID)},
Dir: checkout.Dir(),
InheritEnv: true,
diff --git a/ct/go/util/ct_perf_test.go b/ct/go/util/ct_perf_test.go
index 997fdc2..4f1174f 100644
--- a/ct/go/util/ct_perf_test.go
+++ b/ct/go/util/ct_perf_test.go
@@ -51,7 +51,9 @@
require.NoError(t, err)
// Commit to the synthetic repo
- hash, err := commitToSyntheticRepo(ctx, TEST_GROUP_NAME, TEST_UNIQUE_ID, checkout)
+ gitExec, err := git.Executable(ctx)
+ require.NoError(t, err)
+ hash, err := commitToSyntheticRepo(ctx, TEST_GROUP_NAME, TEST_UNIQUE_ID, gitExec, checkout)
require.NoError(t, err)
// Make sure email and name are correctly set.
diff --git a/ct/go/util/util.go b/ct/go/util/util.go
index 2ba6926..62a079b 100644
--- a/ct/go/util/util.go
+++ b/ct/go/util/util.go
@@ -22,6 +22,7 @@
"sync"
"time"
+ "go.skia.org/infra/go/cipd"
"go.skia.org/infra/go/common"
"go.skia.org/infra/go/exec"
"go.skia.org/infra/go/fileutil"
@@ -74,7 +75,7 @@
// SyncDir runs "git pull" and "gclient sync" on the specified directory.
// The revisions map enforces revision/hash for the solutions with the format
// branch@rev.
-func SyncDir(ctx context.Context, dir string, revisions map[string]string, additionalArgs []string) error {
+func SyncDir(ctx context.Context, dir string, revisions map[string]string, additionalArgs []string, gitExec string) error {
err := os.Chdir(dir)
if err != nil {
return fmt.Errorf("Could not chdir to %s: %s", dir, err)
@@ -85,7 +86,7 @@
sklog.Warningf("%d. retry for syncing %s", i, dir)
}
- err = syncDirStep(ctx, revisions, additionalArgs)
+ err = syncDirStep(ctx, revisions, additionalArgs, gitExec)
if err == nil {
break
}
@@ -98,8 +99,8 @@
return err
}
-func syncDirStep(ctx context.Context, revisions map[string]string, additionalArgs []string) error {
- err := ExecuteCmd(ctx, BINARY_GIT, []string{"pull"}, []string{}, GIT_PULL_TIMEOUT, nil, nil)
+func syncDirStep(ctx context.Context, revisions map[string]string, additionalArgs []string, gitExec string) error {
+ err := ExecuteCmd(ctx, gitExec, []string{"pull"}, []string{}, GIT_PULL_TIMEOUT, nil, nil)
if err != nil {
return fmt.Errorf("Error running git pull: %s", err)
}
@@ -180,37 +181,37 @@
}
// ResetCheckout resets the specified Git checkout.
-func ResetCheckout(ctx context.Context, dir, resetTo, checkoutArg string) error {
+func ResetCheckout(ctx context.Context, dir, resetTo, checkoutArg, gitExec string) error {
if err := os.Chdir(dir); err != nil {
return fmt.Errorf("Could not chdir to %s: %s", dir, err)
}
// Clear out remnants of incomplete rebases from .git/rebase-apply.
rebaseArgs := []string{"rebase", "--abort"}
- util.LogErr(ExecuteCmd(ctx, BINARY_GIT, rebaseArgs, []string{}, GIT_REBASE_TIMEOUT, nil, nil))
+ util.LogErr(ExecuteCmd(ctx, gitExec, rebaseArgs, []string{}, GIT_REBASE_TIMEOUT, nil, nil))
// Checkout the specified branch or argument (eg: --detach).
checkoutArgs := []string{"checkout", checkoutArg}
- util.LogErr(ExecuteCmd(ctx, BINARY_GIT, checkoutArgs, []string{}, GIT_CHECKOUT_TIMEOUT, nil, nil))
+ util.LogErr(ExecuteCmd(ctx, gitExec, checkoutArgs, []string{}, GIT_CHECKOUT_TIMEOUT, nil, nil))
// Run "git reset --hard HEAD"
resetArgs := []string{"reset", "--hard", resetTo}
- util.LogErr(ExecuteCmd(ctx, BINARY_GIT, resetArgs, []string{}, GIT_RESET_TIMEOUT, nil, nil))
+ util.LogErr(ExecuteCmd(ctx, gitExec, resetArgs, []string{}, GIT_RESET_TIMEOUT, nil, nil))
// Run "git clean -f"
// Not doing "-d" here because it can delete directories like "/android_build_tools/aapt2/lib64/"
// even if "/android_build_tools/aapt2/lib64/*.so" is in .gitignore.
cleanArgs := []string{"clean", "-f"}
- util.LogErr(ExecuteCmd(ctx, BINARY_GIT, cleanArgs, []string{}, GIT_CLEAN_TIMEOUT, nil, nil))
+ util.LogErr(ExecuteCmd(ctx, gitExec, cleanArgs, []string{}, GIT_CLEAN_TIMEOUT, nil, nil))
return nil
}
// ApplyPatch applies a patch to a Git checkout.
-func ApplyPatch(ctx context.Context, patch, dir string) error {
+func ApplyPatch(ctx context.Context, patch, dir, gitExec string) error {
if err := os.Chdir(dir); err != nil {
return fmt.Errorf("Could not chdir to %s: %s", dir, err)
}
// Run "git apply --index -p1 --verbose --ignore-whitespace
// --ignore-space-change ${PATCH_FILE}"
args := []string{"apply", "--index", "-p1", "--verbose", "--ignore-whitespace", "--ignore-space-change", patch}
- return ExecuteCmd(ctx, BINARY_GIT, args, []string{}, GIT_APPLY_TIMEOUT, nil, nil)
+ return ExecuteCmd(ctx, gitExec, args, []string{}, GIT_APPLY_TIMEOUT, nil, nil)
}
// CleanTmpDir deletes all tmp files from the caller because telemetry tends to
@@ -518,7 +519,7 @@
// Trigger and collect swarming tasks.
for taskMap := range chTasks {
// Trigger swarming using the isolate hashes.
- tasks, err := s.TriggerSwarmingTasks(ctx, taskMap, dimensions, map[string]string{"runid": runID}, []string{}, priority, 7*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
+ tasks, err := s.TriggerSwarmingTasks(ctx, taskMap, dimensions, map[string]string{"runid": runID}, map[string]string{}, []string{}, priority, 7*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
if err != nil {
return numTasks, fmt.Errorf("Could not trigger swarming tasks: %s", err)
}
@@ -538,7 +539,7 @@
return
}
sklog.Infof("Retrying task %s with high priority %d", task.Title, TASKS_PRIORITY_HIGH)
- retryTask, err := s.TriggerSwarmingTasks(ctx, map[string]string{task.Title: tasksToHashes[task.Title]}, dimensions, map[string]string{"runid": runID}, []string{}, TASKS_PRIORITY_HIGH, 7*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
+ retryTask, err := s.TriggerSwarmingTasks(ctx, map[string]string{task.Title: tasksToHashes[task.Title]}, dimensions, map[string]string{"runid": runID}, map[string]string{}, []string{}, TASKS_PRIORITY_HIGH, 7*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
if err != nil {
sklog.Errorf("Could not trigger retry of task %s: %s", task.Title, err)
return
@@ -1011,8 +1012,8 @@
if err != nil {
return "", fmt.Errorf("Could not batch archive target: %s", err)
}
- // Trigger swarming using the isolate hash.
- tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, map[string]string{"runid": runID}, []string{}, swarming.RECOMMENDED_PRIORITY, 2*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
+ // Trigger swarming using the isolate hash. Specify CIPD git packages to use for isolate telemetry's git operations.
+ tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, map[string]string{"runid": runID}, map[string]string{"PATH": "cipd_bin_packages"}, cipd.GetStrCIPDPkgs(cipd.PkgsGit), swarming.RECOMMENDED_PRIORITY, 2*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
if err != nil {
return "", fmt.Errorf("Could not trigger swarming task: %s", err)
}
@@ -1064,9 +1065,9 @@
if err != nil {
return "", fmt.Errorf("Could not batch archive target: %s", err)
}
- // Trigger swarming using the isolate hash.
+ // Trigger swarming using the isolate hash. Specify CIPD git packages to use for the master script's git operations.
dimensions := GCE_LINUX_MASTER_DIMENSIONS
- tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, map[string]string{"runid": runID}, nil, swarming.RECOMMENDED_PRIORITY, 7*24*time.Hour, 3*24*time.Hour, 3*24*time.Hour, false, true, getServiceAccount(dimensions))
+ tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, map[string]string{"runid": runID}, map[string]string{"PATH": "cipd_bin_packages"}, cipd.GetStrCIPDPkgs(cipd.PkgsGit), swarming.RECOMMENDED_PRIORITY, 7*24*time.Hour, 3*24*time.Hour, 3*24*time.Hour, false, true, getServiceAccount(dimensions))
if err != nil {
return "", fmt.Errorf("Could not trigger swarming task: %s", err)
}
@@ -1118,6 +1119,8 @@
if err != nil {
return nil, fmt.Errorf("Could not batch archive target: %s", err)
}
+ // Specify CIPD git packages to use for build repo's git operations.
+ cipdPackages = append(cipdPackages, cipd.GetStrCIPDPkgs(cipd.PkgsGit)...)
// Trigger swarming using the isolate hash.
var dimensions map[string]string
if targetPlatform == PLATFORM_WINDOWS {
@@ -1127,7 +1130,7 @@
} else {
dimensions = GCE_LINUX_BUILDER_DIMENSIONS
}
- tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, map[string]string{"runid": runID}, cipdPackages, swarming.RECOMMENDED_PRIORITY, 2*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
+ tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, map[string]string{"runid": runID}, map[string]string{"PATH": "cipd_bin_packages"}, cipdPackages, swarming.RECOMMENDED_PRIORITY, 2*24*time.Hour, hardTimeout, ioTimeout, false, true, getServiceAccount(dimensions))
if err != nil {
return nil, fmt.Errorf("Could not trigger swarming task: %s", err)
}
@@ -1166,7 +1169,7 @@
return written, nil
}
-func DownloadAndApplyPatch(ctx context.Context, patchName, localDir, remotePatchesDir, checkout string, gs *GcsUtil) error {
+func DownloadAndApplyPatch(ctx context.Context, patchName, localDir, remotePatchesDir, checkout, gitExec string, gs *GcsUtil) error {
patchLocalPath := filepath.Join(localDir, patchName)
patchRemotePath := filepath.Join(remotePatchesDir, patchName)
written, err := DownloadPatch(patchLocalPath, patchRemotePath, gs)
@@ -1175,7 +1178,7 @@
}
// Apply patch to the local checkout.
if written > 10 {
- if err := ApplyPatch(ctx, patchLocalPath, checkout); err != nil {
+ if err := ApplyPatch(ctx, patchLocalPath, checkout, gitExec); err != nil {
return fmt.Errorf("Could not apply patch in %s: %s", checkout, err)
}
}
diff --git a/ct/go/worker_scripts/build_repo/main.go b/ct/go/worker_scripts/build_repo/main.go
index 8c5d07e..64caebd 100644
--- a/ct/go/worker_scripts/build_repo/main.go
+++ b/ct/go/worker_scripts/build_repo/main.go
@@ -14,6 +14,8 @@
"go.skia.org/infra/ct/go/util"
"go.skia.org/infra/ct/go/worker_scripts/worker_common"
+ "go.skia.org/infra/go/git"
+ "go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
skutil "go.skia.org/infra/go/util"
)
@@ -38,6 +40,12 @@
return errors.New("Must specify --out")
}
+ // Find git exec.
+ gitExec, err := git.Executable(ctx)
+ if err != nil {
+ return skerr.Wrap(err)
+ }
+
// Instantiate GcsUtil object.
gs, err := util.NewGcsUtil(nil)
if err != nil {
@@ -69,7 +77,7 @@
}
}
pathToPyFiles := util.GetPathToPyFiles(*worker_common.Local)
- chromiumHash, skiaHash, err := util.CreateChromiumBuildOnSwarming(ctx, *runID, *targetPlatform, chromiumHash, skiaHash, pathToPyFiles, applyPatches, *uploadSingleBuild)
+ chromiumHash, skiaHash, err := util.CreateChromiumBuildOnSwarming(ctx, *runID, *targetPlatform, chromiumHash, skiaHash, pathToPyFiles, gitExec, applyPatches, *uploadSingleBuild)
if err != nil {
return fmt.Errorf("Could not create chromium build: %s", err)
}
@@ -80,7 +88,7 @@
remoteDirs = append(remoteDirs, fmt.Sprintf("try-%s-withpatch", util.ChromiumBuildDir(chromiumHash, skiaHash, *runID)))
} else if *repoAndTarget == "skiaLuaPictures" {
// Sync Skia tree. Specify --nohooks otherwise this step could log errors.
- if err := util.SyncDir(ctx, util.SkiaTreeDir, map[string]string{}, []string{"--nohooks"}); err != nil {
+ if err := util.SyncDir(ctx, util.SkiaTreeDir, map[string]string{}, []string{"--nohooks"}, gitExec); err != nil {
return fmt.Errorf("Could not sync Skia: %s", err)
}
// Build lua_pictures.
@@ -96,7 +104,7 @@
remoteDirs = append(remoteDirs, *runID)
} else if *repoAndTarget == "skiaSKPInfo" {
// Sync Skia tree. Specify --nohooks otherwise this step could log errors.
- if err := util.SyncDir(ctx, util.SkiaTreeDir, map[string]string{}, []string{"--nohooks"}); err != nil {
+ if err := util.SyncDir(ctx, util.SkiaTreeDir, map[string]string{}, []string{"--nohooks"}, gitExec); err != nil {
return fmt.Errorf("Could not sync Skia: %s", err)
}
// Build skpinfo.
diff --git a/ct/go/worker_scripts/isolate_telemetry/main.go b/ct/go/worker_scripts/isolate_telemetry/main.go
index 9957828..6eb4717 100644
--- a/ct/go/worker_scripts/isolate_telemetry/main.go
+++ b/ct/go/worker_scripts/isolate_telemetry/main.go
@@ -15,7 +15,9 @@
"go.skia.org/infra/ct/go/util"
"go.skia.org/infra/ct/go/worker_scripts/worker_common"
+ "go.skia.org/infra/go/git"
"go.skia.org/infra/go/isolate"
+ "go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
skutil "go.skia.org/infra/go/util"
)
@@ -41,6 +43,12 @@
return errors.New("Must specify --out")
}
+ // Find git exec.
+ gitExec, err := git.Executable(ctx)
+ if err != nil {
+ return skerr.Wrap(err)
+ }
+
// Instantiate GcsUtil object.
gs, err := util.NewGcsUtil(nil)
if err != nil {
@@ -60,7 +68,7 @@
}
}
pathToPyFiles := util.GetPathToPyFiles(*worker_common.Local)
- if err = util.CreateTelemetryIsolates(ctx, *runID, *chromiumHash, pathToPyFiles, applyPatches); err != nil {
+ if err = util.CreateTelemetryIsolates(ctx, *runID, *chromiumHash, pathToPyFiles, gitExec, applyPatches); err != nil {
return fmt.Errorf("Could not create telemetry isolates: %s", err)
}
diff --git a/go/cipd/cipd.go b/go/cipd/cipd.go
index bbb5b18..f495d20 100644
--- a/go/cipd/cipd.go
+++ b/go/cipd/cipd.go
@@ -61,6 +61,10 @@
Version string `json:"version"`
}
+func (p *Package) String() string {
+ return fmt.Sprintf("%s:%s:%s", p.Path, p.Name, p.Version)
+}
+
// GetPackage returns the definition for the package with the given name, or an
// error if the package does not exist in the registry.
func GetPackage(pkg string) (*Package, error) {
@@ -81,6 +85,16 @@
return rv
}
+// GetStrCIPDPkgs returns the given CIPD packages as a slice of strings. It was
+// created for go/swarming and can be removed when go/swarming has no more clients.
+func GetStrCIPDPkgs(pkgs []*Package) []string {
+ cipdPkgs := []string{}
+ for _, p := range pkgs {
+ cipdPkgs = append(cipdPkgs, p.String())
+ }
+ return cipdPkgs
+}
+
// Run "cipd ensure" to get the correct packages in the given location. Note
// that any previously-installed packages in the given rootDir will be removed
// if not specified again.
diff --git a/go/swarming/swarming.go b/go/swarming/swarming.go
index d54f9b1..db2b780 100644
--- a/go/swarming/swarming.go
+++ b/go/swarming/swarming.go
@@ -55,6 +55,7 @@
Expiration time.Duration
Idempotent bool
ServiceAccount string
+ EnvPrefixes map[string]string
TaskID string // Populated after the task is triggered.
}
@@ -96,6 +97,9 @@
for _, c := range t.CipdPackages {
triggerArgs = append(triggerArgs, "--cipd-package", c)
}
+ for k, v := range t.EnvPrefixes {
+ triggerArgs = append(triggerArgs, "--env-prefix", k, v)
+ }
if t.Idempotent {
triggerArgs = append(triggerArgs, "--idempotent")
}
@@ -278,7 +282,7 @@
}
// Trigger swarming using the specified hashes and dimensions.
-func (s *SwarmingClient) TriggerSwarmingTasks(ctx context.Context, tasksToHashes, dimensions, tags map[string]string, cipdPackages []string, priority int, expiration, hardTimeout, ioTimeout time.Duration, idempotent, addTaskNameAsTag bool, serviceAccount string) ([]*SwarmingTask, error) {
+func (s *SwarmingClient) TriggerSwarmingTasks(ctx context.Context, tasksToHashes, dimensions, tags, envPrefixes map[string]string, cipdPackages []string, priority int, expiration, hardTimeout, ioTimeout time.Duration, idempotent, addTaskNameAsTag bool, serviceAccount string) ([]*SwarmingTask, error) {
tasks := []*SwarmingTask{}
for taskName, hash := range tasksToHashes {
@@ -304,6 +308,7 @@
Expiration: expiration,
Idempotent: idempotent,
ServiceAccount: serviceAccount,
+ EnvPrefixes: envPrefixes,
}
if err := task.Trigger(ctx, s, hardTimeout, ioTimeout); err != nil {
return nil, fmt.Errorf("Could not trigger task %s: %s", taskName, err)
diff --git a/go/swarming/swarming_test.go b/go/swarming/swarming_test.go
index f843033..71523dc 100644
--- a/go/swarming/swarming_test.go
+++ b/go/swarming/swarming_test.go
@@ -115,7 +115,7 @@
// Trigger swarming using the isolate hashes.
dimensions := map[string]string{"pool": "Chrome"}
tags := map[string]string{"testing": "123"}
- tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, tags, []string{}, RECOMMENDED_PRIORITY, RECOMMENDED_EXPIRATION, RECOMMENDED_HARD_TIMEOUT, RECOMMENDED_IO_TIMEOUT, false, true, "")
+ tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, tags, map[string]string{}, []string{}, RECOMMENDED_PRIORITY, RECOMMENDED_EXPIRATION, RECOMMENDED_HARD_TIMEOUT, RECOMMENDED_IO_TIMEOUT, false, true, "")
require.NoError(t, err)
// Collect both output and file output of all tasks.
@@ -179,7 +179,7 @@
// Trigger swarming using the isolate hashes.
dimensions := map[string]string{"pool": "Chrome"}
tags := map[string]string{"testing": "123"}
- tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, tags, []string{}, RECOMMENDED_PRIORITY, RECOMMENDED_EXPIRATION, RECOMMENDED_HARD_TIMEOUT, RECOMMENDED_IO_TIMEOUT, false, false, "")
+ tasks, err := s.TriggerSwarmingTasks(ctx, tasksToHashes, dimensions, tags, map[string]string{}, []string{}, RECOMMENDED_PRIORITY, RECOMMENDED_EXPIRATION, RECOMMENDED_HARD_TIMEOUT, RECOMMENDED_IO_TIMEOUT, false, false, "")
require.NoError(t, err)
// Collect testTask1. It should have failed.