[bigtable] Consolidate to prod, staging, and internal instances

This change covers only my projects that use BigTable; the others should
eventually follow.
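
Services that previously passed separate tasks-cfg flags now pass a
single pair of BigTable flags. For illustration only (the exact values
for each service are in the .service changes below):

  Before:
    --tasks_cfg_project=skia-public --tasks_cfg_instance=tasks-cfg-prod

  After:
    --bigtable_project=skia-public --bigtable_instance=production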

Bug: skia:
Change-Id: I61703ec99fef46e6138f427d725c74dac89bcb69
Reviewed-on: https://skia-review.googlesource.com/c/179251
Commit-Queue: Eric Boren <borenet@google.com>
Reviewed-by: Stephan Altmueller <stephana@google.com>
Reviewed-by: Ben Wagner <benjaminwagner@google.com>
diff --git a/datahopper/go/bot_metrics/bot_metrics.go b/datahopper/go/bot_metrics/bot_metrics.go
index 7775837..691d8a6 100644
--- a/datahopper/go/bot_metrics/bot_metrics.go
+++ b/datahopper/go/bot_metrics/bot_metrics.go
@@ -400,7 +400,7 @@
 }
 
 // Start initiates "average time to X% bot coverage" metrics data generation.
-func Start(ctx context.Context, taskDb db.TaskReader, workdir, recipesCfgFile, tasksCfgProject, tasksCfgInstance string, ts oauth2.TokenSource) error {
+func Start(ctx context.Context, taskDb db.TaskReader, workdir, recipesCfgFile, btProject, btInstance string, ts oauth2.TokenSource) error {
 	// Setup.
 	if err := os.MkdirAll(workdir, os.ModePerm); err != nil {
 		return err
@@ -418,7 +418,7 @@
 		return fmt.Errorf("Failed to sync depot_tools: %s", err)
 	}
 
-	tcc, err := specs.NewTaskCfgCache(ctx, repos, depotTools, path.Join(workdir, "taskCfgCache"), 1, tasksCfgProject, tasksCfgInstance, ts)
+	tcc, err := specs.NewTaskCfgCache(ctx, repos, depotTools, path.Join(workdir, "taskCfgCache"), 1, btProject, btInstance, ts)
 	if err != nil {
 		return fmt.Errorf("Failed to create TaskCfgCache: %s", err)
 	}
diff --git a/datahopper/go/datahopper/main.go b/datahopper/go/datahopper/main.go
index 15484e8..cc3f320 100644
--- a/datahopper/go/datahopper/main.go
+++ b/datahopper/go/datahopper/main.go
@@ -37,6 +37,10 @@
 
 // flags
 var (
+	// TODO(borenet): Combine btInstance, firestoreInstance, and
+	// pubsubTopicSet.
+	btInstance         = flag.String("bigtable_instance", "", "BigTable instance to use.")
+	btProject          = flag.String("bigtable_project", "", "GCE project to use for BigTable.")
 	firestoreInstance  = flag.String("firestore_instance", "", "Firestore instance to use, eg. \"prod\"")
 	local              = flag.Bool("local", false, "Running locally if true. As opposed to in production.")
 	promPort           = flag.String("prom_port", ":20000", "Metrics service address (e.g., ':10110')")
@@ -44,11 +48,9 @@
 	taskSchedulerDbUrl = flag.String("task_db_url", "http://skia-task-scheduler:8008/db/", "Where the Skia task scheduler database is hosted.")
 	workdir            = flag.String("workdir", ".", "Working directory used by data processors.")
 
-	perfBucket       = flag.String("perf_bucket", "skia-perf", "The GCS bucket that should be used for writing into perf")
-	perfPrefix       = flag.String("perf_duration_prefix", "task-duration", "The folder name in the bucket that task duration metric shoudl be written.")
-	pubsubTopicSet   = flag.String("pubsub_topic_set", "", fmt.Sprintf("Pubsub topic set; one of: %v", pubsub.VALID_TOPIC_SETS))
-	tasksCfgProject  = flag.String("tasks_cfg_project", "", "GCE project to use for tasks cfg cache.")
-	tasksCfgInstance = flag.String("tasks_cfg_instance", "", "BigTable instance to use for tasks cfg cache.")
+	perfBucket     = flag.String("perf_bucket", "skia-perf", "The GCS bucket that should be used for writing into perf")
+	perfPrefix     = flag.String("perf_duration_prefix", "task-duration", "The folder name in the bucket that task duration metric should be written.")
+	pubsubTopicSet = flag.String("pubsub_topic_set", "", fmt.Sprintf("Pubsub topic set; one of: %v", pubsub.VALID_TOPIC_SETS))
 )
 
 var (
@@ -196,7 +198,7 @@
 	if *recipesCfgFile == "" {
 		*recipesCfgFile = path.Join(*workdir, "recipes.cfg")
 	}
-	if err := bot_metrics.Start(ctx, d, *workdir, *recipesCfgFile, *tasksCfgProject, *tasksCfgInstance, newTs); err != nil {
+	if err := bot_metrics.Start(ctx, d, *workdir, *recipesCfgFile, *btProject, *btInstance, newTs); err != nil {
 		sklog.Fatal(err)
 	}
 
diff --git a/datahopper/sys/datahopperd.service b/datahopper/sys/datahopperd.service
index 81a0cfe..cad45be 100644
--- a/datahopper/sys/datahopperd.service
+++ b/datahopper/sys/datahopperd.service
@@ -6,8 +6,8 @@
 [Service]
 ExecStart=/usr/local/bin/datahopper \
     --logtostderr \
-    --tasks_cfg_project=skia-public \
-    --tasks_cfg_instance=tasks-cfg-prod \
+    --bigtable_project=skia-public \
+    --bigtable_instance=tasks-cfg-prod \
     --workdir=/mnt/pd0/datahopper_workdir \
 Restart=always
 User=default
diff --git a/go/bt/testutil/testutil.go b/go/bt/testutil/testutil.go
new file mode 100644
index 0000000..78f94a8
--- /dev/null
+++ b/go/bt/testutil/testutil.go
@@ -0,0 +1,19 @@
+package bt_testutil
+
+import (
+	"fmt"
+
+	"github.com/google/uuid"
+	assert "github.com/stretchr/testify/require"
+	"go.skia.org/infra/go/bt"
+	"go.skia.org/infra/go/testutils"
+)
+
+func SetupBigTable(t testutils.TestingT, cfgs ...bt.TableConfig) (string, string, func()) {
+	project := "test-project"
+	instance := fmt.Sprintf("test-instance-%s", uuid.New())
+	assert.NoError(t, bt.InitBigtable(project, instance, cfgs...))
+	return project, instance, func() {
+		assert.NoError(t, bt.DeleteTables(project, instance, cfgs...))
+	}
+}
diff --git a/status/go/status/main.go b/status/go/status/main.go
index 4d9462b..6b2f5d4 100644
--- a/status/go/status/main.go
+++ b/status/go/status/main.go
@@ -88,6 +88,10 @@
 
 // flags
 var (
+	// TODO(borenet): Combine btInstance, firestoreInstance, and
+	// pubsubTopicSet.
+	btInstance                  = flag.String("bigtable_instance", "", "BigTable instance to use.")
+	btProject                   = flag.String("bigtable_project", "", "GCE project to use for BigTable.")
 	capacityRecalculateInterval = flag.Duration("capacity_recalculate_interval", 10*time.Minute, "How often to re-calculate capacity statistics.")
 	firestoreInstance           = flag.String("firestore_instance", "", "Firestore instance to use, eg. \"prod\"")
 	host                        = flag.String("host", "localhost", "HTTP service host")
@@ -96,8 +100,6 @@
 	repoUrls                    = common.NewMultiStringFlag("repo", nil, "Repositories to query for status.")
 	resourcesDir                = flag.String("resources_dir", "", "The directory to find templates, JS, and CSS files. If blank the current directory will be used.")
 	swarmingUrl                 = flag.String("swarming_url", "https://chromium-swarm.appspot.com", "URL of the Swarming server.")
-	tasksCfgProject             = flag.String("tasks_cfg_project", "", "GCE project to use for tasks cfg cache.")
-	tasksCfgInstance            = flag.String("tasks_cfg_instance", "", "BigTable instance to use for tasks cfg cache.")
 	taskSchedulerDbUrl          = flag.String("task_db_url", "http://skia-task-scheduler:8008/db/", "Where the Skia task scheduler database is hosted.")
 	taskSchedulerUrl            = flag.String("task_scheduler_url", "https://task-scheduler.skia.org", "URL of the Task Scheduler server.")
 	testing                     = flag.Bool("testing", false, "Set to true for locally testing rules. No email will be sent.")
@@ -775,7 +777,7 @@
 	sklog.Info("Checkout complete")
 
 	// Cache for buildProgressHandler.
-	tasksPerCommit, err = newTasksPerCommitCache(ctx, *workdir, *repoUrls, 14*24*time.Hour, *tasksCfgProject, *tasksCfgInstance, ts)
+	tasksPerCommit, err = newTasksPerCommitCache(ctx, *workdir, *repoUrls, 14*24*time.Hour, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatalf("Failed to create tasksPerCommitCache: %s", err)
 	}
@@ -849,12 +851,11 @@
 	})
 
 	// Create the TaskDriver DB.
-	btProject := "skia-public"
-	taskDriverDb, err = bigtable_db.NewBigTableDB(ctx, btProject, bigtable_db.BT_INSTANCE, ts)
+	taskDriverDb, err = bigtable_db.NewBigTableDB(ctx, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatal(err)
 	}
-	taskDriverLogs, err = logs.NewLogsManager(ctx, btProject, logs.BT_INSTANCE, ts)
+	taskDriverLogs, err = logs.NewLogsManager(ctx, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatal(err)
 	}
diff --git a/status/go/status/tasks_per_commit.go b/status/go/status/tasks_per_commit.go
index 4a648ab..32340a5 100644
--- a/status/go/status/tasks_per_commit.go
+++ b/status/go/status/tasks_per_commit.go
@@ -29,7 +29,7 @@
 }
 
 // newTasksPerCommitCache returns a tasksPerCommitCache instance.
-func newTasksPerCommitCache(ctx context.Context, workdir string, repoUrls []string, period time.Duration, tasksCfgProject, tasksCfgInstance string, ts oauth2.TokenSource) (*tasksPerCommitCache, error) {
+func newTasksPerCommitCache(ctx context.Context, workdir string, repoUrls []string, period time.Duration, btProject, btInstance string, ts oauth2.TokenSource) (*tasksPerCommitCache, error) {
 	wd := path.Join(workdir, "tasksPerCommitCache")
 	if _, err := os.Stat(wd); err != nil {
 		if os.IsNotExist(err) {
@@ -52,7 +52,7 @@
 		return nil, err
 	}
 	gitCache := path.Join(wd, "cache")
-	tcc, err := specs.NewTaskCfgCache(ctx, repos, depotTools.Dir(), gitCache, 3, tasksCfgProject, tasksCfgInstance, ts)
+	tcc, err := specs.NewTaskCfgCache(ctx, repos, depotTools.Dir(), gitCache, 3, btProject, btInstance, ts)
 	if err != nil {
 		return nil, err
 	}
diff --git a/status/go/statusk/main.go b/status/go/statusk/main.go
index 0a48154..9e3660b 100644
--- a/status/go/statusk/main.go
+++ b/status/go/statusk/main.go
@@ -94,6 +94,10 @@
 
 // flags
 var (
+	// TODO(borenet): Combine btInstance, firestoreInstance, and
+	// pubsubTopicSet.
+	btInstance                  = flag.String("bigtable_instance", "", "BigTable instance to use.")
+	btProject                   = flag.String("bigtable_project", "", "GCE project to use for BigTable.")
 	capacityRecalculateInterval = flag.Duration("capacity_recalculate_interval", 10*time.Minute, "How often to re-calculate capacity statistics.")
 	firestoreInstance           = flag.String("firestore_instance", "", "Firestore instance to use, eg. \"prod\"")
 	host                        = flag.String("host", "localhost", "HTTP service host")
@@ -104,8 +108,6 @@
 	resourcesDir                = flag.String("resources_dir", "", "The directory to find templates, JS, and CSS files. If blank the current directory will be used.")
 	chromeInfraAuthJWT          = flag.String("service_account_jwt", "/var/secrets/skia-public-auth/key.json", "The JWT key for the service account that has access to chrome infra auth.")
 	swarmingUrl                 = flag.String("swarming_url", "https://chromium-swarm.appspot.com", "URL of the Swarming server.")
-	tasksCfgProject             = flag.String("tasks_cfg_project", "", "GCE project to use for tasks cfg cache.")
-	tasksCfgInstance            = flag.String("tasks_cfg_instance", "", "BigTable instance to use for tasks cfg cache.")
 	taskSchedulerDbUrl          = flag.String("task_db_url", "https://task-scheduler.skia.org/db/", "Where the Skia task scheduler database is hosted.")
 	taskSchedulerUrl            = flag.String("task_scheduler_url", "https://task-scheduler.skia.org", "URL of the Task Scheduler server.")
 	testing                     = flag.Bool("testing", false, "Set to true for locally testing rules. No email will be sent.")
@@ -780,7 +782,7 @@
 	sklog.Info("Checkout complete")
 
 	// Cache for buildProgressHandler.
-	tasksPerCommit, err = newTasksPerCommitCache(ctx, *workdir, *recipesCfgFile, repos, 14*24*time.Hour, *tasksCfgProject, *tasksCfgInstance, ts)
+	tasksPerCommit, err = newTasksPerCommitCache(ctx, *workdir, *recipesCfgFile, repos, 14*24*time.Hour, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatalf("Failed to create tasksPerCommitCache: %s", err)
 	}
@@ -854,12 +856,11 @@
 	})
 
 	// Create the TaskDriver DB.
-	btProject := "skia-public"
-	taskDriverDb, err = bigtable_db.NewBigTableDB(ctx, btProject, bigtable_db.BT_INSTANCE, ts)
+	taskDriverDb, err = bigtable_db.NewBigTableDB(ctx, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatal(err)
 	}
-	taskDriverLogs, err = logs.NewLogsManager(ctx, btProject, logs.BT_INSTANCE, ts)
+	taskDriverLogs, err = logs.NewLogsManager(ctx, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatal(err)
 	}
diff --git a/status/go/statusk/tasks_per_commit.go b/status/go/statusk/tasks_per_commit.go
index 3c28ddc..d70a669 100644
--- a/status/go/statusk/tasks_per_commit.go
+++ b/status/go/statusk/tasks_per_commit.go
@@ -28,7 +28,7 @@
 }
 
 // newTasksPerCommitCache returns a tasksPerCommitCache instance.
-func newTasksPerCommitCache(ctx context.Context, workdir, recipesCfgFile string, repos repograph.Map, period time.Duration, tasksCfgProject, tasksCfgInstance string, ts oauth2.TokenSource) (*tasksPerCommitCache, error) {
+func newTasksPerCommitCache(ctx context.Context, workdir, recipesCfgFile string, repos repograph.Map, period time.Duration, btProject, btInstance string, ts oauth2.TokenSource) (*tasksPerCommitCache, error) {
 	wd := path.Join(workdir, "tasksPerCommitCache")
 	if _, err := os.Stat(wd); err != nil {
 		if os.IsNotExist(err) {
@@ -44,7 +44,7 @@
 		return nil, err
 	}
 	gitCache := path.Join(wd, "cache")
-	tcc, err := specs.NewTaskCfgCache(ctx, repos, depotTools, gitCache, 3, tasksCfgProject, tasksCfgInstance, ts)
+	tcc, err := specs.NewTaskCfgCache(ctx, repos, depotTools, gitCache, 3, btProject, btInstance, ts)
 	if err != nil {
 		return nil, err
 	}
diff --git a/status/sys/status-internal.service b/status/sys/status-internal.service
index b265e78..e824a15 100644
--- a/status/sys/status-internal.service
+++ b/status/sys/status-internal.service
@@ -6,6 +6,8 @@
 
 [Service]
 ExecStart=/usr/local/bin/status \
+    --bigtable_project=skia-corp \
+    --bigtable_instance=internal \
     --logtostderr \
     --workdir=/mnt/pd0/status_workdir \
     --use_metadata=true \
@@ -17,9 +19,7 @@
     --repo=https://skia.googlesource.com/internal_test.git \
     --swarming_url=https://chrome-swarming.appspot.com \
     --task_scheduler_url=https://skia-task-scheduler-internal-8000-proxy.skia.org \
-    --task_db_url=http://skia-task-scheduler-internal:8008/db/ \
-    --tasks_cfg_project=skia-corp \
-    --tasks_cfg_instance=tasks-cfg-internal
+    --task_db_url=http://skia-task-scheduler-internal:8008/db/
 Restart=always
 User=default
 Group=default
diff --git a/status/sys/status-staging.service b/status/sys/status-staging.service
index d2ac273..0b85400 100644
--- a/status/sys/status-staging.service
+++ b/status/sys/status-staging.service
@@ -6,6 +6,8 @@
 
 [Service]
 ExecStart=/usr/local/bin/status \
+    --bigtable_project=skia-public \
+    --bigtable_instance=staging \
     --logtostderr \
     --workdir=/mnt/pd0/status_workdir \
     --use_metadata=true \
@@ -17,9 +19,7 @@
     --repo=https://skia.googlesource.com/skiabot-test.git \
     --swarming_url=https://chromium-swarm-dev.appspot.com \
     --task_scheduler_url=https://task-scheduler-staging.skia.org \
-    --task_db_url=http://skia-task-scheduler-staging:8008/db/ \
-    --tasks_cfg_project=skia-public \
-    --tasks_cfg_instance=tasks-cfg-staging
+    --task_db_url=http://skia-task-scheduler-staging:8008/db/
 Restart=always
 User=default
 Group=default
diff --git a/status/sys/statusd.service b/status/sys/statusd.service
index d30e3d4..24dd1d4 100644
--- a/status/sys/statusd.service
+++ b/status/sys/statusd.service
@@ -6,6 +6,8 @@
 
 [Service]
 ExecStart=/usr/local/bin/status \
+    --bigtable_project=skia-public \
+    --bigtable_instance=production \
     --logtostderr \
     --workdir=/mnt/pd0/status_workdir \
     --use_metadata=true \
@@ -16,9 +18,7 @@
     --repo=https://skia.googlesource.com/skia.git \
     --repo=https://skia.googlesource.com/buildbot.git \
     --repo=https://skia.googlesource.com/lottie-ci.git \
-    --task_db_url=http://skia-task-scheduler:8008/db/ \
-    --tasks_cfg_project=skia-public \
-    --tasks_cfg_instance=tasks-cfg-prod
+    --task_db_url=http://skia-task-scheduler:8008/db/
 Restart=always
 User=default
 Group=default
diff --git a/task_driver/go/db/bigtable/bigtable.go b/task_driver/go/db/bigtable/bigtable.go
index 5b23a7f..1fabe57 100644
--- a/task_driver/go/db/bigtable/bigtable.go
+++ b/task_driver/go/db/bigtable/bigtable.go
@@ -20,9 +20,6 @@
 )
 
 const (
-	// We use a single BigTable instance for Task Drivers per project.
-	BT_INSTANCE = "task-driver"
-
 	// We use a single BigTable table for storing Task Driver runs.
 	BT_TABLE = "task-driver-runs"
 
diff --git a/task_driver/go/db/bigtable/bigtable_test.go b/task_driver/go/db/bigtable/bigtable_test.go
index 7a71ea7..c2eade5 100644
--- a/task_driver/go/db/bigtable/bigtable_test.go
+++ b/task_driver/go/db/bigtable/bigtable_test.go
@@ -6,26 +6,24 @@
 
 	assert "github.com/stretchr/testify/require"
 	"go.skia.org/infra/go/bt"
+	bt_testutil "go.skia.org/infra/go/bt/testutil"
 	"go.skia.org/infra/go/testutils"
 	"go.skia.org/infra/task_driver/go/db"
 )
 
 func setup(t *testing.T) (db.DB, func()) {
 	testutils.LargeTest(t)
-	project := "test-project"
-	instance := "test-instance"
-
-	// Set up the table and column families.
-	assert.NoError(t, bt.InitBigtable(project, instance, bt.TableConfig{
+	project, instance, cleanup := bt_testutil.SetupBigTable(t, bt.TableConfig{
 		BT_TABLE: {
 			BT_COLUMN_FAMILY,
 		},
-	}))
+	})
 
 	d, err := NewBigTableDB(context.Background(), project, instance, nil)
 	assert.NoError(t, err)
 	return d, func() {
 		testutils.AssertCloses(t, d)
+		cleanup()
 	}
 }
 
diff --git a/task_driver/go/logs/logs.go b/task_driver/go/logs/logs.go
index a141ad6..ed2d718 100644
--- a/task_driver/go/logs/logs.go
+++ b/task_driver/go/logs/logs.go
@@ -22,9 +22,6 @@
 )
 
 const (
-	// We use a single BigTable instance for all Task Driver logs.
-	BT_INSTANCE = "task-driver"
-
 	// We use a single BigTable table for storing logs.
 	BT_TABLE = "task-driver-logs"
 
diff --git a/task_driver/go/task-driver-server/main.go b/task_driver/go/task-driver-server/main.go
index 73f00aa..bd8e66b 100644
--- a/task_driver/go/task-driver-server/main.go
+++ b/task_driver/go/task-driver-server/main.go
@@ -38,6 +38,8 @@
 
 var (
 	// Flags.
+	btInstance   = flag.String("bigtable_instance", "", "BigTable instance to use.")
+	btProject    = flag.String("bigtable_project", "", "GCE project to use for BigTable.")
 	host         = flag.String("host", "localhost", "HTTP service host")
 	local        = flag.Bool("local", false, "Running locally if true. As opposed to in production.")
 	port         = flag.String("port", ":8000", "HTTP service port (e.g., ':8000')")
@@ -328,14 +330,11 @@
 	if err != nil {
 		sklog.Fatal(err)
 	}
-	// We read TaskDrivers from *project, but the BigTable instance is
-	// actually in skia-public.
-	btProject := "skia-public"
-	d, err = bigtable_db.NewBigTableDB(ctx, btProject, bigtable_db.BT_INSTANCE, ts)
+	d, err = bigtable_db.NewBigTableDB(ctx, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatal(err)
 	}
-	lm, err = logs.NewLogsManager(ctx, btProject, logs.BT_INSTANCE, ts)
+	lm, err = logs.NewLogsManager(ctx, *btProject, *btInstance, ts)
 	if err != nil {
 		sklog.Fatal(err)
 	}
diff --git a/task_driver/setup-bigtable.sh b/task_driver/setup-bigtable.sh
index ecb8418..148fa1e 100755
--- a/task_driver/setup-bigtable.sh
+++ b/task_driver/setup-bigtable.sh
@@ -3,13 +3,15 @@
 set -e -x
 
 PROJECT="${PROJECT:-skia-public}"
-BIGTABLE_INSTANCE="${BIGTABLE_INSTANCE:-task-driver}"
 PUBSUB_TOPIC="${PUBSUB_TOPIC:-task-driver-logs}"
 LOG_NAME="${LOG_NAME:-task-driver}"
 
 # Set up BigTable tables and column families.
 go get -u cloud.google.com/go/bigtable/cmd/cbt
 
+PROJECT="skia-public"
+BIGTABLE_INSTANCE="production"
+
 BIGTABLE_TABLE="task-driver-runs"
 BIGTABLE_COLUMN_FAMILY="MSGS"
 cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
@@ -20,6 +22,32 @@
 cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
 cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
 
+BIGTABLE_INSTANCE="staging"
+
+BIGTABLE_TABLE="task-driver-runs"
+BIGTABLE_COLUMN_FAMILY="MSGS"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
+
+BIGTABLE_TABLE="task-driver-logs"
+BIGTABLE_COLUMN_FAMILY="LOGS"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
+
+PROJECT="skia-corp"
+BIGTABLE_INSTANCE="internal"
+
+BIGTABLE_TABLE="task-driver-runs"
+BIGTABLE_COLUMN_FAMILY="MSGS"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
+
+BIGTABLE_TABLE="task-driver-logs"
+BIGTABLE_COLUMN_FAMILY="LOGS"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
+
+
 # Set up logs export to pubsub.
 gcloud --project=${PROJECT} logging sinks create task-driver-logs-to-pubsub \
     pubsub.googleapis.com/projects/${PROJECT}/topics/${PUBSUB_TOPIC} \
diff --git a/task_scheduler/go/scheduling/task_scheduler.go b/task_scheduler/go/scheduling/task_scheduler.go
index c770390..3f0aa12 100644
--- a/task_scheduler/go/scheduling/task_scheduler.go
+++ b/task_scheduler/go/scheduling/task_scheduler.go
@@ -126,7 +126,7 @@
 	workdir        string
 }
 
-func NewTaskScheduler(ctx context.Context, d db.DB, bl *blacklist.Blacklist, period time.Duration, numCommits int, workdir, host string, repos repograph.Map, isolateClient *isolate.Client, swarmingClient swarming.ApiClient, c *http.Client, timeDecayAmt24Hr float64, buildbucketApiUrl, trybotBucket string, projectRepoMapping map[string]string, pools []string, pubsubTopic, depotTools string, gerrit gerrit.GerritInterface, tasksCfgProject, tasksCfgInstance string, ts oauth2.TokenSource) (*TaskScheduler, error) {
+func NewTaskScheduler(ctx context.Context, d db.DB, bl *blacklist.Blacklist, period time.Duration, numCommits int, workdir, host string, repos repograph.Map, isolateClient *isolate.Client, swarmingClient swarming.ApiClient, c *http.Client, timeDecayAmt24Hr float64, buildbucketApiUrl, trybotBucket string, projectRepoMapping map[string]string, pools []string, pubsubTopic, depotTools string, gerrit gerrit.GerritInterface, btProject, btInstance string, ts oauth2.TokenSource) (*TaskScheduler, error) {
 	// Repos must be updated before window is initialized; otherwise the repos may be uninitialized,
 	// resulting in the window being too short, causing the caches to be loaded with incomplete data.
 	for _, r := range repos {
@@ -150,7 +150,7 @@
 		return nil, fmt.Errorf("Failed to create JobCache: %s", err)
 	}
 
-	taskCfgCache, err := specs.NewTaskCfgCache(ctx, repos, depotTools, path.Join(workdir, "taskCfgCache"), specs.DEFAULT_NUM_WORKERS, tasksCfgProject, tasksCfgInstance, ts)
+	taskCfgCache, err := specs.NewTaskCfgCache(ctx, repos, depotTools, path.Join(workdir, "taskCfgCache"), specs.DEFAULT_NUM_WORKERS, btProject, btInstance, ts)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to create TaskCfgCache: %s", err)
 	}
diff --git a/task_scheduler/go/scheduling/task_scheduler_test.go b/task_scheduler/go/scheduling/task_scheduler_test.go
index 9646dd3..8ac2804 100644
--- a/task_scheduler/go/scheduling/task_scheduler_test.go
+++ b/task_scheduler/go/scheduling/task_scheduler_test.go
@@ -190,7 +190,6 @@
 func setup(t *testing.T) (context.Context, *git_testutils.GitBuilder, db.DB, *swarming_testutils.TestClient, *TaskScheduler, *mockhttpclient.URLMock, func()) {
 	testutils.LargeTest(t)
 
-	specs_testutils.SetupBigTable(t)
 	ctx, gb, _, _ := specs_testutils.SetupTestRepo(t)
 
 	tmp, err := ioutil.TempDir("", "")
diff --git a/task_scheduler/go/specs/specs.go b/task_scheduler/go/specs/specs.go
index e3dc18d..dbed268 100644
--- a/task_scheduler/go/specs/specs.go
+++ b/task_scheduler/go/specs/specs.go
@@ -443,8 +443,8 @@
 }
 
 // NewTaskCfgCache returns a TaskCfgCache instance.
-func NewTaskCfgCache(ctx context.Context, repos repograph.Map, depotToolsDir, workdir string, numWorkers int, project, instance string, ts oauth2.TokenSource) (*TaskCfgCache, error) {
-	client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(ts))
+func NewTaskCfgCache(ctx context.Context, repos repograph.Map, depotToolsDir, workdir string, numWorkers int, btProject, btInstance string, ts oauth2.TokenSource) (*TaskCfgCache, error) {
+	client, err := bigtable.NewClient(ctx, btProject, btInstance, option.WithTokenSource(ts))
 	if err != nil {
 		return nil, fmt.Errorf("Failed to create BigTable client: %s", err)
 	}
diff --git a/task_scheduler/go/specs/testutils/testutils.go b/task_scheduler/go/specs/testutils/testutils.go
index a81ed6c..b11d718 100644
--- a/task_scheduler/go/specs/testutils/testutils.go
+++ b/task_scheduler/go/specs/testutils/testutils.go
@@ -2,12 +2,10 @@
 
 import (
 	"context"
-	"fmt"
 	"time"
 
-	"github.com/google/uuid"
-	assert "github.com/stretchr/testify/require"
 	"go.skia.org/infra/go/bt"
+	bt_testutil "go.skia.org/infra/go/bt/testutil"
 	git_testutils "go.skia.org/infra/go/git/testutils"
 	"go.skia.org/infra/go/testutils"
 )
@@ -218,20 +216,15 @@
 }
 
 // SetupBigTable performs setup for the TaskCfgCache in BigTable. Returns the
-// project and instance names which should be used to instantiate TaskCfgCache
-// and a cleanup function which should be deferred.
+// BigTable project and instance names which should be used to instantiate
+// TaskCfgCache and a cleanup function which should be deferred.
 func SetupBigTable(t testutils.TestingT) (string, string, func()) {
 	// The table and column family names are specs.BT_TABLE and
 	// specs.BT_COLUMN_FAMILY, but are hard-coded here to avoid a dependency
 	// cycle.
-	cfg := bt.TableConfig{
+	return bt_testutil.SetupBigTable(t, bt.TableConfig{
 		"tasks-cfg": {
 			"CFGS",
 		},
-	}
-	instance := fmt.Sprintf("specs-testutils-%s", uuid.New())
-	assert.NoError(t, bt.InitBigtable(BT_PROJECT, instance, cfg))
-	return BT_PROJECT, instance, func() {
-		assert.NoError(t, bt.DeleteTables(BT_PROJECT, instance, cfg))
-	}
+	})
 }
diff --git a/task_scheduler/go/task_scheduler/main.go b/task_scheduler/go/task_scheduler/main.go
index f9db99f..fd012b1 100644
--- a/task_scheduler/go/task_scheduler/main.go
+++ b/task_scheduler/go/task_scheduler/main.go
@@ -84,6 +84,8 @@
 	triggerTemplate   *template.Template = nil
 
 	// Flags.
+	btInstance        = flag.String("bigtable_instance", "", "BigTable instance to use.")
+	btProject         = flag.String("bigtable_project", "", "GCE project to use for BigTable.")
 	host              = flag.String("host", "localhost", "HTTP service host")
 	port              = flag.String("port", ":8000", "HTTP service port for the web server (e.g., ':8000')")
 	dbPort            = flag.String("db_port", ":8008", "HTTP service port for the database RPC server (e.g., ':8008')")
@@ -94,22 +96,21 @@
 	// TODO(borenet): pubsubTopicSet is also used for as the blacklist
 	// instance name. Once all schedulers are using Firestore for their
 	// task DB, firestoreInstance will have the same value. We should
-	// combine into a single instanceName flag.
-	pubsubTopicSet   = flag.String("pubsub_topic_set", "", fmt.Sprintf("Pubsub topic set; one of: %v", pubsub.VALID_TOPIC_SETS))
-	repoUrls         = common.NewMultiStringFlag("repo", nil, "Repositories for which to schedule tasks.")
-	recipesCfgFile   = flag.String("recipes_cfg", "", "Path to the recipes.cfg file.")
-	resourcesDir     = flag.String("resources_dir", "", "The directory to find templates, JS, and CSS files. If blank, assumes you're running inside a checkout and will attempt to find the resources relative to this source file.")
-	scoreDecay24Hr   = flag.Float64("scoreDecay24Hr", 0.9, "Task candidate scores are penalized using linear time decay. This is the desired value after 24 hours. Setting it to 1.0 causes commits not to be prioritized according to commit time.")
-	swarmingPools    = common.NewMultiStringFlag("pool", swarming.POOLS_PUBLIC, "Which Swarming pools to use.")
-	swarmingServer   = flag.String("swarming_server", swarming.SWARMING_SERVER, "Which Swarming server to use.")
-	tasksCfgProject  = flag.String("tasks_cfg_project", "", "GCE project to use for tasks cfg cache.")
-	tasksCfgInstance = flag.String("tasks_cfg_instance", "", "BigTable instance to use for tasks cfg cache.")
-	timePeriod       = flag.String("timeWindow", "4d", "Time period to use.")
-	tryJobBucket     = flag.String("tryjob_bucket", tryjobs.BUCKET_PRIMARY, "Which Buildbucket bucket to use for try jobs.")
-	commitWindow     = flag.Int("commitWindow", 10, "Minimum number of recent commits to keep in the timeWindow.")
-	gsBucket         = flag.String("gsBucket", "skia-task-scheduler", "Name of Google Cloud Storage bucket to use for backups and recovery.")
-	workdir          = flag.String("workdir", "workdir", "Working directory to use.")
-	promPort         = flag.String("prom_port", ":20000", "Metrics service address (e.g., ':10110')")
+	// combine into a single instanceName flag. Additionally, the BigTable
+	// instance flag has the same set of values.
+	pubsubTopicSet = flag.String("pubsub_topic_set", "", fmt.Sprintf("Pubsub topic set; one of: %v", pubsub.VALID_TOPIC_SETS))
+	repoUrls       = common.NewMultiStringFlag("repo", nil, "Repositories for which to schedule tasks.")
+	recipesCfgFile = flag.String("recipes_cfg", "", "Path to the recipes.cfg file.")
+	resourcesDir   = flag.String("resources_dir", "", "The directory to find templates, JS, and CSS files. If blank, assumes you're running inside a checkout and will attempt to find the resources relative to this source file.")
+	scoreDecay24Hr = flag.Float64("scoreDecay24Hr", 0.9, "Task candidate scores are penalized using linear time decay. This is the desired value after 24 hours. Setting it to 1.0 causes commits not to be prioritized according to commit time.")
+	swarmingPools  = common.NewMultiStringFlag("pool", swarming.POOLS_PUBLIC, "Which Swarming pools to use.")
+	swarmingServer = flag.String("swarming_server", swarming.SWARMING_SERVER, "Which Swarming server to use.")
+	timePeriod     = flag.String("timeWindow", "4d", "Time period to use.")
+	tryJobBucket   = flag.String("tryjob_bucket", tryjobs.BUCKET_PRIMARY, "Which Buildbucket bucket to use for try jobs.")
+	commitWindow   = flag.Int("commitWindow", 10, "Minimum number of recent commits to keep in the timeWindow.")
+	gsBucket       = flag.String("gsBucket", "skia-task-scheduler", "Name of Google Cloud Storage bucket to use for backups and recovery.")
+	workdir        = flag.String("workdir", "workdir", "Working directory to use.")
+	promPort       = flag.String("prom_port", ":20000", "Metrics service address (e.g., ':10110')")
 
 	pubsubTopicName      = flag.String("pubsub_topic", swarming.PUBSUB_TOPIC_SWARMING_TASKS, "Pub/Sub topic to use for Swarming tasks.")
 	pubsubSubscriberName = flag.String("pubsub_subscriber", PUBSUB_SUBSCRIBER_TASK_SCHEDULER, "Pub/Sub subscriber name.")
@@ -748,7 +749,7 @@
 	if err := swarming.InitPubSub(serverURL, *pubsubTopicName, *pubsubSubscriberName); err != nil {
 		sklog.Fatal(err)
 	}
-	ts, err = scheduling.NewTaskScheduler(ctx, tsDb, bl, period, *commitWindow, wdAbs, serverURL, repos, isolateClient, swarm, httpClient, *scoreDecay24Hr, tryjobs.API_URL_PROD, *tryJobBucket, common.PROJECT_REPO_MAPPING, *swarmingPools, *pubsubTopicName, depotTools, gerrit, *tasksCfgProject, *tasksCfgInstance, tokenSource)
+	ts, err = scheduling.NewTaskScheduler(ctx, tsDb, bl, period, *commitWindow, wdAbs, serverURL, repos, isolateClient, swarm, httpClient, *scoreDecay24Hr, tryjobs.API_URL_PROD, *tryJobBucket, common.PROJECT_REPO_MAPPING, *swarmingPools, *pubsubTopicName, depotTools, gerrit, *btProject, *btInstance, tokenSource)
 	if err != nil {
 		sklog.Fatal(err)
 	}
diff --git a/task_scheduler/setup-bigtable.sh b/task_scheduler/setup-bigtable.sh
new file mode 100755
index 0000000..1af1ab8
--- /dev/null
+++ b/task_scheduler/setup-bigtable.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -e -x
+
+# Set up BigTable tables and column families.
+go get -u cloud.google.com/go/bigtable/cmd/cbt
+
+BIGTABLE_TABLE="tasks-cfg"
+BIGTABLE_COLUMN_FAMILY="CFGS"
+PROJECT="skia-public"
+BIGTABLE_INSTANCE="production"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
+
+BIGTABLE_INSTANCE="staging"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
+
+PROJECT="skia-corp"
+BIGTABLE_INSTANCE="internal"
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createtable ${BIGTABLE_TABLE}
+cbt --project=${PROJECT} --instance=${BIGTABLE_INSTANCE} createfamily ${BIGTABLE_TABLE} ${BIGTABLE_COLUMN_FAMILY}
diff --git a/task_scheduler/sys/task-scheduler-internal.service b/task_scheduler/sys/task-scheduler-internal.service
index d70f271..bbcec68 100644
--- a/task_scheduler/sys/task-scheduler-internal.service
+++ b/task_scheduler/sys/task-scheduler-internal.service
@@ -6,6 +6,8 @@
 
 [Service]
 ExecStart=/usr/local/bin/task_scheduler \
+    --bigtable_project=skia-corp \
+    --bigtable_instance=internal \
     --gsBucket=skia-task-scheduler-internal \
     --host=task-scheduler-internal.skia.org \
     --isolate_server=https://chrome-isolated.appspot.com \
@@ -18,8 +20,6 @@
     --repo=https://skia.googlesource.com/skia_internal.git \
     --resources_dir=/usr/local/share/task-scheduler/ \
     --swarming_server=chrome-swarming.appspot.com \
-    --tasks_cfg_project=skia-corp \
-    --tasks_cfg_instance=tasks-cfg-internal \
     --tryjob_bucket=skia.internal \
     --workdir=/mnt/pd0/task_scheduler_workdir
 Restart=always
diff --git a/task_scheduler/sys/task-scheduler-staging.service b/task_scheduler/sys/task-scheduler-staging.service
index 611b8f5..2a37ae4 100644
--- a/task_scheduler/sys/task-scheduler-staging.service
+++ b/task_scheduler/sys/task-scheduler-staging.service
@@ -6,6 +6,8 @@
 
 [Service]
 ExecStart=/usr/local/bin/task_scheduler \
+    --bigtable_project=skia-public \
+    --bigtable_instance=staging \
     --gsBucket=skia-task-scheduler-staging \
     --host=task-scheduler-staging.skia.org \
     --firestore_instance=staging \
@@ -18,8 +20,6 @@
     --repo=https://skia.googlesource.com/skiabot-test.git \
     --resources_dir=/usr/local/share/task-scheduler/ \
     --swarming_server=chromium-swarm-dev.appspot.com \
-    --tasks_cfg_project=skia-public \
-    --tasks_cfg_instance=tasks-cfg-staging \
     --tryjob_bucket=skia.testing \
     --workdir=/mnt/pd0/task_scheduler_workdir
 Restart=always
diff --git a/task_scheduler/sys/task-scheduler.service b/task_scheduler/sys/task-scheduler.service
index 8702ef5..4c10046 100644
--- a/task_scheduler/sys/task-scheduler.service
+++ b/task_scheduler/sys/task-scheduler.service
@@ -6,11 +6,11 @@
 
 [Service]
 ExecStart=/usr/local/bin/task_scheduler \
+    --bigtable_project=skia-public \
+    --bigtable_instance=production \
     --host=task-scheduler.skia.org \
     --logtostderr \
     --pubsub_topic_set=prod \
-    --tasks_cfg_project=skia-public \
-    --tasks_cfg_instance=tasks-cfg-prod \
     --workdir=/mnt/pd0/task_scheduler_workdir \
     --resources_dir=/usr/local/share/task-scheduler/
 Restart=always