[task scheduler] Make blacklist use Firestore

Bug: skia:8636
Change-Id: I0000c5edada36569ac18378fadbb15e9f23e4eb6
Reviewed-on: https://skia-review.googlesource.com/c/178864
Reviewed-by: Ben Wagner <benjaminwagner@google.com>
Commit-Queue: Eric Boren <borenet@google.com>
diff --git a/task_scheduler/go/blacklist/blacklist.go b/task_scheduler/go/blacklist/blacklist.go
index 5d99905..b120928 100644
--- a/task_scheduler/go/blacklist/blacklist.go
+++ b/task_scheduler/go/blacklist/blacklist.go
@@ -2,33 +2,93 @@
 
 import (
 	"context"
-	"encoding/json"
+	"errors"
 	"fmt"
-	"os"
 	"regexp"
 	"sync"
+	"time"
 
+	fs "cloud.google.com/go/firestore"
+	"go.skia.org/infra/go/firestore"
 	"go.skia.org/infra/go/git/repograph"
 	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/util"
+	"golang.org/x/oauth2"
 )
 
 const (
+	// Collection name for blacklist entries.
+	COLLECTION_BLACKLISTS = "blacklist_rules"
+
+	// We'll perform this many attempts for a given request.
+	DEFAULT_ATTEMPTS = 3
+
+	// Timeouts for various requests.
+	TIMEOUT_GET = 60 * time.Second
+	TIMEOUT_PUT = 10 * time.Second
+
 	MAX_NAME_CHARS = 50
 )
 
 var (
-	DEFAULT_RULES = []*Rule{}
-
 	ERR_NO_SUCH_RULE = fmt.Errorf("No such rule.")
 )
 
 // Blacklist is a struct which contains rules specifying tasks which should
 // not be scheduled.
 type Blacklist struct {
-	backingFile string
-	Rules       map[string]*Rule `json:"rules"`
-	mtx         sync.RWMutex
+	client *firestore.Client
+	coll   *fs.CollectionRef
+	mtx    sync.RWMutex
+	rules  map[string]*Rule
+}
+
+// New returns a Blacklist instance.
+func New(ctx context.Context, project, instance string, ts oauth2.TokenSource) (*Blacklist, error) {
+	client, err := firestore.NewClient(ctx, project, firestore.APP_TASK_SCHEDULER, instance, ts)
+	if err != nil {
+		return nil, err
+	}
+	b := &Blacklist{
+		client: client,
+		coll:   client.Collection(COLLECTION_BLACKLISTS),
+	}
+	if err := b.Update(); err != nil {
+		util.LogErr(b.Close())
+		return nil, err
+	}
+	return b, nil
+}
+
+// Close closes the database.
+func (b *Blacklist) Close() error {
+	if b != nil {
+		return b.client.Close()
+	}
+	return nil
+}
+
+// Update updates the local view of the Blacklist to match the remote DB.
+func (b *Blacklist) Update() error {
+	if b == nil {
+		return nil
+	}
+	rules := map[string]*Rule{}
+	q := b.coll.Query
+	if err := firestore.IterDocs(q, DEFAULT_ATTEMPTS, TIMEOUT_GET, func(doc *fs.DocumentSnapshot) error {
+		var r Rule
+		if err := doc.DataTo(&r); err != nil {
+			return err
+		}
+		rules[r.Name] = &r
+		return nil
+	}); err != nil {
+		return err
+	}
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+	b.rules = rules
+	return nil
 }
 
 // Match determines whether the given taskSpec/commit pair matches one of the
@@ -41,9 +101,12 @@
 // Rules in the Blacklist. Returns the name of the matched Rule or the empty
 // string if no Rules match.
 func (b *Blacklist) MatchRule(taskSpec, commit string) string {
+	if b == nil {
+		return ""
+	}
 	b.mtx.RLock()
 	defer b.mtx.RUnlock()
-	for _, rule := range b.Rules {
+	for _, rule := range b.rules {
 		if rule.Match(taskSpec, commit) {
 			return rule.Name
 		}
@@ -51,34 +114,11 @@
 	return ""
 }
 
-// ensureDefaults adds the necessary default blacklist rules if necessary.
-func (b *Blacklist) ensureDefaults() error {
-	for _, rule := range DEFAULT_RULES {
-		if err := b.removeRule(rule.Name); err != nil {
-			if err.Error() != ERR_NO_SUCH_RULE.Error() {
-				return err
-			}
-		}
-		if err := b.addRule(rule); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// writeOut writes the Blacklist to its backing file. Assumes that the caller
-// holds a write lock.
-func (b *Blacklist) writeOut() error {
-	f, err := os.Create(b.backingFile)
-	if err != nil {
-		return err
-	}
-	defer util.Close(f)
-	return json.NewEncoder(f).Encode(b)
-}
-
 // Add adds a new Rule to the Blacklist.
 func (b *Blacklist) AddRule(r *Rule, repos repograph.Map) error {
+	if b == nil {
+		return errors.New("Blacklist is nil; cannot add rules.")
+	}
 	if err := ValidateRule(r, repos); err != nil {
 		return err
 	}
@@ -86,17 +126,14 @@
 }
 
 // addRule adds a new Rule to the Blacklist.
-func (b *Blacklist) addRule(r *Rule) error {
-	b.mtx.Lock()
-	defer b.mtx.Unlock()
-	if _, ok := b.Rules[r.Name]; ok {
-		return fmt.Errorf("Blacklist already contains a rule named %q", r.Name)
-	}
-	b.Rules[r.Name] = r
-	if err := b.writeOut(); err != nil {
-		delete(b.Rules, r.Name)
+func (b *Blacklist) addRule(r *Rule) error {
+	ref := b.coll.Doc(r.Name)
+	if _, err := firestore.Create(ref, r, DEFAULT_ATTEMPTS, TIMEOUT_PUT); err != nil {
 		return err
 	}
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+	b.rules[r.Name] = r
 	return nil
 }
 
@@ -152,30 +189,33 @@
 	return rule, nil
 }
 
-// removeRule removes the Rule from the Blacklist.
-func (b *Blacklist) removeRule(name string) error {
-	b.mtx.Lock()
-	defer b.mtx.Unlock()
-	r, ok := b.Rules[name]
-	if !ok {
-		return ERR_NO_SUCH_RULE
+// RemoveRule removes the Rule from the Blacklist.
+func (b *Blacklist) RemoveRule(id string) error {
+	if b == nil {
+		return errors.New("Blacklist is nil; cannot remove rules.")
 	}
-	delete(b.Rules, name)
-	if err := b.writeOut(); err != nil {
-		b.Rules[name] = r
+	ref := b.coll.Doc(id)
+	if _, err := firestore.Delete(ref, DEFAULT_ATTEMPTS, TIMEOUT_PUT); err != nil {
 		return err
 	}
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+	delete(b.rules, id)
 	return nil
 }
 
-// RemoveRule removes the Rule from the Blacklist.
-func (b *Blacklist) RemoveRule(name string) error {
-	for _, r := range DEFAULT_RULES {
-		if r.Name == name {
-			return fmt.Errorf("Cannot remove built-in rule %q", name)
-		}
+// GetRules returns a slice containing all of the Rules in the Blacklist.
+func (b *Blacklist) GetRules() []*Rule {
+	if b == nil {
+		return []*Rule{}
 	}
-	return b.removeRule(name)
+	b.mtx.RLock()
+	defer b.mtx.RUnlock()
+	rv := make([]*Rule, 0, len(b.rules))
+	for _, r := range b.rules {
+		rv = append(rv, r.Copy())
+	}
+	return rv
 }
 
 // Rule is a struct which indicates a specific task or set of tasks which
@@ -199,16 +239,16 @@
 // ValidateRule returns an error if the given Rule is not valid.
 func ValidateRule(r *Rule, repos repograph.Map) error {
 	if r.Name == "" {
-		return fmt.Errorf("Rules must have a name.")
+		return errors.New("Rules must have a name.")
 	}
 	if len(r.Name) > MAX_NAME_CHARS {
 		return fmt.Errorf("Rule names must be shorter than %d characters. Use the Description field for detailed information.", MAX_NAME_CHARS)
 	}
 	if r.AddedBy == "" {
-		return fmt.Errorf("Rules must have an AddedBy user.")
+		return errors.New("Rules must have an AddedBy user.")
 	}
 	if len(r.TaskSpecPatterns) == 0 && len(r.Commits) == 0 {
-		return fmt.Errorf("Rules must include a taskSpec pattern and/or a commit/range.")
+		return errors.New("Rules must include a taskSpec pattern and/or a commit/range.")
 	}
 	for _, c := range r.Commits {
 		if _, _, _, err := repos.FindCommit(c); err != nil {
@@ -259,32 +299,13 @@
 	return r.matchTaskSpec(taskSpec) && r.matchCommit(commit)
 }
 
-// FromFile returns a Blacklist instance based on the given file. If the file
-// does not exist, the Blacklist will be empty and will attempt to use the file
-// for writing.
-func FromFile(file string) (*Blacklist, error) {
-	b := &Blacklist{
-		backingFile: file,
-		mtx:         sync.RWMutex{},
+// Copy returns a deep copy of the Rule.
+func (r *Rule) Copy() *Rule {
+	return &Rule{
+		AddedBy:          r.AddedBy,
+		TaskSpecPatterns: util.CopyStringSlice(r.TaskSpecPatterns),
+		Commits:          util.CopyStringSlice(r.Commits),
+		Description:      r.Description,
+		Name:             r.Name,
 	}
-	f, err := os.Open(file)
-	if err != nil {
-		if os.IsNotExist(err) {
-			b.Rules = map[string]*Rule{}
-			if err := b.writeOut(); err != nil {
-				return nil, err
-			}
-		} else {
-			return nil, err
-		}
-	} else {
-		defer util.Close(f)
-		if err := json.NewDecoder(f).Decode(b); err != nil {
-			return nil, err
-		}
-	}
-	if err := b.ensureDefaults(); err != nil {
-		return nil, err
-	}
-	return b, nil
 }
diff --git a/task_scheduler/go/blacklist/blacklist_test.go b/task_scheduler/go/blacklist/blacklist_test.go
index 27310e2..66aaaf5 100644
--- a/task_scheduler/go/blacklist/blacklist_test.go
+++ b/task_scheduler/go/blacklist/blacklist_test.go
@@ -4,44 +4,71 @@
 	"context"
 	"fmt"
 	"io/ioutil"
-	"path"
 	"testing"
+	"time"
 
+	"github.com/google/uuid"
 	assert "github.com/stretchr/testify/require"
 	"go.skia.org/infra/go/deepequal"
+	"go.skia.org/infra/go/firestore"
 	"go.skia.org/infra/go/git/repograph"
 	git_testutils "go.skia.org/infra/go/git/testutils"
-	"go.skia.org/infra/go/sklog"
 	"go.skia.org/infra/go/testutils"
 )
 
+func setup(t *testing.T) (*Blacklist, func()) {
+	testutils.MediumTest(t)
+	testutils.ManualTest(t)
+	instance := fmt.Sprintf("test-%s", uuid.New())
+	b, err := New(context.Background(), firestore.FIRESTORE_PROJECT, instance, nil)
+	assert.NoError(t, err)
+	cleanup := func() {
+		assert.NoError(t, firestore.RecursiveDelete(b.client, b.client.ParentDoc, 5, 30*time.Second))
+		assert.NoError(t, b.Close())
+	}
+	return b, cleanup
+}
+
 func TestAddRemove(t *testing.T) {
-	testutils.SmallTest(t)
-	// Setup.
-	tmp, err := ioutil.TempDir("", "")
-	assert.NoError(t, err)
-	defer testutils.RemoveAll(t, tmp)
-	assert.NoError(t, err)
-	f := path.Join(tmp, "blacklist.json")
-	b1, err := FromFile(f)
-	assert.NoError(t, err)
+	b1, cleanup1 := setup(t)
+	defer cleanup1()
 
 	// Test.
-	assert.Equal(t, len(DEFAULT_RULES), len(b1.Rules))
 	r1 := &Rule{
 		AddedBy:          "test@google.com",
 		TaskSpecPatterns: []string{".*"},
 		Name:             "My Rule",
 	}
 	assert.NoError(t, b1.addRule(r1))
-	b2, err := FromFile(f)
+	b2, err := New(context.Background(), firestore.FIRESTORE_PROJECT, b1.client.ParentDoc.ID, nil)
 	assert.NoError(t, err)
-	deepequal.AssertDeepEqual(t, b1, b2)
+	assertEqual := func() {
+		assert.NoError(t, testutils.EventuallyConsistent(10*time.Second, func() error {
+			assert.NoError(t, b2.Update())
+			if len(b1.rules) == len(b2.rules) {
+				deepequal.AssertDeepEqual(t, b1.rules, b2.rules)
+				return nil
+			}
+			time.Sleep(100 * time.Millisecond)
+			return testutils.TryAgainErr
+		}))
+	}
+	assertEqual()
 
 	assert.NoError(t, b1.RemoveRule(r1.Name))
-	b2, err = FromFile(f)
-	assert.NoError(t, err)
-	deepequal.AssertDeepEqual(t, b1, b2)
+	assertEqual()
+}
+
+func TestRuleCopy(t *testing.T) {
+	testutils.SmallTest(t)
+	r := &Rule{
+		AddedBy:          "me@google.com",
+		TaskSpecPatterns: []string{"a", "b"},
+		Commits:          []string{"abc123", "def456"},
+		Description:      "this is a rule",
+		Name:             "example",
+	}
+	deepequal.AssertCopy(t, r, r.Copy())
 }
 
 func TestRules(t *testing.T) {
@@ -359,7 +386,6 @@
 		},
 	}
 	for _, test := range tests {
-		sklog.Infof(test.msg)
 		assert.Equal(t, test.expect, ValidateRule(&test.rule, repos), test.msg)
 	}
 }
@@ -377,9 +403,8 @@
 	assert.NoError(t, err)
 	repos[gb.RepoUrl()] = repo
 	assert.NoError(t, repos.Update(ctx))
-	f := path.Join(tmp, "blacklist.json")
-	b, err := FromFile(f)
-	assert.NoError(t, err)
+	b, cleanup := setup(t)
+	defer cleanup()
 
 	// Test.
 
@@ -396,7 +421,7 @@
 		commits[5],
 		commits[1],
 		commits[0],
-	}, b.Rules["commit range"].Commits)
+	}, b.rules[rule.Name].Commits)
 
 	// Test a few commits.
 	tc := []struct {
diff --git a/task_scheduler/go/db/firestore/firestore.go b/task_scheduler/go/db/firestore/firestore.go
index 5607ca8..1f3e9c6 100644
--- a/task_scheduler/go/db/firestore/firestore.go
+++ b/task_scheduler/go/db/firestore/firestore.go
@@ -53,16 +53,12 @@
 
 // firestoreDB is a db.DB which uses Cloud Firestore for storage.
 type firestoreDB struct {
-	client    *firestore.Client
-	parentDoc string
+	client *firestore.Client
 
 	db.ModifiedData
 }
 
-// NewDB returns a db.DB which uses Cloud Firestore for storage. The parentDoc
-// parameter is optional and indicates the path of a parent document to which
-// all collections within the DB will belong. If it is not supplied, then the
-// collections will be at the top level.
+// NewDB returns a db.DB which uses Cloud Firestore for storage.
 func NewDB(ctx context.Context, project, instance string, ts oauth2.TokenSource, mod db.ModifiedData) (db.BackupDBCloser, error) {
 	client, err := firestore.NewClient(ctx, project, firestore.APP_TASK_SCHEDULER, instance, ts)
 	if err != nil {
diff --git a/task_scheduler/go/db/firestore/firestore_test.go b/task_scheduler/go/db/firestore/firestore_test.go
index 4002133..b63e817 100644
--- a/task_scheduler/go/db/firestore/firestore_test.go
+++ b/task_scheduler/go/db/firestore/firestore_test.go
@@ -24,7 +24,7 @@
 	testutils.MediumTest(t)
 	testutils.ManualTest(t)
 	instance := fmt.Sprintf("test-%s", uuid.New())
-	d, err := NewDB(context.Background(), "skia-firestore", instance, nil, nil)
+	d, err := NewDB(context.Background(), firestore.FIRESTORE_PROJECT, instance, nil, nil)
 	assert.NoError(t, err)
 	cleanup := func() {
 		c := d.(*firestoreDB).client
diff --git a/task_scheduler/go/db/local_db/busywork/main.go b/task_scheduler/go/db/local_db/busywork/main.go
index 41b083c..4d107c7 100644
--- a/task_scheduler/go/db/local_db/busywork/main.go
+++ b/task_scheduler/go/db/local_db/busywork/main.go
@@ -520,7 +520,7 @@
 		sklog.Fatal(err)
 	}
 	id := fmt.Sprintf("busywork_%s", hostname)
-	d, err := firestore.NewDB(context.Background(), "skia-firestore", id, ts, nil)
+	d, err := firestore.NewDB(context.Background(), firestore.FIRESTORE_PROJECT, id, ts, nil)
 	if err != nil {
 		sklog.Fatal(err)
 	}
diff --git a/task_scheduler/go/scheduling/perftest/perftest.go b/task_scheduler/go/scheduling/perftest/perftest.go
index 96b710b..a2b9c85 100644
--- a/task_scheduler/go/scheduling/perftest/perftest.go
+++ b/task_scheduler/go/scheduling/perftest/perftest.go
@@ -294,7 +294,7 @@
 	assertNoError(ioutil.WriteFile(gitcookies, []byte(".googlesource.com\tTRUE\t/\tTRUE\t123\to\tgit-user.google.com=abc123"), os.ModePerm))
 	g, err := gerrit.NewGerrit("https://fake-skia-review.googlesource.com", gitcookies, urlMock.Client())
 	assertNoError(err)
-	s, err := scheduling.NewTaskScheduler(ctx, d, time.Duration(math.MaxInt64), 0, workdir, "fake.server", repograph.Map{repoName: repo}, isolateClient, swarmingClient, http.DefaultClient, 0.9, tryjobs.API_URL_TESTING, tryjobs.BUCKET_TESTING, map[string]string{"skia": repoName}, swarming.POOLS_PUBLIC, "", depotTools, g)
+	s, err := scheduling.NewTaskScheduler(ctx, d, nil, time.Duration(math.MaxInt64), 0, workdir, "fake.server", repograph.Map{repoName: repo}, isolateClient, swarmingClient, http.DefaultClient, 0.9, tryjobs.API_URL_TESTING, tryjobs.BUCKET_TESTING, map[string]string{"skia": repoName}, swarming.POOLS_PUBLIC, "", depotTools, g)
 	assertNoError(err)
 
 	runTasks := func(bots []*swarming_api.SwarmingRpcsBotInfo) {
diff --git a/task_scheduler/go/scheduling/task_scheduler.go b/task_scheduler/go/scheduling/task_scheduler.go
index fd9c7da..30001ca 100644
--- a/task_scheduler/go/scheduling/task_scheduler.go
+++ b/task_scheduler/go/scheduling/task_scheduler.go
@@ -125,12 +125,7 @@
 	workdir        string
 }
 
-func NewTaskScheduler(ctx context.Context, d db.DB, period time.Duration, numCommits int, workdir, host string, repos repograph.Map, isolateClient *isolate.Client, swarmingClient swarming.ApiClient, c *http.Client, timeDecayAmt24Hr float64, buildbucketApiUrl, trybotBucket string, projectRepoMapping map[string]string, pools []string, pubsubTopic, depotTools string, gerrit gerrit.GerritInterface) (*TaskScheduler, error) {
-	bl, err := blacklist.FromFile(path.Join(workdir, "blacklist.json"))
-	if err != nil {
-		return nil, fmt.Errorf("Failed to create blacklist from file: %s", err)
-	}
-
+func NewTaskScheduler(ctx context.Context, d db.DB, bl *blacklist.Blacklist, period time.Duration, numCommits int, workdir, host string, repos repograph.Map, isolateClient *isolate.Client, swarmingClient swarming.ApiClient, c *http.Client, timeDecayAmt24Hr float64, buildbucketApiUrl, trybotBucket string, projectRepoMapping map[string]string, pools []string, pubsubTopic, depotTools string, gerrit gerrit.GerritInterface) (*TaskScheduler, error) {
 	// Repos must be updated before window is initialized; otherwise the repos may be uninitialized,
 	// resulting in the window being too short, causing the caches to be loaded with incomplete data.
 	for _, r := range repos {
@@ -1353,6 +1348,10 @@
 		return err
 	}
 
+	if err := s.bl.Update(); err != nil {
+		return err
+	}
+
 	wg2.Wait()
 	if e2 != nil {
 		return e2
diff --git a/task_scheduler/go/scheduling/task_scheduler_test.go b/task_scheduler/go/scheduling/task_scheduler_test.go
index 6ea2f69..c7b7629 100644
--- a/task_scheduler/go/scheduling/task_scheduler_test.go
+++ b/task_scheduler/go/scheduling/task_scheduler_test.go
@@ -13,6 +13,7 @@
 	"testing"
 	"time"
 
+	"github.com/google/uuid"
 	assert "github.com/stretchr/testify/require"
 	buildbucket_api "go.chromium.org/luci/common/api/buildbucket/buildbucket/v1"
 	swarming_api "go.chromium.org/luci/common/api/swarming/swarming/v1"
@@ -32,6 +33,7 @@
 	"go.skia.org/infra/task_scheduler/go/blacklist"
 	"go.skia.org/infra/task_scheduler/go/db"
 	"go.skia.org/infra/task_scheduler/go/db/cache"
+	"go.skia.org/infra/task_scheduler/go/db/firestore"
 	"go.skia.org/infra/task_scheduler/go/db/memory"
 	"go.skia.org/infra/task_scheduler/go/specs"
 	specs_testutils "go.skia.org/infra/task_scheduler/go/specs/testutils"
@@ -210,7 +212,7 @@
 	assert.NoError(t, ioutil.WriteFile(gitcookies, []byte(".googlesource.com\tTRUE\t/\tTRUE\t123\to\tgit-user.google.com=abc123"), os.ModePerm))
 	g, err := gerrit.NewGerrit(fakeGerritUrl, gitcookies, urlMock.Client())
 	assert.NoError(t, err)
-	s, err := NewTaskScheduler(ctx, d, time.Duration(math.MaxInt64), 0, tmp, "fake.server", repos, isolateClient, swarmingClient, urlMock.Client(), 1.0, tryjobs.API_URL_TESTING, tryjobs.BUCKET_TESTING, projectRepoMapping, swarming.POOLS_PUBLIC, "", depotTools, g)
+	s, err := NewTaskScheduler(ctx, d, nil, time.Duration(math.MaxInt64), 0, tmp, "fake.server", repos, isolateClient, swarmingClient, urlMock.Client(), 1.0, tryjobs.API_URL_TESTING, tryjobs.BUCKET_TESTING, projectRepoMapping, swarming.POOLS_PUBLIC, "", depotTools, g)
 	assert.NoError(t, err)
 	return ctx, gb, d, swarmingClient, s, urlMock, func() {
 		testutils.RemoveAll(t, tmp)
@@ -2025,7 +2027,7 @@
 	g, err := gerrit.NewGerrit(fakeGerritUrl, gitcookies, urlMock.Client())
 	assert.NoError(t, err)
 
-	s, err := NewTaskScheduler(ctx, d, time.Duration(math.MaxInt64), 0, workdir, "fake.server", repos, isolateClient, swarmingClient, mockhttpclient.NewURLMock().Client(), 1.0, tryjobs.API_URL_TESTING, tryjobs.BUCKET_TESTING, projectRepoMapping, swarming.POOLS_PUBLIC, "", depotTools, g)
+	s, err := NewTaskScheduler(ctx, d, nil, time.Duration(math.MaxInt64), 0, workdir, "fake.server", repos, isolateClient, swarmingClient, mockhttpclient.NewURLMock().Client(), 1.0, tryjobs.API_URL_TESTING, tryjobs.BUCKET_TESTING, projectRepoMapping, swarming.POOLS_PUBLIC, "", depotTools, g)
 	assert.NoError(t, err)
 
 	mockTasks := []*swarming_api.SwarmingRpcsTaskRequestMetadata{}
@@ -2291,6 +2293,11 @@
 	// actually integrated into the scheduler.
 	ctx, gb, _, swarmingClient, s, _, cleanup := setup(t)
 	defer cleanup()
+	testutils.ManualTest(t)
+	instance := fmt.Sprintf("task-scheduler-test-%s", uuid.New())
+	bl, err := blacklist.New(context.Background(), firestore.FIRESTORE_PROJECT, instance, nil)
+	assert.NoError(t, err)
+	s.bl = bl
 
 	c1 := getRS1(t, ctx, gb).Revision
 
diff --git a/task_scheduler/go/task_scheduler/main.go b/task_scheduler/go/task_scheduler/main.go
index 1c195ec..5aeb9a4 100644
--- a/task_scheduler/go/task_scheduler/main.go
+++ b/task_scheduler/go/task_scheduler/main.go
@@ -68,6 +68,9 @@
 	// Task Scheduler database.
 	tsDb db.BackupDBCloser
 
+	// Task Scheduler blacklist.
+	bl *blacklist.Blacklist
+
 	// Git repo objects.
 	repos repograph.Map
 
@@ -87,19 +90,23 @@
 	firestoreInstance = flag.String("firestore_instance", "", "Firestore instance to use, eg. \"prod\"")
 	isolateServer     = flag.String("isolate_server", isolate.ISOLATE_SERVER_URL, "Which Isolate server to use.")
 	local             = flag.Bool("local", false, "Whether we're running on a dev machine vs in production.")
-	pubsubTopicSet    = flag.String("pubsub_topic_set", "", fmt.Sprintf("Pubsub topic set; one of: %v", pubsub.VALID_TOPIC_SETS))
-	repoUrls          = common.NewMultiStringFlag("repo", nil, "Repositories for which to schedule tasks.")
-	recipesCfgFile    = flag.String("recipes_cfg", "", "Path to the recipes.cfg file.")
-	resourcesDir      = flag.String("resources_dir", "", "The directory to find templates, JS, and CSS files. If blank, assumes you're running inside a checkout and will attempt to find the resources relative to this source file.")
-	scoreDecay24Hr    = flag.Float64("scoreDecay24Hr", 0.9, "Task candidate scores are penalized using linear time decay. This is the desired value after 24 hours. Setting it to 1.0 causes commits not to be prioritized according to commit time.")
-	swarmingPools     = common.NewMultiStringFlag("pool", swarming.POOLS_PUBLIC, "Which Swarming pools to use.")
-	swarmingServer    = flag.String("swarming_server", swarming.SWARMING_SERVER, "Which Swarming server to use.")
-	timePeriod        = flag.String("timeWindow", "4d", "Time period to use.")
-	tryJobBucket      = flag.String("tryjob_bucket", tryjobs.BUCKET_PRIMARY, "Which Buildbucket bucket to use for try jobs.")
-	commitWindow      = flag.Int("commitWindow", 10, "Minimum number of recent commits to keep in the timeWindow.")
-	gsBucket          = flag.String("gsBucket", "skia-task-scheduler", "Name of Google Cloud Storage bucket to use for backups and recovery.")
-	workdir           = flag.String("workdir", "workdir", "Working directory to use.")
-	promPort          = flag.String("prom_port", ":20000", "Metrics service address (e.g., ':10110')")
+	// TODO(borenet): pubsubTopicSet is also used for as the blacklist
+	// instance name. Once all schedulers are using Firestore for their
+	// task DB, firestoreInstance will have the same value. We should
+	// combine into a single instanceName flag.
+	pubsubTopicSet = flag.String("pubsub_topic_set", "", fmt.Sprintf("Pubsub topic set; one of: %v", pubsub.VALID_TOPIC_SETS))
+	repoUrls       = common.NewMultiStringFlag("repo", nil, "Repositories for which to schedule tasks.")
+	recipesCfgFile = flag.String("recipes_cfg", "", "Path to the recipes.cfg file.")
+	resourcesDir   = flag.String("resources_dir", "", "The directory to find templates, JS, and CSS files. If blank, assumes you're running inside a checkout and will attempt to find the resources relative to this source file.")
+	scoreDecay24Hr = flag.Float64("scoreDecay24Hr", 0.9, "Task candidate scores are penalized using linear time decay. This is the desired value after 24 hours. Setting it to 1.0 causes commits not to be prioritized according to commit time.")
+	swarmingPools  = common.NewMultiStringFlag("pool", swarming.POOLS_PUBLIC, "Which Swarming pools to use.")
+	swarmingServer = flag.String("swarming_server", swarming.SWARMING_SERVER, "Which Swarming server to use.")
+	timePeriod     = flag.String("timeWindow", "4d", "Time period to use.")
+	tryJobBucket   = flag.String("tryjob_bucket", tryjobs.BUCKET_PRIMARY, "Which Buildbucket bucket to use for try jobs.")
+	commitWindow   = flag.Int("commitWindow", 10, "Minimum number of recent commits to keep in the timeWindow.")
+	gsBucket       = flag.String("gsBucket", "skia-task-scheduler", "Name of Google Cloud Storage bucket to use for backups and recovery.")
+	workdir        = flag.String("workdir", "workdir", "Working directory to use.")
+	promPort       = flag.String("prom_port", ":20000", "Metrics service address (e.g., ':10110')")
 
 	pubsubTopicName      = flag.String("pubsub_topic", swarming.PUBSUB_TOPIC_SWARMING_TASKS, "Pub/Sub topic to use for Swarming tasks.")
 	pubsubSubscriberName = flag.String("pubsub_subscriber", PUBSUB_SUBSCRIBER_TASK_SCHEDULER, "Pub/Sub subscriber name.")
@@ -165,11 +172,7 @@
 		reloadTemplates()
 	}
 	_, t, c := ts.RecentSpecsAndCommits()
-	rulesMap := ts.GetBlacklist().Rules
-	rules := make([]*blacklist.Rule, 0, len(rulesMap))
-	for _, r := range rulesMap {
-		rules = append(rules, r)
-	}
+	rules := ts.GetBlacklist().GetRules()
 	enc, err := json.Marshal(&struct {
 		Commits   []string
 		Rules     []*blacklist.Rule
@@ -218,14 +221,14 @@
 	w.Header().Set("Content-Type", "application/json")
 	if r.Method == http.MethodDelete {
 		var msg struct {
-			Name string `json:"name"`
+			Id string `json:"id"`
 		}
 		if err := json.NewDecoder(r.Body).Decode(&msg); err != nil {
 			httputils.ReportError(w, r, err, fmt.Sprintf("Failed to decode request body: %s", err))
 			return
 		}
 		defer util.Close(r.Body)
-		if err := ts.GetBlacklist().RemoveRule(msg.Name); err != nil {
+		if err := ts.GetBlacklist().RemoveRule(msg.Id); err != nil {
 			httputils.ReportError(w, r, err, fmt.Sprintf("Failed to delete blacklist rule: %s", err))
 			return
 		}
@@ -675,6 +678,12 @@
 		util.Close(tsDb)
 	})
 
+	// Blacklist DB.
+	bl, err = blacklist.New(ctx, firestore.FIRESTORE_PROJECT, *pubsubTopicSet, tokenSource)
+	if err != nil {
+		sklog.Fatal(err)
+	}
+
 	// Git repos.
 	if *repoUrls == nil {
 		*repoUrls = common.PUBLIC_REPOS
@@ -736,7 +745,7 @@
 	if err := swarming.InitPubSub(serverURL, *pubsubTopicName, *pubsubSubscriberName); err != nil {
 		sklog.Fatal(err)
 	}
-	ts, err = scheduling.NewTaskScheduler(ctx, tsDb, period, *commitWindow, wdAbs, serverURL, repos, isolateClient, swarm, httpClient, *scoreDecay24Hr, tryjobs.API_URL_PROD, *tryJobBucket, common.PROJECT_REPO_MAPPING, *swarmingPools, *pubsubTopicName, depotTools, gerrit)
+	ts, err = scheduling.NewTaskScheduler(ctx, tsDb, bl, period, *commitWindow, wdAbs, serverURL, repos, isolateClient, swarm, httpClient, *scoreDecay24Hr, tryjobs.API_URL_PROD, *tryJobBucket, common.PROJECT_REPO_MAPPING, *swarmingPools, *pubsubTopicName, depotTools, gerrit)
 	if err != nil {
 		sklog.Fatal(err)
 	}
diff --git a/task_scheduler/res/imp/task-scheduler-blacklist-sk-demo.html b/task_scheduler/res/imp/task-scheduler-blacklist-sk-demo.html
index c0810cf..d8e4a9b 100644
--- a/task_scheduler/res/imp/task-scheduler-blacklist-sk-demo.html
+++ b/task_scheduler/res/imp/task-scheduler-blacklist-sk-demo.html
@@ -18,6 +18,7 @@
         ],
         "description": "Infra-PerCommit is broken at this revision.",
         "name": "Infra-PerCommit@355d0d3",
+        "id": "1",
       },
       {
         "added_by": "TaskScheduler",
@@ -29,6 +30,7 @@
         "commits": [],
         "description": "Bots which the Task Scheduler should not schedule because they run on a timer.",
         "name": "Timed Bots",
+        "id": "2",
       },
       {
         "added_by": "TaskScheduler",
@@ -38,6 +40,7 @@
         "commits": [],
         "description": "Trybots are scheduled through Gerrit or the Commit Queue.",
         "name": "Trybots",
+        "id": "3",
       },
     ];
 
@@ -68,7 +71,7 @@
     var gen_response = function() {
       var data = {"rules":{}};
       for (var i = 0; i < blacklist.length; ++i) {
-        data["rules"][blacklist[i]["name"]] = blacklist[i];
+        data["rules"][blacklist[i]["id"]] = blacklist[i];
       }
       return JSON.stringify(data);
     };
@@ -98,13 +101,13 @@
     sk.delete = function(url, body) {
       return new Promise(function (resolve, reject) {
         var data = JSON.parse(body);
-        if (!data["name"]) {
-          reject("No rule name specified.");
+        if (!data["id"]) {
+          reject("No rule ID specified.");
           return;
         }
         var idx = -1;
         for (var i = 0; i < blacklist.length; i++) {
-          if (blacklist[i]["name"] == data["name"]) {
+          if (blacklist[i]["id"] == data["id"]) {
             idx = i;
             break;
           }
diff --git a/task_scheduler/res/imp/task-scheduler-blacklist-sk.html b/task_scheduler/res/imp/task-scheduler-blacklist-sk.html
index ea15833..83a8ad6 100644
--- a/task_scheduler/res/imp/task-scheduler-blacklist-sk.html
+++ b/task_scheduler/res/imp/task-scheduler-blacklist-sk.html
@@ -323,7 +323,7 @@
 
       _remove_rule(e) {
         var data = {
-          "name": e.model.item.name,
+          "id": e.model.item.id,
         };
         var str = JSON.stringify(data);
         this._loading = true;