Add genpromcrd.

A command-line application for producing CRDs for Managed Prometheus.

$ go run ./promk/go/genpromcrd --help
usage: genpromcrd --directory=[k8s-config checkout dir] [options]
options:
  -directory string
        The directory that contains a checkout of k8s-config.
  -dryrun
        If true then just print the names of the files that would be written.
  -logtostdout
        If true then write logging on stdout.

The genpromcrd cmd runs over all Deployments and StatefulSets and
writes out Managed Prometheus CRDs for both scraping and alerting.
For example, given the following files in the git repo that contains
all the cluster config:

        k8s-config/
        ├── monitoring
        │   └── appgroups
        │       └── perf.yml
        └── skia-infra-public
            └── perf.yml

All the Rules files containing alerts to run for Deployments and
StatefulSets are held under /monitoring/appgroups, and the name of
each file before the '.yml' extension corresponds to an appgroup label.

Since perf.yml resides inside a directory associated with a
cluster, and the Deployment there runs in the namespace 'somenamespace'
and has .template.label.appgroup=perf, a new file will be written to:

   skia-infra-public/perf_somenamespace_appgroup_alerts.yml

which is a modified version of /monitoring/appgroups/perf.yml, updated
to scrape the deployment in the correct namespace, and it will also
contain 'absent()' alerts for all the alerts defined in 'perf.yml'.

The list of directories processed is defined in:

    //kube/clusters/config.json
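
For illustration, each generated file holds the namespace-updated
Rules CRD followed by a PodMonitoring CRD, separated by a YAML
document separator. A trimmed sketch for the example above:

    apiVersion: monitoring.googleapis.com/v1
    kind: Rules
    metadata:
      name: perf
      namespace: somenamespace
    spec:
      groups:
        ...
    ---
    apiVersion: monitoring.googleapis.com/v1
    kind: PodMonitoring
    metadata:
     name: perf-somenamespace
    spec:
     selector:
       matchLabels:
          appgroup: perf
     endpoints:
       - port: prom
         interval: 15s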

Bug: skia:13542
Change-Id: If4c1f6b281a236a3613ca4b65c6279d0be90e3d8
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/561420
Reviewed-by: Ravi Mistry <rmistry@google.com>
Commit-Queue: Joe Gregorio <jcgregorio@google.com>
diff --git a/go/kube/clusterconfig/BUILD.bazel b/go/kube/clusterconfig/BUILD.bazel
index 3e985b9..6919cf3 100644
--- a/go/kube/clusterconfig/BUILD.bazel
+++ b/go/kube/clusterconfig/BUILD.bazel
@@ -1,3 +1,4 @@
+load("//bazel/go:go_test.bzl", "go_test")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 
 go_library(
@@ -8,5 +9,16 @@
     deps = [
         "//go/git",
         "//go/skerr",
+        "//kube/clusters",
+    ],
+)
+
+go_test(
+    name = "clusterconfig_test",
+    srcs = ["clusterconfig_test.go"],
+    embed = [":clusterconfig"],
+    deps = [
+        "//go/testutils/unittest",
+        "@com_github_stretchr_testify//require",
     ],
 )
diff --git a/go/kube/clusterconfig/clusterconfig.go b/go/kube/clusterconfig/clusterconfig.go
index 030b942..c655b33 100644
--- a/go/kube/clusterconfig/clusterconfig.go
+++ b/go/kube/clusterconfig/clusterconfig.go
@@ -13,6 +13,7 @@
 
 	"go.skia.org/infra/go/git"
 	"go.skia.org/infra/go/skerr"
+	"go.skia.org/infra/kube/clusters"
 )
 
 // Cluster is detailed info on a particular cluster in a ClusterConfig.
@@ -73,6 +74,16 @@
 	return ret, nil
 }
 
+// NewFromEmbeddedConfig returns a new ClusterConfig from the embedded
+// config.json file in //kube/clusters.
+func NewFromEmbeddedConfig() (*ClusterConfig, error) {
+	var ret ClusterConfig
+	if err := json.Unmarshal([]byte(clusters.ClusterConfig), &ret); err != nil {
+		return nil, skerr.Wrapf(err, "Failed to decode embedded cluster config.")
+	}
+	return &ret, nil
+}
+
 // NewWithCheckout returns a ClusterConfig for accessing the config.json file
 // that contains information on each cluster we use, and also checks out the
 // YAML files for all the clusters.
diff --git a/go/kube/clusterconfig/clusterconfig_test.go b/go/kube/clusterconfig/clusterconfig_test.go
new file mode 100644
index 0000000..e4e700f
--- /dev/null
+++ b/go/kube/clusterconfig/clusterconfig_test.go
@@ -0,0 +1,16 @@
+package clusterconfig
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.skia.org/infra/go/testutils/unittest"
+)
+
+func TestNewFromEmbeddedConfig_HappyPath(t *testing.T) {
+	unittest.SmallTest(t)
+
+	cfg, err := NewFromEmbeddedConfig()
+	require.NoError(t, err)
+	require.Equal(t, "https://skia.googlesource.com/k8s-config", cfg.Repo)
+}
diff --git a/go/prom/BUILD.bazel b/go/prom/BUILD.bazel
new file mode 100644
index 0000000..4ac8674
--- /dev/null
+++ b/go/prom/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("//bazel/go:go_test.bzl", "go_test")
+
+go_library(
+    name = "prom",
+    srcs = ["prom.go"],
+    importpath = "go.skia.org/infra/go/prom",
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "prom_test",
+    srcs = ["prom_test.go"],
+    embed = [":prom"],
+    deps = [
+        "//go/testutils/unittest",
+        "@com_github_stretchr_testify//assert",
+    ],
+)
diff --git a/go/prom/crd/BUILD.bazel b/go/prom/crd/BUILD.bazel
new file mode 100644
index 0000000..6b5e9e2
--- /dev/null
+++ b/go/prom/crd/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("//bazel/go:go_test.bzl", "go_test")
+
+go_library(
+    name = "crd",
+    srcs = ["crd.go"],
+    importpath = "go.skia.org/infra/go/prom/crd",
+    visibility = ["//visibility:public"],
+    deps = ["//go/prom"],
+)
+
+go_test(
+    name = "crd_test",
+    srcs = ["crd_test.go"],
+    embed = [":crd"],
+    deps = [
+        "//go/testutils/unittest",
+        "@com_github_stretchr_testify//require",
+        "@in_gopkg_yaml_v2//:yaml_v2",
+    ],
+)
diff --git a/go/prom/crd/crd.go b/go/prom/crd/crd.go
new file mode 100644
index 0000000..42d05d2
--- /dev/null
+++ b/go/prom/crd/crd.go
@@ -0,0 +1,77 @@
+// Package crd handles Managed Prometheus Custom Resource Definitions.
+package crd
+
+import (
+	"fmt"
+
+	"go.skia.org/infra/go/prom"
+)
+
+// Rules Custom Resource representation.
+//
+// In theory we should be able to import this from the Managed Prometheus repo, but
+// they don't currently provide a separate repo with just the json annotated structs.
+type Rules struct {
+	Version  string   `yaml:"apiVersion"`
+	Kind     string   `yaml:"kind"`
+	MetaData MetaData `yaml:"metadata"`
+	Spec     Spec     `yaml:"spec"`
+}
+
+// MetaData for the Rules CRD.
+type MetaData struct {
+	Name      string `yaml:"name"`
+	Namespace string `yaml:"namespace,omitempty"`
+}
+
+// Spec for parsing the yaml format of Prometheus alerts.
+type Spec struct {
+	Groups []Group `yaml:"groups"`
+}
+
+// Group of Rules.
+type Group struct {
+	Name     string `yaml:"name"`
+	Interval string `yaml:"interval"`
+	Rules    []Rule `yaml:"rules"`
+}
+
+// Rule is a single Prometheus Alert.
+type Rule struct {
+	Alert       string            `yaml:"alert"`
+	Expr        string            `yaml:"expr"`
+	Labels      map[string]string `yaml:"labels"`
+	Annotations map[string]string `yaml:"annotations"`
+}
+
+// AddAbsentRules adds an `absent()` alert for each Rule, where possible.
+func (r *Rules) AddAbsentRules() {
+	absentGroups := []Group{}
+	for _, g := range r.Spec.Groups {
+		rules := []Rule{}
+		for _, rule := range g.Rules {
+			equation, ignore := prom.EquationFromExpr(rule.Expr)
+			if ignore {
+				continue
+			}
+			rules = append(rules, Rule{
+				Alert: "Absent",
+				Expr:  fmt.Sprintf("absent(%s)", equation),
+				Labels: map[string]string{
+					"category": "infra",
+					"severity": "critical",
+				},
+				Annotations: map[string]string{
+					"abbr":        rule.Alert,
+					"description": fmt.Sprintf("There is no data for the Alert: %q", rule.Alert),
+				},
+			})
+		}
+		absentGroups = append(absentGroups, Group{
+			Name:     fmt.Sprintf("absent-%s", g.Name),
+			Interval: g.Interval,
+			Rules:    rules,
+		})
+	}
+	r.Spec.Groups = append(r.Spec.Groups, absentGroups...)
+}
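
As a quick illustration of the new crd package (a minimal sketch, not
part of this change; the alert group below is hypothetical, shaped like
the contents of /monitoring/appgroups/perf.yml):

    package main

    import (
    	"fmt"

    	"go.skia.org/infra/go/prom/crd"
    	yaml "gopkg.in/yaml.v2"
    )

    func main() {
    	// A hypothetical alert group for the "perf" appgroup.
    	rules := crd.Rules{
    		Version:  "monitoring.googleapis.com/v1",
    		Kind:     "Rules",
    		MetaData: crd.MetaData{Name: "perf"},
    		Spec: crd.Spec{
    			Groups: []crd.Group{{
    				Name:     "example",
    				Interval: "30s",
    				Rules: []crd.Rule{{
    					Alert: "AndroidIngestLiveness",
    					Expr:  "liveness_last_successful_add_s > 300",
    				}},
    			}},
    		},
    	}

    	// Appends a group "absent-example" whose single rule is
    	// absent(liveness_last_successful_add_s).
    	rules.AddAbsentRules()

    	b, err := yaml.Marshal(rules)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b))
    }
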
diff --git a/go/prom/crd/crd_test.go b/go/prom/crd/crd_test.go
new file mode 100644
index 0000000..a393fe7
--- /dev/null
+++ b/go/prom/crd/crd_test.go
@@ -0,0 +1,116 @@
+package crd
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.skia.org/infra/go/testutils/unittest"
+	yaml "gopkg.in/yaml.v2"
+)
+
+const original = `apiVersion: monitoring.googleapis.com/v1
+kind: Rules
+metadata:
+  name: perf
+  namespace: default
+spec:
+  groups:
+  - name: example
+    interval: 30s
+    rules:
+    - alert: AndroidIngestFailures
+      expr: rate(process_failures[1h]) > 0.01
+      labels:
+        category: infra
+      annotations:
+        description: Error rate for processing buildids is too high. See ...
+    - alert: AndroidIngestLiveness
+      expr: liveness_last_successful_add_s > 300
+      labels:
+        category: infra
+      annotations:
+        description: Liveness for processing buildids is too high. See https://github.com/google/skia-buildbot/blob/main/android_ingest/PROD.md#liveness
+`
+
+func TestStructs_RoundTripYAMLDocThroughStructs_YAMLDocIsUnchanged(t *testing.T) {
+	unittest.SmallTest(t)
+
+	var deserialized Rules
+	err := yaml.Unmarshal([]byte(original), &deserialized)
+	require.NoError(t, err)
+
+	reserialized, err := yaml.Marshal(deserialized)
+	require.NoError(t, err)
+
+	require.Equal(t, original, string(reserialized))
+}
+
+func TestRules_AddAbsentRules_AlertWithDoubleComparisonIsSkipped(t *testing.T) {
+	unittest.SmallTest(t)
+
+	rules := Rules{
+		Spec: Spec{
+			Groups: []Group{
+				{
+					Name:     "example",
+					Interval: "15s",
+					Rules: []Rule{
+						{
+							Alert: "ThisWillNotGetAnAbsentAlert",
+							Expr:  "rate(process_failures[1h]) > 0.01 && rate(process_failures[1h]) < 10.0",
+						},
+						{
+							Alert: "AndroidIngestLiveness",
+							Expr:  "liveness_last_successful_add_s > 300",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	rules.AddAbsentRules()
+
+	expected := Rules{
+		Spec: Spec{
+			Groups: []Group{
+				{
+					Name:     "example",
+					Interval: "15s",
+					Rules: []Rule{
+						{
+							Alert: "ThisWillNotGetAnAbsentAlert",
+							Expr:  "rate(process_failures[1h]) > 0.01 && rate(process_failures[1h]) < 10.0",
+						},
+						{
+							Alert: "AndroidIngestLiveness",
+							Expr:  "liveness_last_successful_add_s > 300",
+						},
+					},
+				},
+				// A new group should be added.
+				{
+					Name:     "absent-example",
+					Interval: "15s",
+					Rules: []Rule{
+						// But the new group only contains one Alert, the one for AndroidIngestLiveness.
+						{
+							Alert: "Absent",
+							Expr:  "absent(liveness_last_successful_add_s)",
+							Labels: map[string]string{
+								"category": "infra",
+								"severity": "critical",
+							},
+							Annotations: map[string]string{
+								"abbr":        "AndroidIngestLiveness",
+								"description": "There is no data for the Alert: \"AndroidIngestLiveness\"",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	require.Equal(t, expected, rules)
+}
diff --git a/go/prom/prom.go b/go/prom/prom.go
new file mode 100644
index 0000000..bb224c7
--- /dev/null
+++ b/go/prom/prom.go
@@ -0,0 +1,50 @@
+// Package prom has functions for Prometheus.
+package prom
+
+import (
+	"regexp"
+	"strings"
+)
+
+var (
+	// atComparison is used to chop up expressions at a comparison. Note that we
+	// require a trailing space, which avoids matching the equals sign inside
+	// a label, such as {app="foo"}.
+	atComparison = regexp.MustCompile(`[<>=!]+\s`)
+)
+
+// EquationFromExpr returns the equation from an expression. For example:
+//
+//      "liveness_ci_pubsub_receive_s > 60 * 60 * 24 * 2"
+//
+// Will return:
+//
+//       "liveness_ci_pubsub_receive_s"
+//
+// Note that for this to work the equation needs to be on the left hand side
+// of the expression, and there must be spaces on either side of any
+// comparison operator.
+//
+// The returned bool is true if the expression should be ignored, e.g. for computed metrics or multipart relations.
+func EquationFromExpr(expr string) (string, bool) {
+	if expr == "" {
+		return "", false
+	}
+	// Ignore computed metrics, which by convention have a ":".
+	if strings.Contains(expr, ":") {
+		return "", true
+	}
+
+	parts := atComparison.Split(expr, -1)
+	// Ignore multipart relations, e.g. "a < b and b > c".
+	if len(parts) != 2 {
+		return "", true
+	}
+
+	ret := strings.TrimSpace(parts[0])
+	if ret == "" {
+		return "", false
+	}
+
+	return ret, false
+}
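
To make the split behavior concrete, a standalone sketch of the same
pattern (assumes only the standard library):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	// The same pattern prom.go compiles into atComparison.
    	re := regexp.MustCompile(`[<>=!]+\s`)

    	// Splits at the "== " comparison: ["up " "0"];
    	// TrimSpace(parts[0]) then yields "up".
    	fmt.Printf("%q\n", re.Split("up == 0", -1))

    	// The '=' inside the label selector is not followed by
    	// whitespace, so the only split point is still the "== ":
    	// ["up{app=\"foo\"} " "0"].
    	fmt.Printf("%q\n", re.Split(`up{app="foo"} == 0`, -1))
    }
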
diff --git a/go/prom/prom_test.go b/go/prom/prom_test.go
new file mode 100644
index 0000000..6056764
--- /dev/null
+++ b/go/prom/prom_test.go
@@ -0,0 +1,72 @@
+package prom
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.skia.org/infra/go/testutils/unittest"
+)
+
+func TestEquationFromExpr_Equality(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("up == 0")
+	assert.False(t, ignore)
+	assert.Equal(t, "up", got)
+}
+
+func TestEquationFromExpr_NoOperations(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("vector(1)")
+	assert.True(t, ignore)
+	assert.Equal(t, "", got)
+}
+
+func TestEquationFromExpr_GreaterThan(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("liveness_ci_pubsub_receive_s > 60 * 60 * 24 * 2")
+	assert.False(t, ignore)
+	assert.Equal(t, "liveness_ci_pubsub_receive_s", got)
+}
+
+func TestEquationFromExpr_GreaterThanOrEqual(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("cq_watcher_in_flight_waiting_in_cq{app=\"cq-watcher\"} >= 10")
+	assert.False(t, ignore)
+	assert.Equal(t, "cq_watcher_in_flight_waiting_in_cq{app=\"cq-watcher\"}", got)
+}
+
+func TestEquationFromExpr_NotEqual(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("healthy{app=\"ct-perf\"} != 1")
+	assert.False(t, ignore)
+	assert.Equal(t, "healthy{app=\"ct-perf\"}", got)
+}
+
+func TestEquationFromExpr_Empty(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("")
+	assert.False(t, ignore)
+	assert.Equal(t, "", got)
+}
+
+func TestEquationFromExpr_IgnoreComputedEquations(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("computed:value")
+	assert.True(t, ignore)
+	assert.Equal(t, "", got)
+}
+
+func TestEquationFromExpr_IgnoreMultipleComparisons(t *testing.T) {
+	unittest.SmallTest(t)
+
+	got, ignore := EquationFromExpr("a < b and b > c")
+	assert.True(t, ignore)
+	assert.Equal(t, "", got)
+}
diff --git a/kube/clusters/BUILD.bazel b/kube/clusters/BUILD.bazel
index 77ecd3b..a777402 100644
--- a/kube/clusters/BUILD.bazel
+++ b/kube/clusters/BUILD.bazel
@@ -1 +1,11 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
 exports_files(["config.json"])
+
+go_library(
+    name = "clusters",
+    srcs = ["embed.go"],
+    embedsrcs = ["config.json"],
+    importpath = "go.skia.org/infra/kube/clusters",
+    visibility = ["//visibility:public"],
+)
diff --git a/kube/clusters/embed.go b/kube/clusters/embed.go
new file mode 100644
index 0000000..dd34e13
--- /dev/null
+++ b/kube/clusters/embed.go
@@ -0,0 +1,10 @@
+// Package clusters contains the current cluster config json file as an embedded
+// string.
+package clusters
+
+import (
+	_ "embed"
+)
+
+//go:embed config.json
+var ClusterConfig string
diff --git a/promk/go/genpromcrd/BUILD.bazel b/promk/go/genpromcrd/BUILD.bazel
new file mode 100644
index 0000000..b305830
--- /dev/null
+++ b/promk/go/genpromcrd/BUILD.bazel
@@ -0,0 +1,18 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+    name = "genpromcrd_lib",
+    srcs = ["main.go"],
+    importpath = "go.skia.org/infra/promk/go/genpromcrd",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//go/sklog",
+        "//promk/go/genpromcrd/genpromcrd",
+    ],
+)
+
+go_binary(
+    name = "genpromcrd",
+    embed = [":genpromcrd_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/promk/go/genpromcrd/genpromcrd/BUILD.bazel b/promk/go/genpromcrd/genpromcrd/BUILD.bazel
new file mode 100644
index 0000000..1a490a6
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/BUILD.bazel
@@ -0,0 +1,34 @@
+load("//bazel/go:go_test.bzl", "go_test")
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "genpromcrd",
+    srcs = ["genpromcrd.go"],
+    importpath = "go.skia.org/infra/promk/go/genpromcrd/genpromcrd",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//go/kube/clusterconfig",
+        "//go/prom/crd",
+        "//go/skerr",
+        "//go/sklog",
+        "//go/sklog/nooplogging",
+        "//go/sklog/sklogimpl",
+        "//go/sklog/stdlogging",
+        "//go/util",
+        "//k8s-checker/go/k8s_config",
+        "@in_gopkg_yaml_v2//:yaml_v2",
+    ],
+)
+
+go_test(
+    name = "genpromcrd_test",
+    srcs = ["genpromcrd_test.go"],
+    data = glob(["testdata/**"]),
+    embed = [":genpromcrd"],
+    deps = [
+        "//go/testutils",
+        "//go/testutils/unittest",
+        "@com_github_otiai10_copy//:copy",
+        "@com_github_stretchr_testify//require",
+    ],
+)
diff --git a/promk/go/genpromcrd/genpromcrd/genpromcrd.go b/promk/go/genpromcrd/genpromcrd/genpromcrd.go
new file mode 100644
index 0000000..85e5ca2
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/genpromcrd.go
@@ -0,0 +1,337 @@
+// Package genpromcrd implements all the functionality for the genpromcrd
+// command line application.
+package genpromcrd
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"io/fs"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"text/template"
+
+	"go.skia.org/infra/go/kube/clusterconfig"
+	"go.skia.org/infra/go/prom/crd"
+	"go.skia.org/infra/go/skerr"
+	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/go/sklog/nooplogging"
+	"go.skia.org/infra/go/sklog/sklogimpl"
+	"go.skia.org/infra/go/sklog/stdlogging"
+	"go.skia.org/infra/go/util"
+	"go.skia.org/infra/k8s-checker/go/k8s_config"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// podMonitoring is a template for how an appgroup should be scraped by Managed
+// Prometheus.
+const podMonitoring = `apiVersion: monitoring.googleapis.com/v1
+kind: PodMonitoring
+metadata:
+ name: {{ .AppGroup }}-{{ .Namespace }}
+spec:
+ selector:
+   matchLabels:
+      appgroup: {{ .AppGroup }}
+ endpoints:
+   - port: prom
+     interval: 15s
+ targetLabels:
+   fromPod:
+     - from: app
+     - from: appgroup
+`
+
+// podMonitoringTemplate is the compiled podMonitoring template.
+var podMonitoringTemplate = template.Must(template.New("podMonitoring").Parse(podMonitoring))
+
+// AlertTarget represents a single appgroup that might need monitoring.
+type AlertTarget struct {
+	// AppGroup is the value of the template.label.appgroup for the pods to be monitored.
+	AppGroup string
+
+	// Namespace the pods are running in.
+	Namespace string
+
+	// Directory where the YAML file was found for this appgroup. The scraping
+	// and alerting file will be written back into this directory.
+	Directory string
+}
+
+// TargetFilename is the absolute filename where the pod scraping and alert
+// rules should be written as YAML.
+func (a AlertTarget) TargetFilename() string {
+	return filepath.Join(a.Directory, fmt.Sprintf("%s_%s_appgroup_alerts.yml", a.AppGroup, a.Namespace))
+}
+
+// PodMonitoring is a YAML CRD of how the pods should be scraped.
+func (a AlertTarget) PodMonitoring() (string, error) {
+	var out bytes.Buffer
+	if err := podMonitoringTemplate.Execute(&out, a); err != nil {
+		return "", skerr.Wrapf(err, "Failed to write PodMonitoring for %v", a)
+	}
+	return out.String(), nil
+}
+
+// AlertTargets keeps track of multiple found AlertTargets, de-duplicating
+// any that are the same.
+type AlertTargets map[AlertTarget]bool
+
+// NamespaceOrDefault returns "default" if the empty string is passed in as a
+// namespace.
+func NamespaceOrDefault(ns string) string {
+	if ns == "" {
+		return "default"
+	}
+	return ns
+}
+
+// The possible file extensions used for YAML files.
+var yamlFileExtensions = []string{".yaml", ".yml"}
+
+// getAlertTargetsFromFilename parses the given file and returns an
+// AlertTarget for each Deployment or StatefulSet found in the file that has
+// an `appgroup` label.
+func getAlertTargetsFromFilename(filename string) (AlertTargets, error) {
+	ret := AlertTargets{}
+	err := util.WithReadFile(filename, func(f io.Reader) error {
+		b, err := ioutil.ReadAll(f)
+		if err != nil {
+			return err
+		}
+		deployments, statefulSets, _, err := k8s_config.ParseK8sConfigFile(b)
+		if err != nil {
+			return skerr.Wrapf(err, "failed to parse")
+		}
+		for _, d := range deployments {
+			if appgroup, ok := d.Spec.Template.Labels["appgroup"]; ok {
+				ret[AlertTarget{
+					AppGroup:  appgroup,
+					Namespace: NamespaceOrDefault(d.Namespace),
+					Directory: filepath.Dir(filename),
+				}] = true
+			}
+		}
+		for _, d := range statefulSets {
+			if appgroup, ok := d.Spec.Template.Labels["appgroup"]; ok {
+				ret[AlertTarget{
+					AppGroup:  appgroup,
+					Namespace: NamespaceOrDefault(d.Namespace),
+					Directory: filepath.Dir(filename),
+				}] = true
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// getAllAlertTargetsUnderDir walks the given directory tree, applies
+// getAlertTargetsFromFilename to each file, and returns all the collected
+// AlertTargets.
+//
+// getAllAlertTargetsUnderDir will only look in sub-directories that correspond
+// to cluster names.
+func getAllAlertTargetsUnderDir(root string) (AlertTargets, error) {
+	ret := AlertTargets{}
+
+	// Load up the cluster config so we can use the cluster names
+	// to know which sub-directories of the git repo we should
+	// process.
+	clusters, err := clusterconfig.NewFromEmbeddedConfig()
+	if err != nil {
+		return nil, skerr.Wrap(err)
+	}
+
+	for clusterName := range clusters.Clusters {
+		dir := filepath.Join(root, clusterName)
+		if _, err := os.Stat(dir); errors.Is(err, os.ErrNotExist) {
+			sklog.Infof("Skipping cluster as the corresponding directory does not exist: %q", dir)
+			continue
+		}
+
+		fileSystem := os.DirFS(dir)
+		err = fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
+			if err != nil {
+				return err
+			}
+			if d.IsDir() {
+				return nil
+			}
+			if !util.In(filepath.Ext(path), yamlFileExtensions) {
+				return nil
+			}
+			alertTargets, err := getAlertTargetsFromFilename(filepath.Join(dir, path))
+			if err != nil {
+				sklog.Errorf("Failed to read file: %s", err)
+				return nil
+			}
+			for key := range alertTargets {
+				ret[key] = true
+			}
+
+			return nil
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return ret, nil
+}
+
+// App is the application.
+type App struct {
+	directory string
+	logging   bool
+	dryrun    bool
+}
+
+// NewApp returns a new *App.
+func NewApp() *App {
+	return &App{}
+}
+
+// flagSet returns a flag.FlagSet for the App.
+func (a *App) flagSet() *flag.FlagSet {
+	ret := flag.NewFlagSet("genpromcrd", flag.ExitOnError)
+	ret.StringVar(&(a.directory), "directory", "", "The directory that contains a checkout of k8s-config.")
+	ret.BoolVar(&(a.logging), "logtostdout", false, "If true then write logging on stdout.")
+	ret.BoolVar(&(a.dryrun), "dryrun", false, "If true then just print the names of the files that would be written.")
+	ret.Usage = func() {
+
+		fmt.Printf("usage: genpromcrd --directory=[k8s-config checkout dir] [options]\n")
+		fmt.Printf("options:\n")
+		ret.PrintDefaults()
+
+		usage := `
+The genpromcrd cmd runs over all Deployments and StatefulSets and
+writes out Managed Prometheus CRDs for both scraping and alerting.
+For example, given the following files in the git repo that contains
+all the cluster config:
+
+	k8s-config/
+	├── monitoring
+	│   └── appgroups
+	│       └── perf.yml
+	└── skia-infra-public
+	    └── perf.yml
+
+All the Rules files containing alerts to run for Deployments and
+StatefulSets are held under /monitoring/appgroups, and the name of
+each file before the '.yml' extension corresponds to an appgroup label.
+
+Since perf.yml resides inside a directory associated with a
+cluster, and the Deployment there runs in the namespace 'somenamespace'
+and has .template.label.appgroup=perf, a new file will be written to:
+
+   skia-infra-public/perf_somenamespace_appgroup_alerts.yml
+
+which is a modified version of /monitoring/appgroups/perf.yml, updated
+to scrape the deployment in the correct namespace, and it will also
+contain 'absent()' alerts for all the alerts defined in 'perf.yml'.
+
+The list of directories processed is defined in:
+
+    //kube/clusters/config.json
+
+`
+		fmt.Println(usage)
+	}
+
+	return ret
+}
+
+// findRulesForAppGroup returns a parsed crd.Rules for the given appgroup if one
+// exists, otherwise it returns an error.
+func (a *App) findRulesForAppGroup(appgroup string) (*crd.Rules, error) {
+	filename := filepath.Join(a.directory, "monitoring", "appgroups", appgroup+".yml")
+	var out crd.Rules
+
+	err := util.WithReadFile(filename, func(f io.Reader) error {
+		if err := yaml.NewDecoder(f).Decode(&out); err != nil {
+			return skerr.Wrapf(err, "Failed to read rules file: %q", filename)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, skerr.Wrapf(err, "Failed to open %q", filename)
+	}
+	return &out, nil
+}
+
+// Main is the application's main entry point.
+//
+// args are the CLI arguments and should be passed in as os.Args.
+func (a *App) Main(args []string) error {
+	if err := a.flagSet().Parse(args[1:]); err != nil {
+		return skerr.Wrapf(err, "Failed to parse flags")
+	}
+
+	if a.logging {
+		sklogimpl.SetLogger(stdlogging.New(os.Stdout))
+	} else {
+		sklogimpl.SetLogger(nooplogging.New())
+	}
+
+	if a.directory == "" {
+		return skerr.Fmt("--directory must be specified.")
+	}
+
+	absDirectory, err := filepath.Abs(a.directory)
+	if err != nil {
+		return skerr.Wrapf(err, "Can't make --directory value into an absolute path.")
+	}
+	allAppGroups, err := getAllAlertTargetsUnderDir(absDirectory)
+	if err != nil {
+		return skerr.Wrapf(err, "Failed parsing Deployments and StatefulSets.")
+	}
+
+	// Write CRDs for each appgroup.
+	for appGroup := range allAppGroups {
+		// Open and parse as Rules if it exists.
+		rules, err := a.findRulesForAppGroup(appGroup.AppGroup)
+		if err != nil {
+			// Log at Info level only, because we expect that not all pods will
+			// use genpromcrd for controlling scraping and alerting.
+			sklog.Infof("Failed to find appgroup: %s", err)
+			continue
+		}
+
+		// Add in absent versions of rules.
+		rules.AddAbsentRules()
+
+		// Add Namespace
+		rules.MetaData.Namespace = appGroup.Namespace
+
+		// Write out the CRDs.
+		serializedRules, err := yaml.Marshal(rules)
+		if err != nil {
+			return skerr.Wrapf(err, "Failed to marshal new Rules into YAML for %v", appGroup)
+		}
+		serializedPodMonitoring, err := appGroup.PodMonitoring()
+		if err != nil {
+			return skerr.Wrapf(err, "Failed to write new PodMonitoring into YAML for %v", appGroup)
+		}
+		if a.dryrun {
+			fmt.Println(appGroup.TargetFilename())
+			continue
+		}
+		err = util.WithWriteFile(appGroup.TargetFilename(), func(w io.Writer) error {
+			_, err := fmt.Fprintf(w, "%s\n---\n%s", serializedRules, serializedPodMonitoring)
+			return err
+		})
+		if err != nil {
+			return skerr.Wrapf(err, "Failed to write file for %v", appGroup)
+		}
+		sklog.Infof("Processed %v", appGroup)
+	}
+	return nil
+}
diff --git a/promk/go/genpromcrd/genpromcrd/genpromcrd_test.go b/promk/go/genpromcrd/genpromcrd/genpromcrd_test.go
new file mode 100644
index 0000000..7c70611
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/genpromcrd_test.go
@@ -0,0 +1,219 @@
+// Package genpromcrd implements all the functionality for the genpromcrd
+// command line application.
+package genpromcrd
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/otiai10/copy"
+	"github.com/stretchr/testify/require"
+	"go.skia.org/infra/go/testutils"
+	"go.skia.org/infra/go/testutils/unittest"
+)
+
+var alertTarget = AlertTarget{
+	AppGroup:  "perf",
+	Namespace: "perfns",
+	Directory: "/some/sub-directory/in/the/git/checkout/",
+}
+
+func TestAlertTarget_TargetFilename_Success(t *testing.T) {
+	unittest.SmallTest(t)
+	require.Equal(t, "/some/sub-directory/in/the/git/checkout/perf_perfns_appgroup_alerts.yml", alertTarget.TargetFilename())
+}
+
+func TestAlertTarget_PodMonitoring_Success(t *testing.T) {
+	unittest.SmallTest(t)
+	expected := `apiVersion: monitoring.googleapis.com/v1
+kind: PodMonitoring
+metadata:
+ name: perf-perfns
+spec:
+ selector:
+   matchLabels:
+      appgroup: perf
+ endpoints:
+   - port: prom
+     interval: 15s
+ targetLabels:
+   fromPod:
+     - from: app
+     - from: appgroup
+`
+	got, err := alertTarget.PodMonitoring()
+	require.NoError(t, err)
+	require.Equal(t, expected, got)
+}
+
+func TestNameSpaceOrDefault_NoNamespaceProvided_ReturnsDefault(t *testing.T) {
+	unittest.SmallTest(t)
+
+	require.Equal(t, "default", NamespaceOrDefault(""))
+}
+
+func TestNameSpaceOrDefault_NamespaceProvided_ReturnsGivenNamespace(t *testing.T) {
+	unittest.SmallTest(t)
+
+	require.Equal(t, "foo", NamespaceOrDefault("foo"))
+}
+
+func TestGetAlertTargetsFromFilename_ContainsOneDeploymentInDefaultNamespace_Success(t *testing.T) {
+	unittest.MediumTest(t)
+
+	got, err := getAlertTargetsFromFilename(filepath.Join(testutils.TestDataDir(t), "deployment.yaml"))
+	require.NoError(t, err)
+	require.Len(t, got, 1)
+
+	// Each key in the returned map contains alertTarget.Directory, which will
+	// change based on where the code is being run, so iterate over the map to
+	// test the members.
+	for alertTarget := range got {
+		require.Equal(t, "perf", alertTarget.AppGroup)
+		require.Equal(t, "default", alertTarget.Namespace)
+		require.Contains(t, alertTarget.Directory, "/promk/go/genpromcrd/genpromcrd/testdata")
+	}
+}
+
+func TestGetAlertTargetsFromFilename_ContainsOneStatefulSetInNonDefaultNamespace_Success(t *testing.T) {
+	unittest.MediumTest(t)
+
+	got, err := getAlertTargetsFromFilename(filepath.Join(testutils.TestDataDir(t), "statefulset.yml"))
+	require.NoError(t, err)
+	require.Len(t, got, 1)
+
+	// Each key in the returned map contains alertTarget.Directory, which will
+	// change based on where the code is being run, so iterate over the map to
+	// test the members.
+	for alertTarget := range got {
+		require.Equal(t, "prometheus", alertTarget.AppGroup)
+		require.Equal(t, "prometheus", alertTarget.Namespace)
+		require.Contains(t, alertTarget.Directory, "/promk/go/genpromcrd/genpromcrd/testdata")
+	}
+}
+
+func TestGetAlertTargetsFromFilename_FileDoesNotExist_ReturnsError(t *testing.T) {
+	unittest.MediumTest(t)
+
+	_, err := getAlertTargetsFromFilename(filepath.Join(testutils.TestDataDir(t), "the-name-of-a-file-that-does-not-exist.yml"))
+	require.Error(t, err)
+}
+
+func TestGetAllAlertTargetsUnderDir_DirContainsYAMLFilesThatShouldBeSkipped_OnlyTheOneValidFileIsRead(t *testing.T) {
+	unittest.MediumTest(t)
+
+	// The 'fake-checkout' directory has deployment files in this tree:
+	//
+	//	fake-checkout/
+	//	├── monitoring
+	//	│   └── appgroups
+	//	│       └── perf.yml
+	//	├── skia-infra-public
+	//	│   └── deployment.yml
+	//	├── templates
+	//	│   └── this-deployment-is-ignored.yml
+	//	└── this-deployment-is-ignored.yaml
+	//
+	// Only the file under skia-infra-public should be read, as
+	// getAllAlertTargetsUnderDir only looks at files under directories that
+	// correspond to cluster names.
+
+	alertTargets, err := getAllAlertTargetsUnderDir(filepath.Join(testutils.TestDataDir(t), "fake-checkout"))
+	require.NoError(t, err)
+	require.Len(t, alertTargets, 1)
+}
+
+func TestAppMain_NoDirectoryFlagSupplied_ReturnsError(t *testing.T) {
+	unittest.SmallTest(t)
+	require.Error(t, NewApp().Main([]string{"path/to/exe/goes/here"}))
+}
+
+func TestAppMain_DryRunOverFakeCheckout_PrintsListOfFilesWritten(t *testing.T) {
+	unittest.MediumTest(t)
+
+	// Setup to capture stdout.
+	backup := os.Stdout
+	defer func() {
+		os.Stdout = backup
+	}()
+	r, w, _ := os.Pipe()
+	os.Stdout = w
+
+	// Run Main with the --dryrun flag which only prints the names of the files it would write.
+	require.NoError(t, NewApp().Main(
+		[]string{
+			"path/to/exe/goes/here",
+			"--dryrun",
+			"--directory", filepath.Join(testutils.TestDataDir(t), "fake-checkout"),
+		}))
+
+	err := w.Close()
+	require.NoError(t, err)
+	out, err := ioutil.ReadAll(r)
+	require.NoError(t, err)
+
+	// We only expect a single file to be written.
+	parts := strings.Split(string(out), "\n")
+	require.Len(t, parts, 2)
+	require.Contains(t, parts[0], "/testdata/fake-checkout/skia-infra-public/perf_mytestnamespace_appgroup_alerts.yml")
+	require.Equal(t, "", parts[1])
+}
+
+func TestAppMain_RunOverFakeCheckout_CorrectFileContentsAreWritten(t *testing.T) {
+	unittest.MediumTest(t)
+
+	tmpDir := t.TempDir()
+	err := copy.Copy(filepath.Join(testutils.TestDataDir(t), "fake-checkout"), tmpDir)
+	require.NoError(t, err)
+
+	require.NoError(t, NewApp().Main(
+		[]string{
+			"path/to/exe/goes/here",
+			"--directory", tmpDir,
+		}))
+
+	newlyWrittenFilename := filepath.Join(tmpDir, "skia-infra-public/perf_mytestnamespace_appgroup_alerts.yml")
+	require.FileExists(t, newlyWrittenFilename)
+	b, err := ioutil.ReadFile(newlyWrittenFilename)
+	require.NoError(t, err)
+
+	expected := `apiVersion: monitoring.googleapis.com/v1
+kind: Rules
+metadata:
+  name: perf
+  namespace: mytestnamespace
+spec:
+  groups:
+  - name: perf
+    interval: 30s
+    rules:
+    - alert: AlwaysFiringAlertToSeeIfAlertsAreWorking
+      expr: vector(1)
+      labels: {}
+      annotations: {}
+  - name: absent-perf
+    interval: 30s
+    rules: []
+
+---
+apiVersion: monitoring.googleapis.com/v1
+kind: PodMonitoring
+metadata:
+ name: perf-mytestnamespace
+spec:
+ selector:
+   matchLabels:
+      appgroup: perf
+ endpoints:
+   - port: prom
+     interval: 15s
+ targetLabels:
+   fromPod:
+     - from: app
+     - from: appgroup
+`
+	require.Equal(t, expected, string(b))
+}
diff --git a/promk/go/genpromcrd/genpromcrd/testdata/deployment.yaml b/promk/go/genpromcrd/genpromcrd/testdata/deployment.yaml
new file mode 100644
index 0000000..3491c71
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/testdata/deployment.yaml
@@ -0,0 +1,124 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: skiaperf
+  name: skiaperf
+  annotations:
+    beta.cloud.google.com/backend-config:
+      '{"ports": {"8000":"skia-default-backendconfig"}}'
+    skia.org.domain: perf.skia.org
+spec:
+  ports:
+    - name: metrics
+      port: 20000
+    - name: http
+      port: 8000
+  selector:
+    app: skiaperf
+  type: NodePort
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: skiaperf
+spec:
+  selector:
+    matchLabels:
+      app: skiaperf
+  replicas: 1
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: skiaperf
+        appgroup: perf
+      annotations:
+        prometheus.io.scrape: 'true'
+        prometheus.io.port: '20000'
+    spec:
+      automountServiceAccountToken: false
+      securityContext:
+        runAsUser: 2000 # aka skia
+        fsGroup: 2000 # aka skia
+      serviceAccountName: skia-perf
+      containers:
+        - name: skiaperf
+          image: gcr.io/skia-public/perfserver:2022-07-19T15_23_14Z-jcgregorio-66af958-clean
+          args:
+            - frontend
+            - --port=:7000
+            - --internal_port=:9000
+            - --prom_port=:20000
+            - --commit_range_url=https://skia.googlesource.com/skia/+log/{begin}..{end}
+            - --email_client_secret_file=/etc/perf-email-secrets/client_secret.json
+            - --email_token_cache_file=/etc/perf-email-secrets/client_token.json
+            - --config_filename=/usr/local/share/skiaperf/configs/cdb-nano.json
+            - --num_continuous_parallel=1
+            - --do_clustering=true
+            - --resources_dir=/usr/local/share/skiaperf/dist
+            - --proxy-login=true
+          ports:
+            - containerPort: 20000
+              name: prom
+            - containerPort: 9000
+            - containerPort: 7000
+          volumeMounts:
+            - name: perf-alertserver-skia-org-secrets
+              mountPath: /etc/perf-email-secrets
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+          resources:
+            requests:
+              memory: '2Gi'
+              cpu: '2'
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 7000
+            initialDelaySeconds: 30
+            periodSeconds: 30
+            failureThreshold: 20
+        - name: auth-proxy
+          args:
+            - '--port=:8000'
+            - '--target_port=:7000'
+            - '--prom_port=:10000'
+            - '--passive'
+            - '--allowed_from=google.com'
+          image: gcr.io/skia-public/auth-proxy:2021-06-10T19_45_02Z-jcgregorio-939f73f-clean
+          ports:
+            - name: http
+              containerPort: 8000
+            - name: prom
+              containerPort: 10000
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 8000
+            initialDelaySeconds: 1
+            periodSeconds: 3
+          volumeMounts:
+            - name: skia-org-legacy-login-secrets
+              mountPath: /etc/skia.org/
+            - name: csrf-salt
+              mountPath: /var/skia/
+          resources:
+            requests:
+              memory: '30Mi'
+              cpu: '200m'
+              ephemeral-storage: '200M'
+      volumes:
+        - name: csrf-salt
+          secret:
+            secretName: csrf-salt
+        - name: skia-org-legacy-login-secrets
+          secret:
+            secretName: skia-org-legacy-login-secrets
+        - name: perf-alertserver-skia-org-secrets
+          secret:
+            secretName: perf-alertserver-skia-org-secrets
diff --git a/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/monitoring/appgroups/perf.yml b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/monitoring/appgroups/perf.yml
new file mode 100644
index 0000000..9806135
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/monitoring/appgroups/perf.yml
@@ -0,0 +1,11 @@
+apiVersion: monitoring.googleapis.com/v1
+kind: Rules
+metadata:
+  name: perf
+spec:
+  groups:
+    - name: perf
+      interval: 30s
+      rules:
+        - alert: AlwaysFiringAlertToSeeIfAlertsAreWorking
+          expr: vector(1)
diff --git a/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/skia-infra-public/deployment.yml b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/skia-infra-public/deployment.yml
new file mode 100644
index 0000000..e47a8cc
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/skia-infra-public/deployment.yml
@@ -0,0 +1,125 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: skiaperf
+  name: skiaperf
+  annotations:
+    beta.cloud.google.com/backend-config:
+      '{"ports": {"8000":"skia-default-backendconfig"}}'
+    skia.org.domain: perf.skia.org
+spec:
+  ports:
+    - name: metrics
+      port: 20000
+    - name: http
+      port: 8000
+  selector:
+    app: skiaperf
+  type: NodePort
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: skiaperf
+  namespace: mytestnamespace
+spec:
+  selector:
+    matchLabels:
+      app: skiaperf
+  replicas: 1
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: skiaperf
+        appgroup: perf
+      annotations:
+        prometheus.io.scrape: 'true'
+        prometheus.io.port: '20000'
+    spec:
+      automountServiceAccountToken: false
+      securityContext:
+        runAsUser: 2000 # aka skia
+        fsGroup: 2000 # aka skia
+      serviceAccountName: skia-perf
+      containers:
+        - name: skiaperf
+          image: gcr.io/skia-public/perfserver:2022-07-19T15_23_14Z-jcgregorio-66af958-clean
+          args:
+            - frontend
+            - --port=:7000
+            - --internal_port=:9000
+            - --prom_port=:20000
+            - --commit_range_url=https://skia.googlesource.com/skia/+log/{begin}..{end}
+            - --email_client_secret_file=/etc/perf-email-secrets/client_secret.json
+            - --email_token_cache_file=/etc/perf-email-secrets/client_token.json
+            - --config_filename=/usr/local/share/skiaperf/configs/cdb-nano.json
+            - --num_continuous_parallel=1
+            - --do_clustering=true
+            - --resources_dir=/usr/local/share/skiaperf/dist
+            - --proxy-login=true
+          ports:
+            - containerPort: 20000
+              name: prom
+            - containerPort: 9000
+            - containerPort: 7000
+          volumeMounts:
+            - name: perf-alertserver-skia-org-secrets
+              mountPath: /etc/perf-email-secrets
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+          resources:
+            requests:
+              memory: '2Gi'
+              cpu: '2'
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 7000
+            initialDelaySeconds: 30
+            periodSeconds: 30
+            failureThreshold: 20
+        - name: auth-proxy
+          args:
+            - '--port=:8000'
+            - '--target_port=:7000'
+            - '--prom_port=:10000'
+            - '--passive'
+            - '--allowed_from=google.com'
+          image: gcr.io/skia-public/auth-proxy:2021-06-10T19_45_02Z-jcgregorio-939f73f-clean
+          ports:
+            - name: http
+              containerPort: 8000
+            - name: prom
+              containerPort: 10000
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 8000
+            initialDelaySeconds: 1
+            periodSeconds: 3
+          volumeMounts:
+            - name: skia-org-legacy-login-secrets
+              mountPath: /etc/skia.org/
+            - name: csrf-salt
+              mountPath: /var/skia/
+          resources:
+            requests:
+              memory: '30Mi'
+              cpu: '200m'
+              ephemeral-storage: '200M'
+      volumes:
+        - name: csrf-salt
+          secret:
+            secretName: csrf-salt
+        - name: skia-org-legacy-login-secrets
+          secret:
+            secretName: skia-org-legacy-login-secrets
+        - name: perf-alertserver-skia-org-secrets
+          secret:
+            secretName: perf-alertserver-skia-org-secrets
diff --git a/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/templates/this-deployment-is-ignored.yml b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/templates/this-deployment-is-ignored.yml
new file mode 100644
index 0000000..c46bc32
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/templates/this-deployment-is-ignored.yml
@@ -0,0 +1,16 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: skiaperf
+spec:
+  selector:
+    matchLabels:
+      app: skiaperf
+  replicas: 1
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: skiaperf
+        appgroup: B
diff --git a/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/this-deployment-is-ignored.yaml b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/this-deployment-is-ignored.yaml
new file mode 100644
index 0000000..855e71f
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/testdata/fake-checkout/this-deployment-is-ignored.yaml
@@ -0,0 +1,16 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: skiaperf
+spec:
+  selector:
+    matchLabels:
+      app: skiaperf
+  replicas: 1
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: skiaperf
+        appgroup: A
diff --git a/promk/go/genpromcrd/genpromcrd/testdata/statefulset.yml b/promk/go/genpromcrd/genpromcrd/testdata/statefulset.yml
new file mode 100644
index 0000000..4202902
--- /dev/null
+++ b/promk/go/genpromcrd/genpromcrd/testdata/statefulset.yml
@@ -0,0 +1,122 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: prometheus
+  namespace: prometheus
+spec:
+  selector:
+    matchLabels:
+      app: prometheus
+      appgroup: prometheus
+  replicas: 1
+  updateStrategy:
+    type: RollingUpdate
+  serviceName: 'prometheus'
+  template:
+    metadata:
+      labels:
+        app: prometheus
+        appgroup: prometheus
+      annotations:
+        prometheus.io.scrape: 'true'
+        prometheus.io.port: '9090'
+    spec:
+      securityContext:
+        runAsUser: 2000 # aka skia
+        fsGroup: 2000 # aka skia
+      serviceAccountName: prometheus
+      automountServiceAccountToken: true
+      containers:
+        - name: prometheus
+          image: docker.io/prom/prometheus@sha256:0a8caa2e9f19907608915db6e62a67383fe44b9876a467b297ee6f64e51dd58a
+          args:
+            - '--config.file=/etc/prometheus/prometheus.yml'
+            - '--storage.tsdb.path=/mnt/prometheus/'
+            - '--web.enable-lifecycle'
+            - '--web.listen-address=:9090'
+            - '--storage.tsdb.max-block-duration=2h'
+            - '--storage.tsdb.min-block-duration=2h'
+          ports:
+            - containerPort: 9090
+          volumeMounts:
+            - name: prometheus-config-volume
+              mountPath: /etc/prometheus/
+            - name: prometheus-storage-volume-claim
+              mountPath: /mnt/prometheus/
+          resources:
+            requests:
+              memory: '1Gi'
+              cpu: '2'
+          readinessProbe:
+            httpGet:
+              path: /metrics
+              port: 9090
+            initialDelaySeconds: 3
+            periodSeconds: 3
+        - name: thanos-sidecar
+          args:
+            - sidecar
+            - |
+              --objstore.config=type: GCS
+              config:
+                bucket: skia-thanos
+            - --prometheus.url=http://localhost:9090
+            - --tsdb.path=/mnt/prometheus/
+            - --http-address=:9000
+            - --grpc-address=:9001
+          ports:
+            - containerPort: 9000
+            - containerPort: 9001
+          image: gcr.io/skia-public/thanos@sha256:111e964136af948ec387c5d4fbb1d8539366bfa4170485a94e9e0483b881bdb9
+          volumeMounts:
+            - name: prometheus-storage-volume-claim
+              mountPath: /mnt/prometheus/
+          resources:
+            requests:
+              memory: '50Mi'
+              cpu: '10m'
+        - name: configmap-reload
+          args:
+            - '--volume-dir=/etc/prometheus/'
+            - '--webhook-method=POST'
+            - '--webhook-url=http://localhost:9090/-/reload'
+          image: gcr.io/skia-public/configmap-reload@sha256:cfd55ce3c2ccf8f4025088fa7174e58b311ec42c2dd5dc46a5f2227725db1e5a
+          volumeMounts:
+            - name: prometheus-config-volume
+              mountPath: /etc/prometheus/
+          resources:
+            requests:
+              memory: '30Mi'
+              cpu: '10m'
+          ports: []
+        - name: thanos-bounce
+          image: gcr.io/skia-public/thanos-bounce@sha256:cf269863a4a6cb1406b4910135c35f5bee81cfcb8b3976009e4a5682d513a9b5
+          env:
+            - name: PORT_ON_THANOS_QUERY
+              value: '9006'
+            - name: CLOUDSDK_COMPUTE_ZONE
+              value: us-central1-a
+            - name: CLOUDSDK_CONTAINER_CLUSTER
+              value: skia-public
+            - name: CLOUDSDK_COMPUTE_REGION
+              value: us-central1-a
+            - name: CLOUDSDK_CORE_PROJECT
+              value: skia-public
+          resources:
+            requests:
+              memory: '50Mi'
+              cpu: '10m'
+          ports: []
+      volumes:
+        - name: prometheus-config-volume
+          configMap:
+            defaultMode: 420
+            name: prometheus-server-conf
+  volumeClaimTemplates:
+    - metadata:
+        name: prometheus-storage-volume-claim
+      spec:
+        accessModes: ['ReadWriteOnce']
+        resources:
+          requests:
+            storage: 1000Gi
diff --git a/promk/go/genpromcrd/main.go b/promk/go/genpromcrd/main.go
new file mode 100644
index 0000000..11251c5
--- /dev/null
+++ b/promk/go/genpromcrd/main.go
@@ -0,0 +1,17 @@
+// Package main implements the genpromcrd command line application.
+package main
+
+import (
+	"os"
+
+	"go.skia.org/infra/go/sklog"
+	"go.skia.org/infra/promk/go/genpromcrd/genpromcrd"
+)
+
+func main() {
+	app := genpromcrd.NewApp()
+
+	if err := app.Main(os.Args); err != nil {
+		sklog.Fatal(err)
+	}
+}