[docserver] Move to skia-public.
Run as a Deployment with 2 replicas. Preview works because CL
previews have been sped up and now take only a couple of seconds.
Additionally, change all livenessProbes to readinessProbes, since what
we really care about is when the pod should be exposed via the Service.
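The probe swap keeps the same httpGet check; only the key changes. A
representative stanza (sketch only; paths, ports, and delays vary per
service, the docserver values are shown):

    readinessProbe:
      httpGet:
        path: /
        port: 8000
      initialDelaySeconds: 45
      periodSeconds: 3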
Bug: skia:
Change-Id: I5bd6c717db8c04dae08e239a66704d3ace9609df
Reviewed-on: https://skia-review.googlesource.com/129264
Commit-Queue: Joe Gregorio <jcgregorio@google.com>
Reviewed-by: Ben Wagner <benjaminwagner@google.com>
diff --git a/docserverk/go/docserverk/main.go b/docserverk/go/docserverk/main.go
index e84fcbc..160b220 100644
--- a/docserverk/go/docserverk/main.go
+++ b/docserverk/go/docserverk/main.go
@@ -42,7 +42,7 @@
// flags
var (
- docRepo = flag.String("doc_repo", "https://skia.googlesource.com/skia", "The directory to check out the doc repo into.")
+ docRepo = flag.String("doc_repo", "https://skia.googlesource.com/skia", "The repo to check out.")
local = flag.Bool("local", false, "Running locally if true. As opposed to in production.")
port = flag.String("port", ":8000", "HTTP service address (e.g., ':8000')")
preview = flag.Bool("preview", false, "Preview markdown changes to a local repo. Doesn't do pulls.")
@@ -133,7 +133,7 @@
httputils.ReportError(w, r, fmt.Errorf("Not a valid integer id for an issue."), "The CL given is not valid.")
return
}
- d, err = docset.NewDocSetForIssue(context.Background(), filepath.Join(*workDir, "patches"), *docRepo, issue)
+ d, err = docset.NewDocSetForIssue(context.Background(), *workDir, *docRepo, issue)
if err == docset.IssueCommittedErr {
httputils.ReportError(w, r, err, "Failed to load the given CL, that issue is closed.")
return
diff --git a/docserverk/go/docset/docset.go b/docserverk/go/docset/docset.go
index 5b0ffe1..b1b38e3 100644
--- a/docserverk/go/docset/docset.go
+++ b/docserverk/go/docset/docset.go
@@ -101,10 +101,24 @@
// newDocSet does the core of the work for both NewDocSet and NewDocSetForIssue.
//
-// The repo is checked out into repoDir.
+// The repo is checked out somewhere under workDir.
// If a valid issue and patchset are supplied then the repo will be patched with that CL.
// If refresh is true then the git repo will be periodically refreshed (git pull).
-func newDocSet(ctx context.Context, repoDir, repo string, issue, patchset int64, refresh bool) (*DocSet, error) {
+func newDocSet(ctx context.Context, workDir, repo string, issue, patchset int64, refresh bool) (*DocSet, error) {
+ primaryDir := filepath.Join(workDir, "primary")
+ issueDir := filepath.Join(workDir, "patches", fmt.Sprintf("%d-%d", issue, patchset))
+ repoDir := primaryDir
+ if issue > 0 {
+ repoDir = issueDir
+ if _, err := os.Stat(issueDir); err == nil {
+ d := &DocSet{
+ repoDir: repoDir,
+ }
+ d.BuildNavigation()
+ return d, nil
+ }
+ }
+
if issue > 0 {
info, err := gc.GetIssueProperties(issue)
if err != nil {
@@ -114,7 +128,13 @@
return nil, IssueCommittedErr
}
}
- git, err := gitinfo.CloneOrUpdate(ctx, repo, repoDir, false)
+ var git *gitinfo.GitInfo
+ var err error
+ if issue > 0 {
+ git, err = gitinfo.CloneOrUpdate(ctx, primaryDir, repoDir, false)
+ } else {
+ git, err = gitinfo.CloneOrUpdate(ctx, repo, repoDir, false)
+ }
if err != nil {
return nil, fmt.Errorf("Failed to CloneOrUpdate repo %q: %s", repo, err)
}
@@ -129,6 +149,7 @@
// | +-> Issue ID.
// |
// +-> Last two digits of Issue ID.
+
issuePostfix := issue % 100
output, err := exec.RunCwd(ctx, repoDir, "git", "fetch", repo, fmt.Sprintf("refs/changes/%02d/%d/%d", issuePostfix, issue, patchset))
if err != nil {
@@ -181,11 +202,7 @@
// NewDocSet creates a new DocSet, one that is periodically refreshed.
func NewDocSet(ctx context.Context, workDir, repo string) (*DocSet, error) {
- d, err := newDocSet(ctx, filepath.Join(workDir, "primary"), repo, -1, -1, true)
- if err != nil {
- return nil, fmt.Errorf("Failed to CloneOrUpdate repo %q: %s", repo, err)
- }
- return d, nil
+ return newDocSet(ctx, workDir, repo, -1, -1, true)
}
// NewDocSetForIssue creates a new DocSet patched to the latest patch level of
@@ -209,26 +226,7 @@
if !util.In(domain, config.WHITELIST) {
return nil, fmt.Errorf("User is not authorized to test docset CLs.")
}
- var d *DocSet
- repoDir := filepath.Join(workDir, "patches", fmt.Sprintf("%d-%d", issue, patchset))
- if _, err := os.Stat(repoDir); os.IsNotExist(err) {
- d, err = newDocSet(ctx, repoDir, repo, issue, patchset, false)
- if err != nil {
- if err == IssueCommittedErr {
- return nil, err
- }
- if err := os.RemoveAll(repoDir); err != nil {
- sklog.Errorf("Failed to remove %q: %s", repoDir, err)
- }
- return nil, fmt.Errorf("Failed to create new doc set: %s", err)
- }
- } else {
- d = &DocSet{
- repoDir: repoDir,
- }
- d.BuildNavigation()
- }
- return d, nil
+ return newDocSet(ctx, workDir, repo, issue, patchset, false)
}
// RawFilename returns the absolute filename for the file associated with the
@@ -518,10 +516,7 @@
func StartCleaner(workDir string) {
sklog.Info("Starting Cleaner")
for range time.Tick(config.REFRESH) {
- // TODO (stephana): The extra 'patches' directory should go away after
- // one of the path segments is removed in docserver/main.go or
- // NewDocsetForIssue.
- matches, err := filepath.Glob(workDir + "/patches/patches/*")
+ matches, err := filepath.Glob(workDir + "/patches/*")
sklog.Infof("Matches: %v", matches)
if err != nil {
sklog.Errorf("Failed to retrieve list of patched checkouts: %s", err)
diff --git a/docserverk/probersk.json5 b/docserverk/probersk.json5
new file mode 100644
index 0000000..c78df6f
--- /dev/null
+++ b/docserverk/probersk.json5
@@ -0,0 +1,11 @@
+{
+ "docserver": {
+ "urls": [
+ "https://skia.org/",
+ "http://docserver:8000",
+ ],
+ "method": "GET",
+ "expected": [200],
+ "mimetype": "text/html"
+ }
+}
diff --git a/kube/skia-public/alertmanager.yaml b/kube/skia-public/alertmanager.yaml
index 0ae2a79..11b791c 100644
--- a/kube/skia-public/alertmanager.yaml
+++ b/kube/skia-public/alertmanager.yaml
@@ -57,7 +57,7 @@
requests:
memory: "30Mi"
cpu: "100m"
- livenessProbe:
+ readinessProbe:
httpGet:
path: /
port: 9090
diff --git a/kube/skia-public/debugger-assets.yaml b/kube/skia-public/debugger-assets.yaml
index e1c6d22..a8ca9b5 100644
--- a/kube/skia-public/debugger-assets.yaml
+++ b/kube/skia-public/debugger-assets.yaml
@@ -36,7 +36,7 @@
fsGroup: 2000 # aka skia
containers:
- name: debugger-assets
- image: gcr.io/skia-public/debugger-assets:2018-05-18T14_50_37Z-jcgregorio-992e9f8-dirty
+ image: gcr.io/skia-public/debugger-assets:2018-05-19T02_14_37Z-jcgregorio-612e55b-clean
args:
- "--logtostderr"
- "--port=:8000"
@@ -49,7 +49,7 @@
requests:
memory: "50Mi"
cpu: "20m"
- livenessProbe:
+ readinessProbe:
httpGet:
path: /
port: http
diff --git a/kube/skia-public/debugger.yaml b/kube/skia-public/debugger.yaml
index 889d072..60398df 100644
--- a/kube/skia-public/debugger.yaml
+++ b/kube/skia-public/debugger.yaml
@@ -47,7 +47,7 @@
requests:
memory: "20Gi"
cpu: "6"
- livenessProbe:
+ readinessProbe:
httpGet:
path: /
port: 8000
diff --git a/kube/skia-public/docserver.yaml b/kube/skia-public/docserver.yaml
index dd76e2e..d940cb7 100644
--- a/kube/skia-public/docserver.yaml
+++ b/kube/skia-public/docserver.yaml
@@ -2,48 +2,41 @@
kind: Service
metadata:
labels:
- app: docserverk
- name: docserverk
+ app: docserver
+ name: docserver
spec:
ports:
+ - name: metrics
+ port: 20000
- name: http
port: 8000
- - name: docserverk-metrics
- port: 20000
selector:
- app: docserverk
+ app: docserver
type: NodePort
---
apiVersion: apps/v1beta1
-kind: StatefulSet
+kind: Deployment
metadata:
- name: docserverk
+ name: docserver
spec:
- selector:
- matchLabels:
- app: docserverk # Label selector that determines which Pods belong to the StatefulSet
- # Must match spec: template: metadata: labels
- serviceName: "docserverk"
- replicas: 1
- updateStrategy:
+ replicas: 2
+ strategy:
type: RollingUpdate
template:
metadata:
labels:
- app: docserverk # Pod template's label selector
+ app: docserver
+ annotations:
+ prometheus.io.scrape: "true"
+ prometheus.io.port: "20000"
spec:
automountServiceAccountToken: false
securityContext:
runAsUser: 2000 # aka skia
fsGroup: 2000 # aka skia
- volumes:
- - name: skia-docs-sa
- secret:
- secretName: skia-docs
- terminationGracePeriodSeconds: 10
containers:
- - name: docserverk
- image: gcr.io/skia-public/docserverk:2018-05-04T15_11_29Z-jcgregorio-018615b-clean
+ - name: docserver
+ image: gcr.io/skia-public/docserverk:2018-05-22T16_58_21Z-jcgregorio-8ed9abe-dirty
args:
- "--logtostderr"
- "--resources_dir=/usr/local/share/docserverk/"
@@ -51,33 +44,29 @@
- "--port=:8000"
- "--prom_port=:20000"
ports:
- - name: http
- containerPort: 8000
- - name: metrics
- containerPort: 20000
- resources:
- requests:
- cpu: "100m"
- memory: "100Mi"
+ - containerPort: 8000
+ - containerPort: 20000
volumeMounts:
- name: skia-docs-sa
mountPath: /var/secrets/google
- - name: docserverk-storage2
+ - name: docserverk-storage
mountPath: /mnt/docserverk
env:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: /var/secrets/google/key.json
- livenessProbe:
+ resources:
+ requests:
+ memory: "1.3Gi"
+ cpu: "100m"
+ readinessProbe:
httpGet:
path: /
port: 8000
- initialDelaySeconds: 3
+ initialDelaySeconds: 45
periodSeconds: 3
- volumeClaimTemplates:
- - metadata:
- name: docserverk-storage2
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 500Gi
+ volumes:
+ - name: skia-docs-sa
+ secret:
+ secretName: skia-docs
+ - name: docserverk-storage
+ emptyDir: {}
diff --git a/kube/skia-public/notifier.yaml b/kube/skia-public/notifier.yaml
index 6252cb2..4c18185 100644
--- a/kube/skia-public/notifier.yaml
+++ b/kube/skia-public/notifier.yaml
@@ -56,7 +56,7 @@
requests:
memory: "30Mi"
cpu: "20m"
- livenessProbe:
+ readinessProbe:
httpGet:
path: /healthz
port: 9000
diff --git a/kube/skia-public/prober.yaml b/kube/skia-public/prober.yaml
index 56b7182..3217a37 100644
--- a/kube/skia-public/prober.yaml
+++ b/kube/skia-public/prober.yaml
@@ -28,7 +28,7 @@
fsGroup: 2000 # aka skia
containers:
- name: proberk
- image: gcr.io/skia-public/proberk:2018-05-18T21_17_26Z-jcgregorio-1dad246-clean
+ image: gcr.io/skia-public/proberk:2018-05-19T20_34_44Z-jcgregorio-b64814e-dirty
args:
- "--logtostderr"
- "--config=/etc/proberk/allprobersk.json"
@@ -41,7 +41,7 @@
requests:
memory: "100Mi"
cpu: "25m"
- livenessProbe:
+ readinessProbe:
httpGet:
path: /metrics
port: 20000
diff --git a/kube/skia-public/prom.yaml b/kube/skia-public/prom.yaml
index 5cb666a..64ccacf 100644
--- a/kube/skia-public/prom.yaml
+++ b/kube/skia-public/prom.yaml
@@ -93,7 +93,7 @@
requests:
memory: "1Gi"
cpu: "2"
- livenessProbe:
+ readinessProbe:
httpGet:
path: /
port: 9090
diff --git a/kube/skia-public/skia-ingress.yaml b/kube/skia-public/skia-ingress.yaml
index cca7bbc..cc0a803 100644
--- a/kube/skia-public/skia-ingress.yaml
+++ b/kube/skia-public/skia-ingress.yaml
@@ -8,7 +8,7 @@
ingress.gcp.kubernetes.io/pre-shared-cert: skia-org
spec:
backend:
- serviceName: docserverk
+ serviceName: docserver
servicePort: 8000
rules:
- host: prom2.skia.org
@@ -17,6 +17,18 @@
- backend:
serviceName: prometheus
servicePort: 8000
+ - host: www.skia.org
+ http:
+ paths:
+ - backend:
+ serviceName: docserver
+ servicePort: 8000
+ - host: skia.org
+ http:
+ paths:
+ - backend:
+ serviceName: docserver
+ servicePort: 8000
- host: alerts2.skia.org
http:
paths:
diff --git a/kube/skia-public/skiatest-ingress.yaml b/kube/skia-public/skiatest-ingress.yaml
index 6b474e5..180d136 100644
--- a/kube/skia-public/skiatest-ingress.yaml
+++ b/kube/skia-public/skiatest-ingress.yaml
@@ -7,14 +7,14 @@
ingress.gcp.kubernetes.io/pre-shared-cert: skiatest
spec:
backend:
- serviceName: docserverk
+ serviceName: docserver
servicePort: 8000
rules:
- host: docs.skiatest.org
http:
paths:
- backend:
- serviceName: docserverk
+ serviceName: docserver
servicePort: 8000
- host: prom.skiatest.org
http:
diff --git a/skfe/sys/skia_org_nginx b/skfe/sys/skia_org_nginx
index 14efe22..6e1ece6 100644
--- a/skfe/sys/skia_org_nginx
+++ b/skfe/sys/skia_org_nginx
@@ -45,7 +45,7 @@
}
location / {
- proxy_pass http://skia-docs:8000;
+ proxy_pass https://35.201.76.220;
proxy_set_header Host $host;
}
}
@@ -1669,28 +1669,6 @@
server_name debugger.skia.org;
return 301 https://debugger.skia.org$request_uri;
}
-##### debug.skia.org ###########################
-server {
- listen 443;
- server_name debug.skia.org;
-
- ssl on;
-
- access_log /var/log/nginx/debug.access.log;
- error_log /var/log/nginx/debug.error.log error;
-
- client_max_body_size 500M;
-
- location / {
- proxy_pass https://35.201.76.220;
- proxy_set_header Host $host;
- }
-}
-server {
- listen 80;
- server_name debug.skia.org;
- return 301 https://debug.skia.org$request_uri;
-}
##### debugger-assets.skia.org ###########################
server {
listen 443;
@@ -1701,8 +1679,6 @@
access_log /var/log/nginx/debugger-assets.access.log;
error_log /var/log/nginx/debugger-assets.error.log error;
- client_max_body_size 500M;
-
location / {
proxy_pass https://35.201.76.220;
proxy_set_header Host $host;