reporting-disabled = false
bind-address = ":8088"
[meta]
enabled = true
dir = "/mnt/pd0/influxdb/meta"
bind-address = ":8088"
https-enabled = false
https-certificate = ""
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"
cluster-tracing = false
raft-promotion-enabled = true
logging-enabled = true
pprof-enabled = false
lease-duration = "1m0s"
[data]
enabled = true
dir = "/mnt/pd0/influxdb/tsm1"
engine = "tsm1"
max-wal-size = 104857600
wal-flush-interval = "10m0s"
wal-partition-flush-delay = "2s"
wal-dir = "/mnt/pd0/influxdb/tsm1-wal"
wal-logging-enabled = true
wal-ready-series-size = 30720
wal-compaction-threshold = 0.5
wal-max-series-size = 1048576
wal-flush-cold-interval = "5s"
wal-partition-size-threshold = 52428800
query-log-enabled = true
cache-max-memory-size = 524288000
cache-snapshot-memory-size = 26214400
cache-snapshot-write-cold-duration = "1m0s"
compact-full-write-cold-duration = "24h0m0s"
max-points-per-block = 0
trace-logging-enabled = false
max-series-per-database = 0
max-values-per-tag = 0
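# For reference, the byte-valued sizes above are exact multiples of 1 MiB (1048576 bytes):
#   max-wal-size                 104857600 = 100 MiB
#   wal-partition-size-threshold  52428800 =  50 MiB
#   cache-max-memory-size        524288000 = 500 MiB
#   cache-snapshot-memory-size    26214400 =  25 MiB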
[coordinator]
force-remote-mapping = false
write-timeout = "5s"
shard-writer-timeout = "5s"
max-remote-write-connections = 3
shard-mapper-timeout = "5s"
[retention]
enabled = true
check-interval = "30m0s"
[shard-precreation]
enabled = true
check-interval = "10m0s"
advance-period = "30m0s"
[admin]
enabled = true
bind-address = ":10116"
https-enabled = false
# https-certificate = "/etc/ssl/influxdb.pem"
[monitor]
store-enabled = true
store-database = "_internal"
store-interval = "10s"
[subscriber]
enabled = true
[http]
enabled = true
bind-address = ":10117"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
#https-certificate = "/etc/ssl/influxdb.pem"
[[collectd]]
enabled = false
bind-address = ":25826"
database = "collectd"
retention-policy = ""
batch-size = 5000
batch-pending = 10
batch-timeout = "10s"
read-buffer = 0
typesdb = "/usr/share/collectd/types.db"
[[opentsdb]]
enabled = false
bind-address = ":4242"
database = "opentsdb"
retention-policy = ""
consistency-level = "one"
tls-enabled = false
certificate = "/etc/ssl/influxdb.pem"
batch-size = 1000
batch-pending = 5
batch-timeout = "1s"
log-point-errors = true
[continuous_queries]
log-enabled = true
enabled = true
run-interval = "1s"
[hinted-handoff]
enabled = false
#dir = "/mnt/pd0/influxdb/hh"
#max-size = 1073741824
#max-age = "168h0m0s"
#retry-rate-limit = 0
#retry-interval = "1s"
#retry-max-interval = "1m0s"
#purge-interval = "1h0m0s"
[[graphite]]
enabled = false
database = "graphite"
bind-address = ":2003"
protocol = "tcp"
consistency-level = "one"
name-separator = "."
# These next lines control how batching works. You should leave batching enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
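# As a rough sizing note (based only on the values above, not a hard guarantee):
# with batch-size = 1000 and the example batch-pending = 5, on the order of
# 5 * 1000 = 5000 points could sit buffered in memory before being written out.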
## "name-schema" configures tag names for parsing the metric name from graphite protocol;
## separated by `name-separator`.
## The "measurement" tag is special and the corresponding field will become
## the name of the metric.
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
## {
##     measurement: "cpu",
##     tags: {
##         "type": "server",
##         "host": "localhost",
##         "device": "cpu0"
##     }
## }
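## The templates below use InfluxDB's graphite template form "filter template":
## the filter (e.g. "prober.*") selects matching metric names, and the template maps
## each dot-separated field of the name to a tag. Under the usual template semantics
## (stated here as an assumption, not something this file defines): "measurement*"
## greedily joins all remaining fields into the measurement name, and an empty
## position (nothing between separators, or a leading ".") skips that field.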
templates = [
# Rule for probers.
#
# These metrics look like:
#
# prober.probename.<failure|latency>.value
#
"prober.* measurement.probename.type.",
# Rule for issues.
#
# These metrics look like:
#
# issues.closed.value
#
"issues.* measurement.type.",
# Rule for go-metrics runtime values.
#
# These metrics look like:
#
# statusd.skia-status.runtime.ReadMemStats.fifteen-minute
# statusd.skia-status.skia-status.runtime.ReadMemStats.fifteen-minute
"*.*.runtime app.host.runtime.measurement*",
"*.*.*.runtime app.host.host2.runtime.measurement*",
# Rules for buildbot measurements.
#
# datahopper.skia-datahopper2.buildbot.builds.Test_Ubuntu_GCC_Golo_GPU_GT610_x86_64_Debug_ASAN.duration.value
"*.*.buildbot.builds.* app.host.measurement.measurement.builder.measurement*",
# datahopper.skia-datahopper2.buildbot.buildstepsbybuilder.Test_Android_GCC_NexusPlayer_GPU_PowerVR_x86_Debug.push_uninteresting_hashes_txt.duration.value
"*.*.buildbot.buildstepsbybuilder.* app.host.measurement.measurement.builder.step.measurement*",
# datahopper.skia-datahopper2.buildbot.tx.PutBuild.mean.value
"*.*.buildbot.tx.* app.host.measurement.measurement.transaction.measurement*",
# Rules for datahopper_internal ingestBuild webhook measurements.
#
# internal.skia-internal.ingest-build-webhook.Google3-Autoroller.oldest-untested-commit-age.value
"*.*.ingest-build-webhook.* app.host.measurement.codename.measurement*",
# Rule for logserver metrics.
#
# These look like:
#
# logserver.skia-perf.skiaperf.ERROR.value
#
"logserver.* .host.app.measurement.value",
# Rule for collectd metrics.
#
# These look like:
#
# collectd.skia-alerts.cpu-11.cpu-idle
#
"collectd.* .host.resource.measurement*",
# Android stats.
"android_stats.* app.host.measurement.model.serial.measurement*",
# Catchall rule.
"app.host.measurement*",
]
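## For illustration only, assuming the usual InfluxDB graphite template semantics:
## the collectd rule "collectd.* .host.resource.measurement*" would parse
##   collectd.skia-alerts.cpu-11.cpu-idle
## roughly as measurement "cpu-idle" with tags host = "skia-alerts" and
## resource = "cpu-11"; the leading empty field drops the "collectd" prefix.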
## If set to true, when the input metric name has more fields than `name-schema` specifies,
## the extra fields will be ignored.
## Otherwise an error will be logged and the metric will be rejected.
# ignore-unnamed = true