---
# == COLLECTION ==
# The addresses on which to listen for statsd metrics. These are
# formatted as URLs, with schemes corresponding to valid "network"
# arguments on https://golang.org/pkg/net/#Listen. Currently, only udp
# and tcp (including the IPv4-only and IPv6-only variants) schemes are supported.
# This option supersedes the "udp_address" and "tcp_address" options.
statsd_listen_addresses:
- udp://localhost:8126
- tcp://localhost:8126
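# Incoming packets are plain statsd/DogStatsD datagrams; for example, a counter
# might arrive as "pageviews:1|c" and a timer as "db.query_ms:212|ms", optionally
# with DogStatsD-style tags such as "|#env:prod" (metric names here are
# illustrative).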
# The addresses on which to listen for SSF data. As with
# statsd_listen_addresses, these are formatted as URLs, with schemes
# corresponding to valid "network" arguments on
# https://golang.org/pkg/net/#Listen. Currently, only UDP and Unix
# domain sockets are supported.
# Note: SSF sockets are required to ingest trace data.
# This option supersedes the "ssf_address" option.
ssf_listen_addresses:
- udp://localhost:8128
- unix:///tmp/veneur-ssf.sock
# TLS
# These are only useful in conjunction with TCP listening sockets
# TLS server private key and certificate for encryption (specify both)
# These are the key/certificate contents, not a file path
tls_key: ""
tls_certificate: ""
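# For example, a key can be supplied inline with a YAML block scalar
# (placeholder contents shown, not a real key):
# tls_key: |
#   -----BEGIN PRIVATE KEY-----
#   ...placeholder...
#   -----END PRIVATE KEY-----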
# Authority certificate: requires clients to be authenticated
tls_authority_certificate: ""
# == BEHAVIOR ==
# Use a static host for forwarding
#forward_address: "http://veneur.example.com"
forward_address: ""
# How often to flush. When flushing to Datadog, changing this
# value when you've already emitted metrics will break your time
# series data.
interval: "10s"
# Veneur can "sychronize" it's flushes with the system clock, flushing at even
# intervals i.e. 0, 10, 20… to align with the `interval`. This is disabled by
# default for now, as it can cause thundering herds in large installations.
synchronize_with_interval: false
# Veneur emits its own metrics; this configures where we send them. It's ok
# to point veneur at itself for metrics consumption!
stats_address: "localhost:8126"
# The address on which to listen for HTTP imports and/or healthchecks.
# http_address: "einhorn@0"
http_address: "0.0.0.0:8127"
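# Assuming veneur's /healthcheck route, this address can also be probed for
# liveness at http://localhost:8127/healthcheck.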
# The name of timer metrics that "indicator" spans should be tracked
# under. If this is unset, veneur doesn't report an additional timer
# metric for indicator spans.
indicator_span_timer_name: "indicator_span.duration_ms"
# == METRICS CONFIGURATION ==
# Defaults to the os.Hostname()!
hostname: ""
# If true and hostname is "" or absent, don't add the host tag
omit_empty_hostname: false
# Tags supplied here will be added to all metrics and spans ingested by this
# instance. Example:
# tags:
# - "foo:bar"
# - "baz:quz"
tags:
- ""
# The percentiles you'd like to output for histograms, as floating point
# values.
percentiles:
- 0.5
- 0.75
- 0.99
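# With the values above, a histogram or timer named "my_timer" would typically
# flush Datadog-style metrics such as "my_timer.50percentile",
# "my_timer.75percentile" and "my_timer.99percentile".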
# Aggregations you'd like to output for histograms. Possible values can be any
# or all of:
# - `min`: the minimum value in the histogram during the flush period
# - `max`: the maximum value in the histogram during the flush period
# - `median`: the median value in the histogram during the flush period
# - `avg`: the average value in the histogram during the flush period
# - `count`: the number of values added to the histogram during the flush period
# - `sum`: the sum of all values added to the histogram during the flush period
# - `hmean`: the harmonic mean of all the values added to the histogram during the flush period
aggregates:
- "min"
- "max"
- "count"
# == DEPRECATED ==
# This configuration has been replaced by datadog_flush_max_per_body.
flush_max_per_body: 0
# This configuration has been replaced by datadog_span_buffer_size.
ssf_buffer_size: 0
# This has been replaced by lightstep_access_token
trace_lightstep_access_token: ""
# This has been replaced by lightstep_collector_host
trace_lightstep_collector_host: ""
# This has been replaced by lightstep_reconnect_period
trace_lightstep_reconnect_period: ""
# This has been replaced by lightstep_maximum_spans
trace_lightstep_maximum_spans: 0
# This has been replaced by lightstep_num_clients
trace_lightstep_num_clients: 0
# == PERFORMANCE ==
# Adjusts the number of workers Veneur will distribute aggregation across.
# More decreases contention but has diminishing returns.
num_workers: 96
# Numbers larger than 1 will enable the use of SO_REUSEPORT; make sure
# this is supported on your platform!
num_readers: 1
# == LIMITS ==
# How big of a buffer to allocate for incoming metrics. Metrics longer than this
# will be truncated!
metric_max_length: 4096
# How big of a buffer to allocate for incoming traces.
trace_max_length_bytes: 16384
# The size of the buffer we'll use to buffer socket reads. Tune this if you
# think Veneur needs more room to keep up with all packets.
read_buffer_size_bytes: 2097152
# == DIAGNOSTICS ==
# Sets the log level to DEBUG
debug: true
# Providing a Sentry DSN here will send internal exceptions to Sentry
sentry_dsn: ""
# Enables Go profiling
enable_profiling: false
# == SINKS ==
# == Datadog ==
# Datadog can be a sink for metrics, events, service checks and trace spans.
# Hostname to send Datadog data to.
datadog_api_hostname: https://app.datadoghq.com
# API key for accessing Datadog
datadog_api_key: "farts"
# How many metrics to include in the body of each POST to Datadog. Veneur
# will post multiple times in parallel if the limit is exceeded.
datadog_flush_max_per_body: 25000
# Hostname to send Datadog trace data to.
datadog_trace_api_address: ""
# The size of the ring buffer used for retaining spans during a flush interval.
datadog_span_buffer_size: 16384
# == SignalFx ==
# SignalFx can be a sink for metrics.
signalfx_api_key: ""
# Where to send metrics
signalfx_endpoint_base: "https://ingest.signalfx.com"
# The tag we'll add to each metric, containing the hostname we came from
signalfx_hostname_tag: "host"
# == LightStep ==
# LightStep can be a sink for trace spans.
# If present, lightstep will be enabled as a tracing sink
# and this access token will be used
# Access token for accessing LightStep
lightstep_access_token: ""
# Host to send trace data to
lightstep_collector_host: ""
# How often LightStep should reconnect to collectors. If your workload is
# imbalanced — some veneur instances see more spans than others — then you may
# want to reconnect more often.
lightstep_reconnect_period: "5m"
# The LightStep client has internal throttling to prevent you from overwhelming
# things. Anything that exceeds this many spans in the reporting period
# (a minimum of 500ms and a maximum of 2.5s at the time of this writing)
# will be dropped. In other words, you can only submit this many spans per
# flush! If left at zero, veneur will set the maximum to the size of
# `ssf_buffer_size`.
lightstep_maximum_spans: 0
# Multiple clients can be used to load-balance spans across multiple collectors,
# improving span indexing success rates.
# If missing (or set to zero), this defaults to a minimum of one client.
lightstep_num_clients: 1
# == Kafka ==
# Comma-delimited list of brokers suitable for Sarama's [NewAsyncProducer](https://godoc.org/github.com/Shopify/sarama#NewAsyncProducer)
# in the form hostname:port, such as localhost:9092
kafka_broker: ""
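# For example (hypothetical hostnames):
# kafka_broker: "kafka-1.example.com:9092,kafka-2.example.com:9092"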
# Name of the topic we'll be publishing checks to
kafka_check_topic: "veneur_checks"
# Name of the topic we'll be publishing events to
kafka_event_topic: "veneur_events"
# Name of the topic we'll be publishing metrics to
kafka_metric_topic: ""
# Name of the topic we'll be publishing spans to
kafka_span_topic: "veneur_spans"
# Name of a tag to hash on for sampling; if empty, spans are sampled based off
# of traceID
kafka_span_sample_tag: ""
# Sample rate in percent (as an integer)
# This should ideally be a floating point number, but at the time this was
# written, gojson interpreted whole-number floats in yaml as integers.
kafka_span_sample_rate_percent: 100
kafka_metric_buffer_bytes: 0
kafka_metric_buffer_messages: 0
kafka_metric_buffer_frequency: ""
kafka_span_serialization_format: "protobuf"
# The type of partitioner to use.
kafka_partitioner: "hash"
# The type of acks to require for metrics. One of none, local or all.
kafka_metric_require_acks: "all"
# The type of acks to require for spans. One of none, local or all.
kafka_span_require_acks: "all"
kafka_span_buffer_bytes: 0
kafka_span_buffer_mesages: 0
kafka_span_buffer_frequency: ""
# The number of retries before giving up.
kafka_retry_max: 0
# == PLUGINS ==
# == S3 Output ==
# Include these if you want to archive data to S3
aws_access_key_id: ""
aws_secret_access_key: ""
aws_region: ""
aws_s3_bucket: ""
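# For example (placeholder values):
# aws_region: "us-east-1"
# aws_s3_bucket: "my-veneur-archive"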
# == LocalFile Output ==
# Include this if you want to archive data to a local file (which should then be rotated/cleaned)
flush_file: ""
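# For example (hypothetical path):
# flush_file: "/var/spool/veneur/flush.out"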