diff --git a/CHANGELOG.md b/CHANGELOG.md index d237f4b9c27..75da16ad649 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ * [ENHANCEMENT] Add new metric `tempo_distributor_push_duration_seconds` [#1027](https://github.com/grafana/tempo/pull/1027) (@zalegrala) * [ENHANCEMENT] Add query parameter to show the default config values and the difference between the current values and the defaults. [#1045](https://github.com/grafana/tempo/pull/1045) (@MichelHollands) * [ENHANCEMENT] Adding metrics around ingester flush retries [#1049](https://github.com/grafana/tempo/pull/1049) (@dannykopping) +* [ENHANCEMENT] Allow search disablement in vulture [#1069](https://github.com/grafana/tempo/pull/1069) (@zalegrala) * [BUGFIX] Update port spec for GCS docker-compose example [#869](https://github.com/grafana/tempo/pull/869) (@zalegrala) * [BUGFIX] Fix "magic number" errors and other block mishandling when an ingester forcefully shuts down [#937](https://github.com/grafana/tempo/issues/937) (@mdisibio) * [BUGFIX] Fix compactor memory leak [#806](https://github.com/grafana/tempo/pull/806) (@mdisibio) diff --git a/cmd/tempo-vulture/main.go b/cmd/tempo-vulture/main.go index 9281ec06397..3fcca8ae972 100644 --- a/cmd/tempo-vulture/main.go +++ b/cmd/tempo-vulture/main.go @@ -62,7 +62,7 @@ func init() { flag.DurationVar(&tempoWriteBackoffDuration, "tempo-write-backoff-duration", 15*time.Second, "The amount of time to pause between write Tempo calls") flag.DurationVar(&tempoLongWriteBackoffDuration, "tempo-long-write-backoff-duration", 1*time.Minute, "The amount of time to pause between long write Tempo calls") flag.DurationVar(&tempoReadBackoffDuration, "tempo-read-backoff-duration", 30*time.Second, "The amount of time to pause between read Tempo calls") - flag.DurationVar(&tempoSearchBackoffDuration, "tempo-search-backoff-duration", 60*time.Second, "The amount of time to pause between search Tempo calls") + flag.DurationVar(&tempoSearchBackoffDuration, "tempo-search-backoff-duration", 60*time.Second, "The amount of time to pause between search Tempo calls. Set to 0s to disable search.") flag.DurationVar(&tempoRetentionDuration, "tempo-retention-duration", 336*time.Hour, "The block retention that Tempo is using") flag.DurationVar(&tempoSearchRetentionDuration, "tempo-search-retention-duration", 10*time.Minute, "The ingester retention we expect to be able to search within") } @@ -83,7 +83,12 @@ func main() { startTime := actualStartTime tickerWrite := time.NewTicker(tempoWriteBackoffDuration) tickerRead := time.NewTicker(tempoReadBackoffDuration) - tickerSearch := time.NewTicker(tempoSearchBackoffDuration) + // A non-positive backoff disables search, since time.NewTicker panics on durations <= 0 + var tickerSearch *time.Ticker + logger.Info("search backoff duration", zap.Duration("tempoSearchBackoffDuration", tempoSearchBackoffDuration)) + if tempoSearchBackoffDuration > 0 { + tickerSearch = time.NewTicker(tempoSearchBackoffDuration) + } interval := tempoWriteBackoffDuration ready := func(info *util.TraceInfo, now time.Time) bool { @@ -156,33 +161,35 @@ func main() { }() // Search - go func() { - for now := range tickerSearch.C { - _, seed := selectPastTimestamp(startTime, now, interval, tempoSearchRetentionDuration) - log := logger.With( - zap.String("org_id", tempoOrgID), - zap.Int64("seed", seed.Unix()), - ) + if tickerSearch != nil { + go func() { + for now := range tickerSearch.C { + _, seed := selectPastTimestamp(startTime, now, interval, tempoSearchRetentionDuration) + log := logger.With( + zap.String("org_id", tempoOrgID), + zap.Int64("seed", seed.Unix()), + ) - info := util.NewTraceInfo(seed, tempoOrgID) + info := util.NewTraceInfo(seed, tempoOrgID) - if !ready(info, now) { - continue - } + if !ready(info, now) { + continue + } - client := util.NewClient(tempoQueryURL, tempoOrgID) + client := util.NewClient(tempoQueryURL, tempoOrgID) - // query a tag we expect the trace to be found within - searchMetrics, err := searchTag(client, seed) - if err != nil { - metricErrorTotal.Inc() - log.Error("search for metrics failed", - zap.Error(err), - ) + // query a tag we expect the trace to be found within + searchMetrics, err := searchTag(client, seed) + if err != nil { + metricErrorTotal.Inc() + log.Error("search for metrics failed", + zap.Error(err), + ) + } + pushMetrics(searchMetrics) } - pushMetrics(searchMetrics) - } - }() + }() + } http.Handle(prometheusPath, promhttp.Handler()) log.Fatal(http.ListenAndServe(prometheusListenAddress, nil)) diff --git a/operations/jsonnet/microservices/config.libsonnet b/operations/jsonnet/microservices/config.libsonnet index b0d97e1f5ee..4fbbc7439ab 100644 --- a/operations/jsonnet/microservices/config.libsonnet +++ b/operations/jsonnet/microservices/config.libsonnet @@ -90,6 +90,10 @@ tempoPushUrl: 'http://distributor', tempoQueryUrl: 'http://query-frontend:%s' % $._config.port, tempoOrgId: '', + tempoRetentionDuration: '336h', + tempoSearchBackoffDuration: '60s', + tempoReadBackoffDuration: '30s', + tempoWriteBackoffDuration: '15s', }, ballast_size_mbs: '1024', port: 3200, diff --git a/operations/jsonnet/microservices/vulture.libsonnet b/operations/jsonnet/microservices/vulture.libsonnet index eab77e03ece..e3bd323281b 100644 --- a/operations/jsonnet/microservices/vulture.libsonnet +++ b/operations/jsonnet/microservices/vulture.libsonnet @@ -17,6 +17,10 @@ '-tempo-push-url=' + $._config.vulture.tempoPushUrl, '-tempo-query-url=' + $._config.vulture.tempoQueryUrl, '-tempo-org-id=' + $._config.vulture.tempoOrgId, + '-tempo-retention-duration=' + $._config.vulture.tempoRetentionDuration, + '-tempo-search-backoff-duration=' + $._config.vulture.tempoSearchBackoffDuration,
+      '-tempo-read-backoff-duration=' + $._config.vulture.tempoReadBackoffDuration, +      '-tempo-write-backoff-duration=' + $._config.vulture.tempoWriteBackoffDuration, ]) + k.util.resourcesRequests('50m', '100Mi') + k.util.resourcesLimits('100m', '500Mi'), diff --git a/operations/kube-manifests/Deployment-vulture.yaml b/operations/kube-manifests/Deployment-vulture.yaml index cafd334383a..18cb3f6edc9 100644 --- a/operations/kube-manifests/Deployment-vulture.yaml +++ b/operations/kube-manifests/Deployment-vulture.yaml @@ -23,6 +23,10 @@ spec: - -tempo-push-url=http://distributor - -tempo-query-url=http://query-frontend:3200/tempo - -tempo-org-id=1 + - -tempo-retention-duration=336h + - -tempo-search-backoff-duration=60s + - -tempo-read-backoff-duration=30s + - -tempo-write-backoff-duration=15s image: grafana/tempo-vulture:latest imagePullPolicy: IfNotPresent name: vulture diff --git a/operations/kube-manifests/util/jsonnetfile.lock.json b/operations/kube-manifests/util/jsonnetfile.lock.json index 714ac49fc6d..70d6b2df365 100644 --- a/operations/kube-manifests/util/jsonnetfile.lock.json +++ b/operations/kube-manifests/util/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "ksonnet-util" } } }, - "version": "bc9b685050691a78ee414cd8f789857de0eabe8d", - "sum": "OxgtIWL4hjvG0xkMwUzZ7Yjs52zUhLhaVQpwHCbqf8A=" + "version": "17eca514f990530c411c2e9411af5213dd4bd224", + "sum": "fFVlCoa/N0qiqTbDhZAEdRm2Vv76Z9Clxp3/haJ+PyA=" }, { "source": { @@ -18,7 +18,7 @@ "subdir": "memcached" } }, - "version": "bc9b685050691a78ee414cd8f789857de0eabe8d", + "version": "17eca514f990530c411c2e9411af5213dd4bd224", "sum": "dTOeEux3t9bYSqP2L/uCuLo/wUDpCKH4w+4OD9fePUk=" }, { diff --git a/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet b/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet index d97f7d5db32..aaecdabbba8 100644 --- a/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet +++ b/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet @@ -103,42 +103,55 @@ local util(k) = { // For example, passing "volumeMount.withSubPath(subpath)" will result in // a subpath mixin. configVolumeMount(name, path, volumeMountMixin={}):: - local container = k.core.v1.container, - deployment = k.apps.v1.deployment, - volumeMount = k.core.v1.volumeMount, - volume = k.core.v1.volume, - addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path) + - volumeMountMixin, - ); - - deployment.mapContainers(addMount) + - deployment.mixin.spec.template.spec.withVolumesMixin([ - volume.fromConfigMap(name, name), - ]), + $.volumeMounts([$.volumeMountItem(name, path, volumeMountMixin)]), // configMapVolumeMount adds a configMap to deployment-like objects. // It will also add an annotation hash to ensure the pods are re-deployed // when the config map changes. configMapVolumeMount(configMap, path, volumeMountMixin={}):: - local name = configMap.metadata.name, - hash = std.md5(std.toString(configMap)), - container = k.core.v1.container, + $.volumeMounts([$.configMapVolumeMountItem(configMap, path, volumeMountMixin)]), + + + // configMapVolumeMountItem represents a config map to be mounted.
+ // It is used in the volumeMounts function + configMapVolumeMountItem(configMap, path, volumeMountMixin={}):: + local name = configMap.metadata.name; + local annotations = { ['%s-hash' % name]: std.md5(std.toString(configMap)) }; + $.volumeMountItem(name, path, volumeMountMixin, annotations), + + // volumeMountItem represents a volume to be mounted. + // It is used in the volumeMounts function + volumeMountItem(name, path, volumeMountMixin={}, annotations={}):: { + name: name, + path: path, + volumeMountMixin: volumeMountMixin, + annotations: annotations, + }, + + // volumeMounts adds an array of volumeMountItem to deployment-like objects. + // It can also add a set of annotations for each mount + volumeMounts(mounts):: + local container = k.core.v1.container, deployment = k.apps.v1.deployment, volumeMount = k.core.v1.volumeMount, - volume = k.core.v1.volume, - addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path) + - volumeMountMixin, + volume = k.core.v1.volume; + local addMounts(c) = c + container.withVolumeMountsMixin([ + volumeMount.new(m.name, m.path) + + m.volumeMountMixin + for m in mounts + ]); + local annotations = std.foldl( + function(acc, ann) acc + ann, + [m.annotations for m in mounts], + {} ); - deployment.mapContainers(addMount) + - deployment.mixin.spec.template.spec.withVolumesMixin([ - volume.fromConfigMap(name, name), - ]) + - deployment.mixin.spec.template.metadata.withAnnotationsMixin({ - ['%s-hash' % name]: hash, - }), + deployment.mapContainers(addMounts) + + deployment.mixin.spec.template.spec.withVolumesMixin([ + volume.fromConfigMap(m.name, m.name) + for m in mounts + ]) + + (if annotations != {} then deployment.mixin.spec.template.metadata.withAnnotationsMixin(annotations) else {}), hostVolumeMount(name, hostPath, path, readOnly=false, volumeMountMixin={}):: local container = k.core.v1.container,
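Two brief usage sketches follow. Neither is part of the patch itself; the import paths, deployment field names, and config-map names in them are illustrative assumptions, not confirmed API.

First, disabling search in vulture from jsonnet. The new `tempoSearchBackoffDuration` knob feeds the `-tempo-search-backoff-duration` flag, and the guard in `main.go` above skips creating the search ticker, and with it the whole search goroutine, when the flag parses to a non-positive duration. A minimal override, assuming an environment that imports the microservices library:

```jsonnet
// Minimal sketch: the import path and surrounding environment are assumptions;
// only the _config override matters. '0s' disables vulture's search loop.
local tempo = import 'microservices/tempo.libsonnet';

tempo {
  _config+:: {
    vulture+: {
      tempoSearchBackoffDuration: '0s',
    },
  },
}
```

Second, the refactored ksonnet-util helpers. `configVolumeMount` and `configMapVolumeMount` are now thin wrappers over a batched `volumeMounts(mounts)` function, which applies all mounts in one `mapContainers` pass and folds every per-config-map hash annotation into a single `withAnnotationsMixin` call, so a change to any mounted map still triggers a rollout. Assuming the helpers are exposed on `k.util` the same way `k.util.resourcesRequests` is used above, batching two config maps might look like this (the deployment and config-map names are hypothetical):

```jsonnet
// Hypothetical example: mount two config maps with one call so both
// '<name>-hash' annotations land in a single pod-template mixin.
local k = import 'ksonnet-util/kausal.libsonnet';

{
  tempo_query_deployment+:
    k.util.volumeMounts([
      k.util.configMapVolumeMountItem($.tempo_configmap, '/conf'),
      k.util.configMapVolumeMountItem($.tempo_query_configmap, '/etc/tempo-query'),
    ]),
}
```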