From badd217337dd4670e4609dcecf151d69fc22a797 Mon Sep 17 00:00:00 2001 From: Benjamin Guttmann Date: Wed, 26 Jun 2024 00:30:52 +0200 Subject: [PATCH] Add Prometheus Remote Write Exporter (#26) --- src/otel-collector/components.go | 2 + src/otel-collector/config.yaml | 1 + src/otel-collector/go.mod | 8 + src/otel-collector/go.sum | 15 + .../prometheusremotewriteexporter/DESIGN.md | 288 ++ .../prometheusremotewriteexporter/LICENSE | 201 + .../prometheusremotewriteexporter/Makefile | 1 + .../prometheusremotewriteexporter/README.md | 151 + .../prometheusremotewriteexporter/config.go | 119 + .../prometheusremotewriteexporter/context.go | 35 + .../prometheusremotewriteexporter/doc.go | 7 + .../documentation.md | 23 + .../prometheusremotewriteexporter/exporter.go | 368 ++ .../prometheusremotewriteexporter/factory.go | 110 + .../prometheusremotewriteexporter/helper.go | 98 + .../internal/metadata/generated_status.go | 15 + .../internal/metadata/generated_telemetry.go | 69 + .../metadata.yaml | 30 + .../prometheusremotewriteexporter/wal.go | 424 ++ .../translator/prometheusremotewrite/LICENSE | 201 + .../translator/prometheusremotewrite/Makefile | 1 + .../prometheusremotewrite/helper.go | 562 +++ .../prometheusremotewrite/histograms.go | 196 + .../prometheusremotewrite/metadata.yaml | 3 + .../prometheusremotewrite/metrics_to_prw.go | 201 + .../number_data_points.go | 96 + .../otlp_to_openmetrics_metadata.go | 76 + .../github.com/prometheus/prometheus/LICENSE | 201 + .../github.com/prometheus/prometheus/NOTICE | 108 + .../prometheus/model/timestamp/timestamp.go | 34 + .../prometheus/model/value/value.go | 34 + .../prometheus/prometheus/prompb/README.md | 9 + .../prometheus/prometheus/prompb/buf.lock | 10 + .../prometheus/prometheus/prompb/buf.yaml | 21 + .../prometheus/prometheus/prompb/custom.go | 39 + .../prometheus/prometheus/prompb/remote.pb.go | 1702 +++++++ .../prometheus/prometheus/prompb/remote.proto | 88 + .../prometheus/prometheus/prompb/types.pb.go | 4440 +++++++++++++++++ .../prometheus/prometheus/prompb/types.proto | 187 + .../vendor/github.com/tidwall/gjson/LICENSE | 20 + .../vendor/github.com/tidwall/gjson/README.md | 488 ++ .../vendor/github.com/tidwall/gjson/SYNTAX.md | 319 ++ .../vendor/github.com/tidwall/gjson/gjson.go | 2973 +++++++++++ .../vendor/github.com/tidwall/gjson/logo.png | Bin 0 -> 15936 bytes .../vendor/github.com/tidwall/match/LICENSE | 20 + .../vendor/github.com/tidwall/match/README.md | 29 + .../vendor/github.com/tidwall/match/match.go | 237 + .../vendor/github.com/tidwall/pretty/LICENSE | 20 + .../github.com/tidwall/pretty/README.md | 122 + .../github.com/tidwall/pretty/pretty.go | 674 +++ .../vendor/github.com/tidwall/tinylru/LICENSE | 19 + .../github.com/tidwall/tinylru/README.md | 54 + .../vendor/github.com/tidwall/tinylru/lru.go | 208 + .../vendor/github.com/tidwall/wal/.gitignore | 3 + .../vendor/github.com/tidwall/wal/LICENSE | 20 + .../vendor/github.com/tidwall/wal/README.md | 79 + .../vendor/github.com/tidwall/wal/wal.go | 917 ++++ src/otel-collector/vendor/modules.txt | 27 + 58 files changed, 16403 insertions(+) create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/DESIGN.md create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/LICENSE create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/Makefile 
create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/config.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/context.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/doc.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/helper.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/wal.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/LICENSE create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/Makefile create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/helper.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/histograms.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metadata.yaml create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metrics_to_prw.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/number_data_points.go create mode 100644 src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/LICENSE create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/NOTICE create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/model/value/value.go create mode 100644 
src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/README.md create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.lock create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.yaml create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/custom.go create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.proto create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.pb.go create mode 100644 src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.proto create mode 100644 src/otel-collector/vendor/github.com/tidwall/gjson/LICENSE create mode 100644 src/otel-collector/vendor/github.com/tidwall/gjson/README.md create mode 100644 src/otel-collector/vendor/github.com/tidwall/gjson/SYNTAX.md create mode 100644 src/otel-collector/vendor/github.com/tidwall/gjson/gjson.go create mode 100644 src/otel-collector/vendor/github.com/tidwall/gjson/logo.png create mode 100644 src/otel-collector/vendor/github.com/tidwall/match/LICENSE create mode 100644 src/otel-collector/vendor/github.com/tidwall/match/README.md create mode 100644 src/otel-collector/vendor/github.com/tidwall/match/match.go create mode 100644 src/otel-collector/vendor/github.com/tidwall/pretty/LICENSE create mode 100644 src/otel-collector/vendor/github.com/tidwall/pretty/README.md create mode 100644 src/otel-collector/vendor/github.com/tidwall/pretty/pretty.go create mode 100644 src/otel-collector/vendor/github.com/tidwall/tinylru/LICENSE create mode 100644 src/otel-collector/vendor/github.com/tidwall/tinylru/README.md create mode 100644 src/otel-collector/vendor/github.com/tidwall/tinylru/lru.go create mode 100644 src/otel-collector/vendor/github.com/tidwall/wal/.gitignore create mode 100644 src/otel-collector/vendor/github.com/tidwall/wal/LICENSE create mode 100644 src/otel-collector/vendor/github.com/tidwall/wal/README.md create mode 100644 src/otel-collector/vendor/github.com/tidwall/wal/wal.go diff --git a/src/otel-collector/components.go b/src/otel-collector/components.go index a0167e97..d48f7ef2 100644 --- a/src/otel-collector/components.go +++ b/src/otel-collector/components.go @@ -12,6 +12,7 @@ import ( otlpexporter "go.opentelemetry.io/collector/exporter/otlpexporter" fileexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter" prometheusexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter" + prometheusremotewriteexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" otlpreceiver "go.opentelemetry.io/collector/receiver/otlpreceiver" ) @@ -36,6 +37,7 @@ func components() (otelcol.Factories, error) { otlpexporter.NewFactory(), fileexporter.NewFactory(), prometheusexporter.NewFactory(), + prometheusremotewriteexporter.NewFactory(), ) if err != nil { return otelcol.Factories{}, err diff --git a/src/otel-collector/config.yaml b/src/otel-collector/config.yaml index cfac7b77..91142715 100644 --- a/src/otel-collector/config.yaml +++ b/src/otel-collector/config.yaml @@ -9,5 +9,6 @@ exporters: - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.103.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.103.0 - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.103.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0 receivers: - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0 diff --git a/src/otel-collector/go.mod b/src/otel-collector/go.mod index 5774065c..643217dd 100644 --- a/src/otel-collector/go.mod +++ b/src/otel-collector/go.mod @@ -7,6 +7,7 @@ go 1.21.0 require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.103.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.103.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.103.0 go.opentelemetry.io/collector/confmap v0.103.0 @@ -61,17 +62,24 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.103.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.103.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.103.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.54.0 // indirect github.com/prometheus/procfs v0.15.0 // indirect + github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e // indirect github.com/rs/cors v1.10.1 // indirect github.com/shirou/gopsutil/v4 v4.24.5 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/tidwall/gjson v1.10.2 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/tinylru v1.1.0 // indirect + github.com/tidwall/wal v1.1.7 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect diff --git a/src/otel-collector/go.sum b/src/otel-collector/go.sum index b7d7bf75..045a3924 100644 --- a/src/otel-collector/go.sum +++ b/src/otel-collector/go.sum @@ -245,6 +245,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.103.0/go.mod h1:rgaRhVR8qqSHK7UnFt055kY29QJzexZdg7YG5nJrHLQ= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.103.0 h1:NQCp4YFDJz5rqJ3PuecRVFIfqcZaZUacqaiSnPOyHGQ= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.103.0/go.mod h1:8v8Pn7bLxFrWmer685QqLNe4f86fPIAsb+1rr2bahco= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0 h1:blcAZWoZ9vqDvr1pT/Q5RfYYNOcOd71oaKFI2m4P4Hc= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0/go.mod h1:FXy3BBa2RwAVHa8M5pTX6vFLouFAh3Ly9bAvnTAp0nk= github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.103.0 
h1:ul4wxL7d8FH5qkdqvA8whNOEmK4/VH0/uXbgrFFHwHs= github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.103.0/go.mod h1:aand41EuSgmlWvOM6Auw7SB75gvG3MffehUPMJ3kq2s= github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding/otlpencodingextension v0.103.0 h1:ikR6Xz7QNQjJmaSGeIJbWGceBgzvkqhqXD08DihBHvI= @@ -261,6 +263,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetr github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.103.0/go.mod h1:DR/fbOxaIyobmsd8nbDzuwqwwSNX9+yONDWx8dF2qS4= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.103.0 h1:mZbM5uIvfL4/Q0i9xKYps8AYe37MhwJJpk1Ojz9m6mw= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.103.0/go.mod h1:+WiRcWZOrXV3B5Qk0KlF5Tm7BopRvLpoPYqTrt4iA7s= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0 h1:MquBpfLZRaGnQeeIF3Kn+bQNXRLZtKTWGEysbNnxcRo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0/go.mod h1:+3rCl7Z3Zm4ZZOyX9lMmaAoN8NeeCXUOmR/tysR631g= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.103.0 h1:V1N1X+KbjMauQsqW99vOfwTWBmdDGHafyoCN2LVtro4= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.103.0/go.mod h1:Sd26pFKO2fLsFKJCeQMn77k87z7lOyx5t2CISD5psR0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -317,6 +321,16 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= +github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= +github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= +github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -568,6 +582,7 @@ k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 
v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/DESIGN.md b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/DESIGN.md new file mode 100644 index 00000000..3fbf3690 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/DESIGN.md @@ -0,0 +1,288 @@ + + +# **OpenTelemetry Collector Prometheus Remote Write/Cortex Exporter Design** + +Authors: @huyan0, @danielbang907 + +Date: July 30, 2020 + +## **1. Introduction** + +Prometheus can be integrated with remote storage systems that supports its remote write API. Existing remote storage integration support is included in [Cortex](https://cortexmetrics.io/docs/api/), [influxDB](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/), and many [others](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). + +The following diagram shows an example of Prometheus remote write API usage, with Cortex, an open source, horizontally scalable, highly available, multi-tenant, long term storage, as a remote storage backend. + +![Cortex Architecture](img/cortex.png) + +Our project is focused on developing an exporter for the OpenTelemetry Collector to any Prometheus remote storage backend. + +### **1.1 Remote Write API** + +The Prometheus remote write/Cortex exporter should write metrics to a remote URL in a snappy-compressed, [protocol buffer](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto#L22) encoded HTTP request defined by the Prometheus remote write API. Each request should encode multiple Prometheus remote write TimeSeries, which are composed of a set of labels and a collection of samples. Each label contains a name-value pair of strings, and each sample contains a timestamp-value number pair. + +![Image of TimeSeries](img/timeseries.png) + +TimeSeries stores its metric name in its labels and does not describe metric types or start timestamps. To convert to TimeSeries data, buckets of a Histogram are broken down into individual TimeSeries with a bound label(`le`), and a similar process happens with quantiles in a Summary. + + +More details of Prometheus remote write API can be found in Prometheus [documentation](https://prometheus.io/docs/prometheus/latest/storage/#overview) and Cortex [documentation](https://cortexmetrics.io/docs/api/). + +### **1.2 Gaps and Assumptions** + +**Gap 1:** +Currently, metrics from the OpenTelemetry SDKs cannot be exported to Prometheus from the collector correctly ([#1255](https://github.com/open-telemetry/opentelemetry-collector/issues/1255)). This is because the SDKs send metrics to the collector via their OTLP exporter, which exports the delta value of cumulative counters. The same issue will arise for exporting to any Prometheus remote storage backend. + +To overcome this gap in the Collector pipeline, we had proposed 2 different solutions: + +1. Add a [metric aggregation processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/4968) to the collector pipeline to aggregate delta values into cumulative values for cumulative backends. 
This solution requires users to set up a collector agent next to each SDK to make sure delta values are aggregated correctly. +2. Require the OTLP exporters in SDKs to [send cumulative values for cumulative metric types to the Collector by default](https://github.com/open-telemetry/opentelemetry-specification/issues/731). Therefore, no aggregation of delta metric values is required in the Collector pipeline for Prometheus/storage backends to properly process the data. + +**Gap 2:** +Another gap is that the OTLP metric definition is still in development. This exporter will require refactoring as OTLP changes in the future. + +**Assumptions:** +Because of the gaps mentioned above, this project will convert from the current OTLP metrics and work under the assumption that one of the above solutions will be implemented, and all incoming monotonic scalar/histogram/summary metrics should be cumulative or otherwise dropped. More details on the behavior of the exporter are in section 2.2. + +## **2. Prometheus Remote Write/Cortex Exporter** + +The Prometheus remote write/Cortex exporter should receive OTLP metrics, group data points by metric name and label set, convert each group to a TimeSeries, and send all TimeSeries to a storage backend via HTTP. + +### **2.1 Receiving Metrics** +The Prometheus remote write/Cortex exporter receives a MetricsData instance in its PushMetrics() function. MetricsData contains a collection of Metric instances. Each Metric instance contains a series of data points, and each data point has a set of labels associated with it. Since Prometheus remote write TimeSeries are identified by unique sets of labels, the exporter needs to group data points within each Metric instance by their label set, and convert each group to a TimeSeries. + +To group data points by label set, the exporter should create a map with each PushMetrics() call. The key of the map should represent a combination of the following information: + +* the metric type +* the metric name +* the set of labels that identify a unique TimeSeries + + +The exporter should create a signature string as the map key by concatenating the metric type, metric name, and label names and label values at each data point. To ensure correctness, the label set at each data point should be sorted by label key before generating the signature string. + +An alternative key type is the existing label.Set implementation from the OpenTelemetry Go API. It provides a Distinct type that guarantees the result will equal the equivalent Distinct value of any label set with the same elements, where sets are made unique by choosing the last value in the input for any given key. If we allocate a Go API's kv.KeyValue for every label of a data point, then a label.Set from the API can be created, and its Distinct value can be used as the map key. + + +The value of the map should be a Prometheus TimeSeries. Each data point’s value and timestamp should be inserted into its corresponding TimeSeries in the map as a Sample, and each metric’s label set and metric name should be combined and translated to a Prometheus label set; a new TimeSeries should be created if the string signature is not in the map.
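For illustration, a minimal Go sketch of this grouping step could look like the following. It is not the exporter's actual code: the package and helper names (`prwsketch`, `timeSeriesSignature`, `addSample`) are hypothetical, and only the vendored `prompb` types are real. The author's pseudocode below then shows how helpers like these would be driven from PushMetrics().

```go
package prwsketch

import (
	"sort"
	"strings"

	"github.com/prometheus/prometheus/prompb"
)

// timeSeriesSignature builds the map key described above: the metric type and
// name concatenated with the data point's label names and values, with the
// labels sorted by name first so equal label sets always produce equal keys.
func timeSeriesSignature(metricType, metricName string, labels []prompb.Label) string {
	sorted := make([]prompb.Label, len(labels))
	copy(sorted, labels)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Name < sorted[j].Name })

	var sb strings.Builder
	sb.WriteString(metricType)
	sb.WriteString("-")
	sb.WriteString(metricName)
	for _, l := range sorted {
		sb.WriteString("-")
		sb.WriteString(l.Name)
		sb.WriteString("-")
		sb.WriteString(l.Value)
	}
	return sb.String()
}

// addSample appends one sample to the TimeSeries stored under the signature,
// creating a new TimeSeries with the translated label set on first use.
func addSample(tsMap map[string]*prompb.TimeSeries, sig string, labels []prompb.Label, sample prompb.Sample) {
	ts, ok := tsMap[sig]
	if !ok {
		ts = &prompb.TimeSeries{Labels: labels}
		tsMap[sig] = ts
	}
	ts.Samples = append(ts.Samples, sample)
}
```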
+ + +Pseudocode: + + func PushMetrics(metricsData) { + + // Create a map that stores distinct TimeSeries + map := make(map[String][]TimeSeries) + + for metric in metricsData: + for point in metric: + // Generate signature string + sig := pointSignature(metric, point) + + // Find corresponding TimeSeries in map + // Add to TimeSeries + + // Sends TimeSeries to backend + export(map) + } + +### **2.2 Mapping of OTLP Metrics to TimeSeries** + +Each Prometheus remote write TimeSeries represents less semantic information than an OTLP metric. The temporality property of a OTLP metric is ignored in a TimeSeries because it is always considered as cumulative for monotonic types and histogram, and the type property of a OTLP metric is translated by mapping each metric to one or multiple TimeSeries. The following sections explain how to map each OTLP metric type to Prometheus remote write TimeSeries. + + +**INT64, MONOTONIC_INT64, DOUBLE, MONOTONIC_DOUBLE** + +Each unique label set within metrics of these types can be converted to exactly one TimeSeries. From the perspective of Prometheus client types, INT64 and DOUBLE correspond to gauge metrics, and MONOTONIC types correspond to counter metrics. In both cases, data points will be exported directly without aggregation. Any metric of the monotonic types that is not cumulative should be dropped; non-monotonic scalar types are assumed to represent gauge values, thus its temporality is not checked. Monotonic types need to have a `_total` suffix in its metric name when exporting; this is a requirement of Prometheus. + + +**HISTOGRAM** + +Each histogram data point can be converted to 2 + n + 1 Prometheus remote write TimeSeries: + +* 1 *TimeSeries* representing metric_name_count contains HistogramDataPoint.count +* 1 *TimeSeries* representing metric_name_sum contains HistogramDataPoint.sum +* n *TimeSeries* each representing metric_name_bucket{le=“upperbound”} contain the count of each bucket defined by the bounds of the data point +* 1 *TimeSeries* representing metric_name_bucket{le=“+Inf”} contains counts for the bucket with infinity as upper bound; its value is equivalent to metric_name_count. + +Prometheus bucket values are cumulative, meaning the count of each bucket should contain counts from buckets with lower bounds. In addition, Exemplars from a histogram data point are ignored. When adding a bucket of the histogram data point to the map, the string signature should also contain a `le` label that indicates the bound value. This label should also be exported. Any histogram metric that is not cumulative should be dropped. + + +**SUMMARY** + +Each summary data point can be converted to 2 + n Prometheus remote write TimeSeries: + +* 1 *TimeSeries* representing metric_name_count contains SummaryDataPoint.count +* 1 *TimeSeries* representing metric_name_sum contains SummaryDataPoint.sum +* and n *TimeSeries* each representing metric_name{quantile=“quantileValue”} contains the value of each quantile in the data point. + +When adding a quantile of the summary data point to the map, the string signature should also contain a `quantile ` label that indicates the quantile value. This label should also be exported. Any summary metric that is not cumulative should be dropped. + +### **2.3 Exporting Metrics** + +The Prometheus remote write/Cortex exporter should call proto.Marshal() to convert multiple TimeSeries to a byte array. Then, the exporter should send the byte array to Prometheus remote storage in a HTTP request. 
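As a concrete, hypothetical sketch of this marshal-compress-send step (assuming the `gogo/protobuf`, `snappy`, and `prompb` packages that this patch vendors), the request building could look like the following. This is only an illustration of the mechanism described here, not the exporter's real implementation, and the `export` signature is invented for the example; batching, retries, and error translation in the actual exporter are more involved.

```go
package prwsketch

import (
	"bytes"
	"context"
	"fmt"
	"net/http"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
)

// export marshals the accumulated TimeSeries into a WriteRequest,
// snappy-compresses the payload, and POSTs it to the remote write endpoint.
func export(ctx context.Context, client *http.Client, endpoint string, tsMap map[string]*prompb.TimeSeries) error {
	writeReq := &prompb.WriteRequest{}
	for _, ts := range tsMap {
		writeReq.Timeseries = append(writeReq.Timeseries, *ts)
	}

	raw, err := proto.Marshal(writeReq)
	if err != nil {
		return fmt.Errorf("marshal write request: %w", err)
	}
	compressed := snappy.Encode(nil, raw)

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(compressed))
	if err != nil {
		return err
	}
	// Headers required by the Prometheus remote write protocol.
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("remote write returned status %d", resp.StatusCode)
	}
	return nil
}
```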
+ + +Authentication credentials should be added to each request before sending to the backend. Basic auth and bearer token headers can be added using Golang http.Client’s default configuration options. Other authentication headers can be added by implementing a client interceptor. + + +Pseudocode: + + + func export(*map) error { + // Stores timeseries + arr := make([]TimeSeries) + + for timeseries in map: + arr = append(arr, timeseries) + + // Converts arr to WriteRequest + request := proto.Marshal(arr) + + // Sends HTTP request to endpoint + } + +## **3. Other Components** + +### **3.1 Config Struct** + +This struct is based on an inputted YAML file at the beginning of the pipeline and defines the configurations for an Exporter build. Examples of configuration parameters are HTTP endpoint, compression type, backend program, etc. + + +Converting YAML to a Go struct is done by the Collector, using [_the Viper package_](https://github.com/spf13/viper), which is an open-source library that seamlessly converts inputted YAML files into a usable, appropriate Config struct. + + +An example of the exporter section of the Collector config.yml YAML file can be seen below: + + ... + + exporters: + prometheus_remote_write: + http_endpoint: + # Prefix to metric name + namespace: + # Labels to add to each TimeSeries + const_labels: + [label: ] + # Allow users to add any header; only required headers listed here + headers: + [X-Prometheus-Remote-Write-Version:] + [Tenant-id:] + request_timeout: + + # ************************************************************************ + # below are configurations copied from Prometheus remote write config + # ************************************************************************ + # Sets the `Authorization` header on every remote write request with the + # configured username and password. + # password and password_file are mutually exclusive. + basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + + # Sets the `Authorization` header on every remote write request with + # the configured bearer token. It is mutually exclusive with `bearer_token_file`. + [ bearer_token: ] + + # Sets the `Authorization` header on every remote write request with the bearer token + # read from the configured file. It is mutually exclusive with `bearer_token`. + [ bearer_token_file: /path/to/bearer/token/file ] + + # Configures the remote write request's TLS settings. + tls_config: + # CA certificate to validate API server certificate with. + [ ca_file: ] + + # Certificate and key files for client cert authentication to the server. + [ cert_file: ] + [ key_file: ] + + # ServerName extension to indicate the name of the server. + # https://tools.ietf.org/html/rfc4366#section-3.1 + [ server_name: ] + + # Disable validation of the server certificate. + [ insecure_skip_verify: ] + + ... + +### **3.2 Factory Struct** + +This struct implements the ExporterFactory interface, and is used during collector’s pipeline initialization to create the Exporter instances as defined by the Config struct. The `exporterhelper` package will be used to create the exporter and the factory. + + +Our Factory type will look very similar to other exporters’ factory implementation. For our implementation, our Factory instance will implement three methods + + +**Methods** + + NewFactory +This method will use the NewFactory method within the `exporterhelper` package to create a instance of the factory. 
+ + createDefaultConfig + +This method creates the default configuration for the Prometheus remote write/Cortex exporter. + + + createMetricsExporter + +This method constructs a new http.Client with interceptors that add headers to any request it sends. Then, this method initializes a new Prometheus remote write/Cortex exporter with the http.Client. Finally, it constructs a collector Prometheus remote write/Cortex exporter with the created SDK exporter. + + + +## **4. Other Considerations** + +### **4.1 Concurrency** + +The Prometheus remote write/Cortex exporter should be thread-safe; in this design, the only resource shared across goroutines is the http.Client from the Go standard library, which is itself thread-safe, so our code is thread-safe. + +### **4.2 Shutdown Behavior** + +Once the Shutdown() function is called, the exporter should stop accepting incoming calls (returning an error), and wait for current operations to finish before returning. This can be done by using a stop channel and a wait group. + + func Shutdown() { + close(stopChan) + waitGroup.Wait() + } + + func PushMetrics() { + select { + case <-stopChan: + return error + default: + waitGroup.Add(1) + defer waitGroup.Done() + // export metrics + ... + } + } + +### **4.3 Timeout Behavior** + +Users should be able to pass in a timeout for each HTTP request as part of the configuration. The factory should read the configuration file and set the timeout field of the http.Client. + + func (f *Factory) CreateNewExporter (config) { + ... + client := &http.Client{ + Timeout: config.requestTimeout, + } + ... + } + +### **4.4 Error Behavior** + +The PushMetricsData() function should return the number of dropped metrics. Any monotonic and histogram metrics that are not cumulative should be dropped. This can be done by checking the temporality of each received metric. Any error should be returned to the caller, and the error message should be descriptive. + + + +### **4.5 Test Strategy** + +We will follow test-driven development practices while completing this project. We’ll write unit tests before implementing production code. Tests will cover normal and abnormal inputs and test for edge cases. We will provide end-to-end tests using a mock backend/client. Our target is 90% or more code coverage. + + + +## **Request for Feedback** +We'd like to get some feedback on whether we made the appropriate assumptions in [this](#12-gaps-and-assumptions) section, and would appreciate comments, updates, and suggestions on the topic. + +Please let us know if there are any revisions, technical or informational, necessary for this document. Thank you! + + + diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/LICENSE b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/Makefile b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/Makefile new file mode 100644 index 00000000..ded7a360 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md new file mode 100644 index 00000000..33ae05e1 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md @@ -0,0 +1,151 @@ +# Prometheus Remote Write Exporter + + +| Status | | +| ------------- |-----------| +| Stability | [beta]: metrics | +| Distributions | [core], [contrib] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fprometheusremotewrite%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fprometheusremotewrite) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fprometheusremotewrite%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fprometheusremotewrite) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@Aneurysm9](https://www.github.com/Aneurysm9), [@rapphil](https://www.github.com/rapphil) | + +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib + + +Prometheus Remote Write Exporter sends OpenTelemetry metrics +to Prometheus [remote write compatible +backends](https://prometheus.io/docs/operating/integrations/) +such as Cortex, Mimir, and Thanos. +By default, this exporter requires TLS and offers queued retry capabilities. + +:warning: Non-cumulative monotonic, histogram, and summary OTLP metrics are +dropped by this exporter. + +A [design doc](DESIGN.md) is available to document in detail +how this exporter works. + +## Getting Started + +The following settings are required: + +- `endpoint` (no default): The remote write URL to send remote write samples. + +By default, TLS is enabled and must be configured under `tls:`: + +- `insecure` (default = `false`): whether to enable client transport security for + the exporter's connection. + +As a result, the following parameters are also required under `tls:`: + +- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should + only be used if `insecure` is set to false. 
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should + only be used if `insecure` is set to false. + +The following settings can be optionally configured: + +- `external_labels`: map of labels names and values to be attached to each metric data point +- `headers`: additional headers attached to each HTTP request. + - *Note the following headers cannot be changed: `Content-Encoding`, `Content-Type`, `X-Prometheus-Remote-Write-Version`, and `User-Agent`.* +- `namespace`: prefix attached to each exported metric name. +- `add_metric_suffixes`: If set to false, type and unit suffixes will not be added to metrics. Default: true. +- `send_metadata`: If set to true, prometheus metadata will be generated and sent. Default: false. +- `remote_write_queue`: fine tuning for queueing and sending of the outgoing remote writes. + - `enabled`: enable the sending queue (default: `true`) + - `queue_size`: number of OTLP metrics that can be queued. Ignored if `enabled` is `false` (default: `10000`) + - `num_consumers`: minimum number of workers to use to fan out the outgoing requests. (default: `5`) +- `resource_to_telemetry_conversion` + - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default. +- `target_info`: customize `target_info` metric + - `enabled` (default = true): If `enabled` is `true`, a `target_info` metric will be generated for each resource metric (see https://github.com/open-telemetry/opentelemetry-specification/pull/2381). +- `export_created_metric`: + - `enabled` (default = false): If `enabled` is `true`, a `_created` metric is + exported for Summary, Histogram, and Monotonic Sum metric points if + `StartTimeUnixNano` is set. +- `max_batch_size_bytes` (default = `3000000` -> `~2.861 mb`): Maximum size of a batch of + samples to be sent to the remote write endpoint. If the batch size is larger + than this value, it will be split into multiple batches. + +Example: + +```yaml +exporters: + prometheusremotewrite: + endpoint: "https://my-cortex:7900/api/v1/push" + wal: # Enabling the Write-Ahead-Log for the exporter. + directory: ./prom_rw # The directory to store the WAL in + buffer_size: 100 # Optional count of elements to be read from the WAL before truncating; default of 300 + truncate_frequency: 45s # Optional frequency for how often the WAL should be truncated. It is a time.ParseDuration; default of 1m + resource_to_telemetry_conversion: + enabled: true # Convert resource attributes to metric labels +``` + +Example: + +```yaml +exporters: + prometheusremotewrite: + endpoint: "https://my-cortex:7900/api/v1/push" + external_labels: + label_name1: label_value1 + label_name2: label_value2 +``` + +## Advanced Configuration + +Several helper files are leveraged to provide additional capabilities automatically: + +- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md) +- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) +- [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`. + +### Feature gates +This exporter has feature gate: `exporter.prometheusremotewritexporter.RetryOn429`. 
+When this feature gate is enable the prometheus remote write exporter will retry on 429 http status code with the provided retry configuration. +It currently doesn't support respecting the http header `Retry-After` if provided since the retry library used doesn't support this feature. + +To enable it run collector with enabled feature gate `exporter.prometheusremotewritexporter.RetryOn429`. This can be done by executing it with one additional parameter - `--feature-gates=telemetry.useOtelForInternalMetrics`. + +## Metric names and labels normalization + +OpenTelemetry metric names and attributes are normalized to be compliant with Prometheus naming rules. [Details on this normalization process are described in the Prometheus translator module](../../pkg/translator/prometheus/). + +## Setting resource attributes as metric labels + +By default, resource attributes are added to a special metric called `target_info`. To select and group by metrics by resource attributes, you [need to do join on `target_info`](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches). For example, to select metrics with `k8s_namespace_name` attribute equal to `my-namespace`: + +```promql +app_ads_ad_requests_total * on (job, instance) group_left target_info{k8s_namespace_name="my-namespace"} +``` + +Or to group by a particular attribute (for ex. `k8s_namespace_name`): + +```promql +sum by (k8s_namespace_name) (app_ads_ad_requests_total * on (job, instance) group_left(k8s_namespace_name) target_info) +``` + +This is not a common pattern, and we recommend copying the most common resource attributes into metric labels. You can do this through the transform processor: + +```yaml +processor: + transform: + metric_statements: + - context: datapoint + statements: + - set(attributes["namespace"], resource.attributes["k8s.namespace.name"]) + - set(attributes["container"], resource.attributes["k8s.container.name"]) + - set(attributes["pod"], resource.attributes["k8s.pod.name"]) +``` + +After this, grouping or selecting becomes as simple as: + +```promql +app_ads_ad_requests_total{namespace="my-namespace"} + +sum by (namespace) (app_ads_ad_requests_total) +``` + +[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta +[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[core]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/config.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/config.go new file mode 100644 index 00000000..f6346878 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/config.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + +import ( + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/exporter/exporterhelper" + + 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" +) + +// Config defines configuration for Remote Write exporter. +type Config struct { + exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + configretry.BackOffConfig `mapstructure:"retry_on_failure"` + + // prefix attached to each exported metric name + // See: https://prometheus.io/docs/practices/naming/#metric-names + Namespace string `mapstructure:"namespace"` + + // QueueConfig allows users to fine tune the queues + // that handle outgoing requests. + RemoteWriteQueue RemoteWriteQueue `mapstructure:"remote_write_queue"` + + // ExternalLabels defines a map of label keys and values that are allowed to start with reserved prefix "__" + ExternalLabels map[string]string `mapstructure:"external_labels"` + + ClientConfig confighttp.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + + // maximum size in bytes of time series batch sent to remote storage + MaxBatchSizeBytes int `mapstructure:"max_batch_size_bytes"` + + // ResourceToTelemetrySettings is the option for converting resource attributes to telemetry attributes. + // "Enabled" - A boolean field to enable/disable this option. Default is `false`. + // If enabled, all the resource attributes will be converted to metric labels by default. + ResourceToTelemetrySettings resourcetotelemetry.Settings `mapstructure:"resource_to_telemetry_conversion"` + WAL *WALConfig `mapstructure:"wal"` + + // TargetInfo allows customizing the target_info metric + TargetInfo *TargetInfo `mapstructure:"target_info,omitempty"` + + // CreatedMetric allows customizing creation of _created metrics + CreatedMetric *CreatedMetric `mapstructure:"export_created_metric,omitempty"` + + // AddMetricSuffixes controls whether unit and type suffixes are added to metrics on export + AddMetricSuffixes bool `mapstructure:"add_metric_suffixes"` + + // SendMetadata controls whether prometheus metadata will be generated and sent + SendMetadata bool `mapstructure:"send_metadata"` +} + +type CreatedMetric struct { + // Enabled if true the _created metrics could be exported + Enabled bool `mapstructure:"enabled"` +} + +type TargetInfo struct { + // Enabled if false the target_info metric is not generated by the exporter + Enabled bool `mapstructure:"enabled"` +} + +// RemoteWriteQueue allows to configure the remote write queue. +type RemoteWriteQueue struct { + // Enabled if false the queue is not enabled, the export requests + // are executed synchronously. + Enabled bool `mapstructure:"enabled"` + + // QueueSize is the maximum number of OTLP metric batches allowed + // in the queue at a given time. Ignored if Enabled is false. + QueueSize int `mapstructure:"queue_size"` + + // NumWorkers configures the number of workers used by + // the collector to fan out remote write requests. + NumConsumers int `mapstructure:"num_consumers"` +} + +// TODO(jbd): Add capacity, max_samples_per_send to QueueConfig. 
+ +var _ component.Config = (*Config)(nil) + +// Validate checks if the exporter configuration is valid +func (cfg *Config) Validate() error { + if cfg.RemoteWriteQueue.QueueSize < 0 { + return fmt.Errorf("remote write queue size can't be negative") + } + + if cfg.RemoteWriteQueue.Enabled && cfg.RemoteWriteQueue.QueueSize == 0 { + return fmt.Errorf("a 0 size queue will drop all the data") + } + + if cfg.RemoteWriteQueue.NumConsumers < 0 { + return fmt.Errorf("remote write consumer number can't be negative") + } + + if cfg.TargetInfo == nil { + cfg.TargetInfo = &TargetInfo{ + Enabled: true, + } + } + if cfg.CreatedMetric == nil { + cfg.CreatedMetric = &CreatedMetric{ + Enabled: false, + } + } + if cfg.MaxBatchSizeBytes < 0 { + return fmt.Errorf("max_batch_byte_size must be greater than 0") + } + if cfg.MaxBatchSizeBytes == 0 { + // Defaults to ~2.81MB + cfg.MaxBatchSizeBytes = 3000000 + } + + return nil +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/context.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/context.go new file mode 100644 index 00000000..88693399 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/context.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + +import ( + "context" + "fmt" + + "go.uber.org/zap" +) + +type ctxKey int + +const ( + loggerCtxKey ctxKey = iota +) + +func contextWithLogger(ctx context.Context, log *zap.Logger) context.Context { + return context.WithValue(ctx, loggerCtxKey, log) +} + +func loggerFromContext(ctx context.Context) (*zap.Logger, error) { + v := ctx.Value(loggerCtxKey) + if v == nil { + return nil, fmt.Errorf("no logger found in context") + } + + l, ok := v.(*zap.Logger) + if !ok { + return nil, fmt.Errorf("invalid logger found in context") + } + + return l, nil +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/doc.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/doc.go new file mode 100644 index 00000000..3fafb018 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/doc.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package prometheusremotewriteexporter sends metrics data to Prometheus Remote Write (PRW) endpoints. 
+// +//go:generate mdatagen metadata.yaml +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md new file mode 100644 index 00000000..dc8d0cdf --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md @@ -0,0 +1,23 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# prometheusremotewrite + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### exporter_prometheusremotewrite_failed_translations + +Number of translation operations that failed to translate metrics from Otel to Prometheus + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### exporter_prometheusremotewrite_translated_time_series + +Number of Prometheus time series that were translated from OTel metrics + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go new file mode 100644 index 00000000..0e8fd93f --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net/http" + "net/url" + "strings" + "sync" + + "github.com/cenkalti/backoff/v4" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata" + prometheustranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite" +) + +type prwTelemetry interface { + recordTranslationFailure(ctx context.Context) + recordTranslatedTimeSeries(ctx context.Context, numTS int) +} + +type prwTelemetryOtel struct { + telemetryBuilder *metadata.TelemetryBuilder + otelAttrs []attribute.KeyValue +} + +func (p *prwTelemetryOtel) recordTranslationFailure(ctx context.Context) { + 
p.telemetryBuilder.ExporterPrometheusremotewriteFailedTranslations.Add(ctx, 1, metric.WithAttributes(p.otelAttrs...)) +} + +func (p *prwTelemetryOtel) recordTranslatedTimeSeries(ctx context.Context, numTS int) { + p.telemetryBuilder.ExporterPrometheusremotewriteTranslatedTimeSeries.Add(ctx, int64(numTS), metric.WithAttributes(p.otelAttrs...)) +} + +// prwExporter converts OTLP metrics to Prometheus remote write TimeSeries and sends them to a remote endpoint. +type prwExporter struct { + endpointURL *url.URL + client *http.Client + wg *sync.WaitGroup + closeChan chan struct{} + concurrency int + userAgentHeader string + maxBatchSizeBytes int + clientSettings *confighttp.ClientConfig + settings component.TelemetrySettings + retrySettings configretry.BackOffConfig + retryOnHTTP429 bool + wal *prweWAL + exporterSettings prometheusremotewrite.Settings + telemetry prwTelemetry +} + +func newPRWTelemetry(set exporter.Settings) (prwTelemetry, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err + } + + return &prwTelemetryOtel{ + telemetryBuilder: telemetryBuilder, + otelAttrs: []attribute.KeyValue{ + attribute.String("exporter", set.ID.String()), + }, + }, nil +} + +// newPRWExporter initializes a new prwExporter instance and sets fields accordingly. +func newPRWExporter(cfg *Config, set exporter.Settings) (*prwExporter, error) { + sanitizedLabels, err := validateAndSanitizeExternalLabels(cfg) + if err != nil { + return nil, err + } + + endpointURL, err := url.ParseRequestURI(cfg.ClientConfig.Endpoint) + if err != nil { + return nil, errors.New("invalid endpoint") + } + + prwTelemetry, err := newPRWTelemetry(set) + if err != nil { + return nil, err + } + + userAgentHeader := fmt.Sprintf("%s/%s", strings.ReplaceAll(strings.ToLower(set.BuildInfo.Description), " ", "-"), set.BuildInfo.Version) + + prwe := &prwExporter{ + endpointURL: endpointURL, + wg: new(sync.WaitGroup), + closeChan: make(chan struct{}), + userAgentHeader: userAgentHeader, + maxBatchSizeBytes: cfg.MaxBatchSizeBytes, + concurrency: cfg.RemoteWriteQueue.NumConsumers, + clientSettings: &cfg.ClientConfig, + settings: set.TelemetrySettings, + retrySettings: cfg.BackOffConfig, + retryOnHTTP429: retryOn429FeatureGate.IsEnabled(), + exporterSettings: prometheusremotewrite.Settings{ + Namespace: cfg.Namespace, + ExternalLabels: sanitizedLabels, + DisableTargetInfo: !cfg.TargetInfo.Enabled, + ExportCreatedMetric: cfg.CreatedMetric.Enabled, + AddMetricSuffixes: cfg.AddMetricSuffixes, + SendMetadata: cfg.SendMetadata, + }, + telemetry: prwTelemetry, + } + + prwe.wal = newWAL(cfg.WAL, prwe.export) + return prwe, nil +} + +// Start creates the prometheus client +func (prwe *prwExporter) Start(ctx context.Context, host component.Host) (err error) { + prwe.client, err = prwe.clientSettings.ToClient(ctx, host, prwe.settings) + if err != nil { + return err + } + return prwe.turnOnWALIfEnabled(contextWithLogger(ctx, prwe.settings.Logger.Named("prw.wal"))) +} + +func (prwe *prwExporter) shutdownWALIfEnabled() error { + if !prwe.walEnabled() { + return nil + } + return prwe.wal.stop() +} + +// Shutdown stops the exporter from accepting incoming calls(and return error), and wait for current export operations +// to finish before returning +func (prwe *prwExporter) Shutdown(context.Context) error { + select { + case <-prwe.closeChan: + default: + close(prwe.closeChan) + } + err := prwe.shutdownWALIfEnabled() + prwe.wg.Wait() + return err +} + +// PushMetrics converts metrics to 
Prometheus remote write TimeSeries and send to remote endpoint. It maintain a map of +// TimeSeries, validates and handles each individual metric, adding the converted TimeSeries to the map, and finally +// exports the map. +func (prwe *prwExporter) PushMetrics(ctx context.Context, md pmetric.Metrics) error { + prwe.wg.Add(1) + defer prwe.wg.Done() + + select { + case <-prwe.closeChan: + return errors.New("shutdown has been called") + default: + + tsMap, err := prometheusremotewrite.FromMetrics(md, prwe.exporterSettings) + if err != nil { + prwe.telemetry.recordTranslationFailure(ctx) + prwe.settings.Logger.Debug("failed to translate metrics, exporting remaining metrics", zap.Error(err), zap.Int("translated", len(tsMap))) + } + + prwe.telemetry.recordTranslatedTimeSeries(ctx, len(tsMap)) + + var m []*prompb.MetricMetadata + if prwe.exporterSettings.SendMetadata { + m = prometheusremotewrite.OtelMetricsToMetadata(md, prwe.exporterSettings.AddMetricSuffixes) + } + + // Call export even if a conversion error, since there may be points that were successfully converted. + return prwe.handleExport(ctx, tsMap, m) + } +} + +func validateAndSanitizeExternalLabels(cfg *Config) (map[string]string, error) { + sanitizedLabels := make(map[string]string) + for key, value := range cfg.ExternalLabels { + if key == "" || value == "" { + return nil, fmt.Errorf("prometheus remote write: external labels configuration contains an empty key or value") + } + sanitizedLabels[prometheustranslator.NormalizeLabel(key)] = value + } + + return sanitizedLabels, nil +} + +func (prwe *prwExporter) handleExport(ctx context.Context, tsMap map[string]*prompb.TimeSeries, m []*prompb.MetricMetadata) error { + // There are no metrics to export, so return. + if len(tsMap) == 0 { + return nil + } + + // Calls the helper function to convert and batch the TsMap to the desired format + requests, err := batchTimeSeries(tsMap, prwe.maxBatchSizeBytes, m) + if err != nil { + return err + } + if !prwe.walEnabled() { + // Perform a direct export otherwise. + return prwe.export(ctx, requests) + } + + // Otherwise the WAL is enabled, and just persist the requests to the WAL + // and they'll be exported in another goroutine to the RemoteWrite endpoint. + if err = prwe.wal.persistToWAL(requests); err != nil { + return consumererror.NewPermanent(err) + } + return nil +} + +// export sends a Snappy-compressed WriteRequest containing TimeSeries to a remote write endpoint in order +func (prwe *prwExporter) export(ctx context.Context, requests []*prompb.WriteRequest) error { + input := make(chan *prompb.WriteRequest, len(requests)) + for _, request := range requests { + input <- request + } + close(input) + + var wg sync.WaitGroup + + concurrencyLimit := int(math.Min(float64(prwe.concurrency), float64(len(requests)))) + wg.Add(concurrencyLimit) // used to wait for workers to be finished + + var mu sync.Mutex + var errs error + // Run concurrencyLimit of workers until there + // is no more requests to execute in the input channel. + for i := 0; i < concurrencyLimit; i++ { + go func() { + defer wg.Done() + for { + select { + case <-ctx.Done(): // Check firstly to ensure that the context wasn't cancelled. 
+ return + + case request, ok := <-input: + if !ok { + return + } + if errExecute := prwe.execute(ctx, request); errExecute != nil { + mu.Lock() + errs = multierr.Append(errs, consumererror.NewPermanent(errExecute)) + mu.Unlock() + } + } + } + }() + } + wg.Wait() + + return errs +} + +func (prwe *prwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequest) error { + // Uses proto.Marshal to convert the WriteRequest into bytes array + data, errMarshal := proto.Marshal(writeReq) + if errMarshal != nil { + return consumererror.NewPermanent(errMarshal) + } + buf := make([]byte, len(data), cap(data)) + compressedData := snappy.Encode(buf, data) + + // executeFunc can be used for backoff and non backoff scenarios. + executeFunc := func() error { + // check there was no timeout in the component level to avoid retries + // to continue to run after a timeout + select { + case <-ctx.Done(): + return backoff.Permanent(ctx.Err()) + default: + // continue + } + + // Create the HTTP POST request to send to the endpoint + req, err := http.NewRequestWithContext(ctx, "POST", prwe.endpointURL.String(), bytes.NewReader(compressedData)) + if err != nil { + return backoff.Permanent(consumererror.NewPermanent(err)) + } + + // Add necessary headers specified by: + // https://cortexmetrics.io/docs/apis/#remote-api + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", "application/x-protobuf") + req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + req.Header.Set("User-Agent", prwe.userAgentHeader) + + resp, err := prwe.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + // 2xx status code is considered a success + // 5xx errors are recoverable and the exporter should retry + // Reference for different behavior according to status code: + // https://github.com/prometheus/prometheus/pull/2552/files#diff-ae8db9d16d8057358e49d694522e7186 + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return nil + } + + body, err := io.ReadAll(io.LimitReader(resp.Body, 256)) + rerr := fmt.Errorf("remote write returned HTTP status %v; err = %w: %s", resp.Status, err, body) + if resp.StatusCode >= 500 && resp.StatusCode < 600 { + return rerr + } + + // 429 errors are recoverable and the exporter should retry if RetryOnHTTP429 enabled + // Reference: https://github.com/prometheus/prometheus/pull/12677 + if prwe.retryOnHTTP429 && resp.StatusCode == 429 { + return rerr + } + + return backoff.Permanent(consumererror.NewPermanent(rerr)) + } + + var err error + if prwe.retrySettings.Enabled { + // Use the BackOff instance to retry the func with exponential backoff. 
+ err = backoff.Retry(executeFunc, &backoff.ExponentialBackOff{ + InitialInterval: prwe.retrySettings.InitialInterval, + RandomizationFactor: prwe.retrySettings.RandomizationFactor, + Multiplier: prwe.retrySettings.Multiplier, + MaxInterval: prwe.retrySettings.MaxInterval, + MaxElapsedTime: prwe.retrySettings.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + }) + } else { + err = executeFunc() + } + + if err != nil { + return consumererror.NewPermanent(err) + } + + return err +} + +func (prwe *prwExporter) walEnabled() bool { return prwe.wal != nil } + +func (prwe *prwExporter) turnOnWALIfEnabled(ctx context.Context) error { + if !prwe.walEnabled() { + return nil + } + cancelCtx, cancel := context.WithCancel(ctx) + go func() { + <-prwe.closeChan + cancel() + }() + return prwe.wal.run(cancelCtx) +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go new file mode 100644 index 00000000..e23b0ed0 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go @@ -0,0 +1,110 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/featuregate" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" +) + +var retryOn429FeatureGate = featuregate.GlobalRegistry().MustRegister( + "exporter.prometheusremotewritexporter.RetryOn429", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.101.0"), + featuregate.WithRegisterDescription("When enabled, the Prometheus remote write exporter will retry 429 http status code. Requires exporter.prometheusremotewritexporter.metrics.RetryOn429 to be enabled."), +) + +// NewFactory creates a new Prometheus Remote Write exporter. +func NewFactory() exporter.Factory { + return exporter.NewFactory( + metadata.Type, + createDefaultConfig, + exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability)) +} + +func createMetricsExporter(ctx context.Context, set exporter.Settings, + cfg component.Config) (exporter.Metrics, error) { + + prwCfg, ok := cfg.(*Config) + if !ok { + return nil, errors.New("invalid configuration") + } + + prwe, err := newPRWExporter(prwCfg, set) + if err != nil { + return nil, err + } + + // Don't allow users to configure the queue. + // See https://github.com/open-telemetry/opentelemetry-collector/issues/2949. + // Prometheus remote write samples needs to be in chronological + // order for each timeseries. If we shard the incoming metrics + // without considering this limitation, we experience + // "out of order samples" errors. 
+ exporter, err := exporterhelper.NewMetricsExporter( + ctx, + set, + cfg, + prwe.PushMetrics, + exporterhelper.WithTimeout(prwCfg.TimeoutSettings), + exporterhelper.WithQueue(exporterhelper.QueueSettings{ + Enabled: prwCfg.RemoteWriteQueue.Enabled, + NumConsumers: 1, + QueueSize: prwCfg.RemoteWriteQueue.QueueSize, + }), + exporterhelper.WithStart(prwe.Start), + exporterhelper.WithShutdown(prwe.Shutdown), + ) + if err != nil { + return nil, err + } + return resourcetotelemetry.WrapMetricsExporter(prwCfg.ResourceToTelemetrySettings, exporter), nil +} + +func createDefaultConfig() component.Config { + retrySettings := configretry.NewDefaultBackOffConfig() + retrySettings.InitialInterval = 50 * time.Millisecond + + return &Config{ + Namespace: "", + ExternalLabels: map[string]string{}, + MaxBatchSizeBytes: 3000000, + TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), + BackOffConfig: retrySettings, + AddMetricSuffixes: true, + SendMetadata: false, + ClientConfig: confighttp.ClientConfig{ + Endpoint: "http://some.url:9411/api/prom/push", + // We almost read 0 bytes, so no need to tune ReadBufferSize. + ReadBufferSize: 0, + WriteBufferSize: 512 * 1024, + Timeout: exporterhelper.NewDefaultTimeoutSettings().Timeout, + Headers: map[string]configopaque.String{}, + }, + // TODO(jbd): Adjust the default queue size. + RemoteWriteQueue: RemoteWriteQueue{ + Enabled: true, + QueueSize: 10000, + NumConsumers: 5, + }, + TargetInfo: &TargetInfo{ + Enabled: true, + }, + CreatedMetric: &CreatedMetric{ + Enabled: false, + }, + } +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/helper.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/helper.go new file mode 100644 index 00000000..d5eca308 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/helper.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + +import ( + "errors" + "sort" + + "github.com/prometheus/prometheus/prompb" +) + +// batchTimeSeries splits series into multiple batch write requests. 
+func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int, m []*prompb.MetricMetadata) ([]*prompb.WriteRequest, error) { + if len(tsMap) == 0 { + return nil, errors.New("invalid tsMap: cannot be empty map") + } + + requests := make([]*prompb.WriteRequest, 0, len(tsMap)+len(m)) + tsArray := make([]prompb.TimeSeries, 0, len(tsMap)) + sizeOfCurrentBatch := 0 + + i := 0 + for _, v := range tsMap { + sizeOfSeries := v.Size() + + if sizeOfCurrentBatch+sizeOfSeries >= maxBatchByteSize { + wrapped := convertTimeseriesToRequest(tsArray) + requests = append(requests, wrapped) + + tsArray = make([]prompb.TimeSeries, 0, len(tsMap)-i) + sizeOfCurrentBatch = 0 + } + + tsArray = append(tsArray, *v) + sizeOfCurrentBatch += sizeOfSeries + i++ + } + + if len(tsArray) != 0 { + wrapped := convertTimeseriesToRequest(tsArray) + requests = append(requests, wrapped) + } + + mArray := make([]prompb.MetricMetadata, 0, len(m)) + sizeOfCurrentBatch = 0 + i = 0 + for _, v := range m { + sizeOfM := v.Size() + + if sizeOfCurrentBatch+sizeOfM >= maxBatchByteSize { + wrapped := convertMetadataToRequest(mArray) + requests = append(requests, wrapped) + + mArray = make([]prompb.MetricMetadata, 0, len(m)-i) + sizeOfCurrentBatch = 0 + } + + mArray = append(mArray, *v) + sizeOfCurrentBatch += sizeOfM + i++ + } + + if len(mArray) != 0 { + wrapped := convertMetadataToRequest(mArray) + requests = append(requests, wrapped) + } + + return requests, nil +} + +func convertTimeseriesToRequest(tsArray []prompb.TimeSeries) *prompb.WriteRequest { + // the remote_write endpoint only requires the timeseries. + // otlp defines it's own way to handle metric metadata + return &prompb.WriteRequest{ + // Prometheus requires time series to be sorted by Timestamp to avoid out of order problems. + // See: + // * https://github.com/open-telemetry/wg-prometheus/issues/10 + // * https://github.com/open-telemetry/opentelemetry-collector/issues/2315 + Timeseries: orderBySampleTimestamp(tsArray), + } +} + +func convertMetadataToRequest(m []prompb.MetricMetadata) *prompb.WriteRequest { + return &prompb.WriteRequest{ + Metadata: m, + } +} + +func orderBySampleTimestamp(tsArray []prompb.TimeSeries) []prompb.TimeSeries { + for i := range tsArray { + sL := tsArray[i].Samples + sort.Slice(sL, func(i, j int) bool { + return sL[i].Timestamp < sL[j].Timestamp + }) + } + return tsArray +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go new file mode 100644 index 00000000..6a7a98fa --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go @@ -0,0 +1,15 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("prometheusremotewrite") +) + +const ( + MetricsStability = component.StabilityLevelBeta +) diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000..a27eb1b7 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go @@ -0,0 +1,69 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "errors" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol/prometheusremotewrite") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol/prometheusremotewrite") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ExporterPrometheusremotewriteFailedTranslations metric.Int64Counter + ExporterPrometheusremotewriteTranslatedTimeSeries metric.Int64Counter + level configtelemetry.Level +} + +// telemetryBuilderOption applies changes to default builder. +type telemetryBuilderOption func(*TelemetryBuilder) + +// WithLevel sets the current telemetry level for the component. 
+func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.level = lvl + } +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{level: configtelemetry.LevelBasic} + for _, op := range options { + op(&builder) + } + var err, errs error + if builder.level >= configtelemetry.LevelBasic { + builder.meter = Meter(settings) + } else { + builder.meter = noop.Meter{} + } + builder.ExporterPrometheusremotewriteFailedTranslations, err = builder.meter.Int64Counter( + "exporter_prometheusremotewrite_failed_translations", + metric.WithDescription("Number of translation operations that failed to translate metrics from Otel to Prometheus"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ExporterPrometheusremotewriteTranslatedTimeSeries, err = builder.meter.Int64Counter( + "exporter_prometheusremotewrite_translated_time_series", + metric.WithDescription("Number of Prometheus time series that were translated from OTel metrics"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml new file mode 100644 index 00000000..492aaacc --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml @@ -0,0 +1,30 @@ +type: prometheusremotewrite +scope_name: otelcol/prometheusremotewrite + +status: + class: exporter + stability: + beta: [metrics] + distributions: [core, contrib] + codeowners: + active: [Aneurysm9, rapphil] + +tests: + expect_consumer_error: true + +telemetry: + metrics: + exporter_prometheusremotewrite_failed_translations: + enabled: true + description: Number of translation operations that failed to translate metrics from Otel to Prometheus + unit: 1 + sum: + value_type: int + monotonic: true + exporter_prometheusremotewrite_translated_time_series: + enabled: true + description: Number of Prometheus time series that were translated from OTel metrics + unit: 1 + sum: + value_type: int + monotonic: true diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/wal.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/wal.go new file mode 100644 index 00000000..f40e21d0 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/wal.go @@ -0,0 +1,424 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewriteexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/gogo/protobuf/proto" + "github.com/prometheus/prometheus/prompb" + "github.com/tidwall/wal" + "go.uber.org/multierr" + "go.uber.org/zap" +) + +type 
prweWAL struct { + mu sync.Mutex // mu protects the fields below. + wal *wal.Log + walConfig *WALConfig + walPath string + + exportSink func(ctx context.Context, reqL []*prompb.WriteRequest) error + + stopOnce sync.Once + stopChan chan struct{} + rWALIndex *atomic.Uint64 + wWALIndex *atomic.Uint64 +} + +const ( + defaultWALBufferSize = 300 + defaultWALTruncateFrequency = 1 * time.Minute +) + +type WALConfig struct { + Directory string `mapstructure:"directory"` + BufferSize int `mapstructure:"buffer_size"` + TruncateFrequency time.Duration `mapstructure:"truncate_frequency"` +} + +func (wc *WALConfig) bufferSize() int { + if wc.BufferSize > 0 { + return wc.BufferSize + } + return defaultWALBufferSize +} + +func (wc *WALConfig) truncateFrequency() time.Duration { + if wc.TruncateFrequency > 0 { + return wc.TruncateFrequency + } + return defaultWALTruncateFrequency +} + +func newWAL(walConfig *WALConfig, exportSink func(context.Context, []*prompb.WriteRequest) error) *prweWAL { + if walConfig == nil { + // There are cases for which the WAL can be disabled. + // TODO: Perhaps log that the WAL wasn't enabled. + return nil + } + + return &prweWAL{ + exportSink: exportSink, + walConfig: walConfig, + stopChan: make(chan struct{}), + rWALIndex: &atomic.Uint64{}, + wWALIndex: &atomic.Uint64{}, + } +} + +func (wc *WALConfig) createWAL() (*wal.Log, string, error) { + walPath := filepath.Join(wc.Directory, "prom_remotewrite") + log, err := wal.Open(walPath, &wal.Options{ + SegmentCacheSize: wc.bufferSize(), + NoCopy: true, + }) + if err != nil { + return nil, "", fmt.Errorf("prometheusremotewriteexporter: failed to open WAL: %w", err) + } + return log, walPath, nil +} + +var ( + errAlreadyClosed = errors.New("already closed") + errNilWAL = errors.New("wal is nil") +) + +// retrieveWALIndices queries the WriteAheadLog for its current first and last indices. +func (prwe *prweWAL) retrieveWALIndices() (err error) { + prwe.mu.Lock() + defer prwe.mu.Unlock() + + err = prwe.closeWAL() + if err != nil { + return err + } + + log, walPath, err := prwe.walConfig.createWAL() + if err != nil { + return err + } + + prwe.wal = log + prwe.walPath = walPath + + rIndex, err := prwe.wal.FirstIndex() + if err != nil { + return fmt.Errorf("prometheusremotewriteexporter: failed to retrieve the first WAL index: %w", err) + } + prwe.rWALIndex.Store(rIndex) + + wIndex, err := prwe.wal.LastIndex() + if err != nil { + return fmt.Errorf("prometheusremotewriteexporter: failed to retrieve the last WAL index: %w", err) + } + prwe.wWALIndex.Store(wIndex) + return nil +} + +func (prwe *prweWAL) stop() error { + err := errAlreadyClosed + prwe.stopOnce.Do(func() { + prwe.mu.Lock() + defer prwe.mu.Unlock() + + close(prwe.stopChan) + err = prwe.closeWAL() + }) + return err +} + +// run begins reading from the WAL until prwe.stopChan is closed. +func (prwe *prweWAL) run(ctx context.Context) (err error) { + var logger *zap.Logger + logger, err = loggerFromContext(ctx) + if err != nil { + return + } + + if err = prwe.retrieveWALIndices(); err != nil { + logger.Error("unable to start write-ahead log", zap.Error(err)) + return + } + + runCtx, cancel := context.WithCancel(ctx) + + // Start the process of exporting but wait until the exporting has started. 
+ waitUntilStartedCh := make(chan bool) + go func() { + signalStart := func() { close(waitUntilStartedCh) } + defer cancel() + for { + select { + case <-runCtx.Done(): + return + case <-prwe.stopChan: + return + default: + err := prwe.continuallyPopWALThenExport(runCtx, signalStart) + signalStart = func() {} + if err != nil { + // log err + logger.Error("error processing WAL entries", zap.Error(err)) + // Restart WAL + if errS := prwe.retrieveWALIndices(); errS != nil { + logger.Error("unable to re-start write-ahead log after error", zap.Error(errS)) + return + } + } + } + } + }() + <-waitUntilStartedCh + return nil +} + +// continuallyPopWALThenExport reads a prompb.WriteRequest proto encoded blob from the WAL, and moves +// the WAL's front index forward until either the read buffer period expires or the maximum +// buffer size is exceeded. When either of the two conditions are matched, it then exports +// the requests to the Remote-Write endpoint, and then truncates the head of the WAL to where +// it last read from. +func (prwe *prweWAL) continuallyPopWALThenExport(ctx context.Context, signalStart func()) (err error) { + var reqL []*prompb.WriteRequest + defer func() { + // Keeping it within a closure to ensure that the later + // updated value of reqL is always flushed to disk. + if errL := prwe.exportSink(ctx, reqL); errL != nil { + err = multierr.Append(err, errL) + } + }() + + freshTimer := func() *time.Timer { + return time.NewTimer(prwe.walConfig.truncateFrequency()) + } + + timer := freshTimer() + defer func() { + // Added in a closure to ensure we capture the later + // updated value of timer when changed in the loop below. + timer.Stop() + }() + + signalStart() + + maxCountPerUpload := prwe.walConfig.bufferSize() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-prwe.stopChan: + return nil + default: + } + + var req *prompb.WriteRequest + req, err = prwe.readPrompbFromWAL(ctx, prwe.rWALIndex.Load()) + if err != nil { + return err + } + reqL = append(reqL, req) + + var shouldExport bool + select { + case <-timer.C: + shouldExport = true + default: + shouldExport = len(reqL) >= maxCountPerUpload + } + + if !shouldExport { + continue + } + + // Otherwise, it is time to export, flush and then truncate the WAL, but also to kill the timer! + timer.Stop() + timer = freshTimer() + + if err = prwe.exportThenFrontTruncateWAL(ctx, reqL); err != nil { + return err + } + // Reset but reuse the write requests slice. + reqL = reqL[:0] + } +} + +func (prwe *prweWAL) closeWAL() error { + if prwe.wal != nil { + err := prwe.wal.Close() + prwe.wal = nil + return err + } + return nil +} + +func (prwe *prweWAL) syncAndTruncateFront() error { + prwe.mu.Lock() + defer prwe.mu.Unlock() + + if prwe.wal == nil { + return errNilWAL + } + + // Save all the entries that aren't yet committed, to the tail of the WAL. + if err := prwe.wal.Sync(); err != nil { + return err + } + // Truncate the WAL from the front for the entries that we already + // read from the WAL and had already exported. 
+ if err := prwe.wal.TruncateFront(prwe.rWALIndex.Load()); err != nil && !errors.Is(err, wal.ErrOutOfRange) { + return err + } + return nil +} + +func (prwe *prweWAL) exportThenFrontTruncateWAL(ctx context.Context, reqL []*prompb.WriteRequest) error { + if len(reqL) == 0 { + return nil + } + if cErr := ctx.Err(); cErr != nil { + return nil + } + + if errL := prwe.exportSink(ctx, reqL); errL != nil { + return errL + } + if err := prwe.syncAndTruncateFront(); err != nil { + return err + } + // Reset by retrieving the respective read and write WAL indices. + return prwe.retrieveWALIndices() +} + +// persistToWAL is the routine that'll be hooked into the exporter's receiving side and it'll +// write them to the Write-Ahead-Log so that shutdowns won't lose data, and that the routine that +// reads from the WAL can then process the previously serialized requests. +func (prwe *prweWAL) persistToWAL(requests []*prompb.WriteRequest) error { + prwe.mu.Lock() + defer prwe.mu.Unlock() + + // Write all the requests to the WAL in a batch. + batch := new(wal.Batch) + for _, req := range requests { + protoBlob, err := proto.Marshal(req) + if err != nil { + return err + } + wIndex := prwe.wWALIndex.Add(1) + batch.Write(wIndex, protoBlob) + } + + return prwe.wal.WriteBatch(batch) +} + +func (prwe *prweWAL) readPrompbFromWAL(ctx context.Context, index uint64) (wreq *prompb.WriteRequest, err error) { + prwe.mu.Lock() + defer prwe.mu.Unlock() + + var protoBlob []byte + for i := 0; i < 12; i++ { + // Firstly check if we've been terminated, then exit if so. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-prwe.stopChan: + return nil, fmt.Errorf("attempt to read from WAL after stopped") + default: + } + + if index <= 0 { + index = 1 + } + + if prwe.wal == nil { + return nil, fmt.Errorf("attempt to read from closed WAL") + } + + protoBlob, err = prwe.wal.Read(index) + if err == nil { // The read succeeded. + req := new(prompb.WriteRequest) + if err = proto.Unmarshal(protoBlob, req); err != nil { + return nil, err + } + + // Now increment the WAL's read index. + prwe.rWALIndex.Add(1) + + return req, nil + } + + if !errors.Is(err, wal.ErrNotFound) { + return nil, err + } + + if index <= 1 { + // This could be the very first attempted read, so try again, after a small sleep. + time.Sleep(time.Duration(1<= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range labels[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, seps[0]) + b = append(b, v.Value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b) +} + +var seps = []byte{'\xff'} + +// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. +// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and +// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. 
+func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, + ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { + resourceAttrs := resource.Attributes() + serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) + instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) + + // Calculate the maximum possible number of labels we could return so we can preallocate l + maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 + + if haveServiceName { + maxLabelCount++ + } + + if haveInstanceID { + maxLabelCount++ + } + + // map ensures no duplicate label name + l := make(map[string]string, maxLabelCount) + + // Ensure attributes are sorted by key for consistent merging of keys which + // collide when sanitized. + labels := make([]prompb.Label, 0, maxLabelCount) + // XXX: Should we always drop service namespace/service name/service instance ID from the labels + // (as they get mapped to other Prometheus labels)? + attributes.Range(func(key string, value pcommon.Value) bool { + if !slices.Contains(ignoreAttrs, key) { + labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + } + return true + }) + sort.Stable(ByLabelName(labels)) + + for _, label := range labels { + var finalKey = prometheustranslator.NormalizeLabel(label.Name) + if existingValue, alreadyExists := l[finalKey]; alreadyExists { + l[finalKey] = existingValue + ";" + label.Value + } else { + l[finalKey] = label.Value + } + } + + // Map service.name + service.namespace to job + if haveServiceName { + val := serviceName.AsString() + if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok { + val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) + } + l[model.JobLabel] = val + } + // Map service.instance.id to instance + if haveInstanceID { + l[model.InstanceLabel] = instance.AsString() + } + for key, value := range externalLabels { + // External labels have already been sanitized + if _, alreadyExists := l[key]; alreadyExists { + // Skip external labels if they are overridden by metric attributes + continue + } + l[key] = value + } + + for i := 0; i < len(extras); i += 2 { + if i+1 >= len(extras) { + break + } + _, found := l[extras[i]] + if found && logOnOverwrite { + log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.") + } + // internal labels should be maintained + name := extras[i] + if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { + name = prometheustranslator.NormalizeLabel(name) + } + l[name] = extras[i+1] + } + + labels = labels[:0] + for k, v := range l { + labels = append(labels, prompb.Label{Name: k, Value: v}) + } + + return labels +} + +// isValidAggregationTemporality checks whether an OTel metric has a valid +// aggregation temporality for conversion to a Prometheus metric. 
+func isValidAggregationTemporality(metric pmetric.Metric) bool { + //exhaustive:enforce + switch metric.Type() { + case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: + return true + case pmetric.MetricTypeSum: + return metric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityCumulative + case pmetric.MetricTypeHistogram: + return metric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative + case pmetric.MetricTypeExponentialHistogram: + return metric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative + } + return false +} + +func (c *prometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice, + resource pcommon.Resource, settings Settings, baseName string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + timestamp := convertTimeStamp(pt.Timestamp()) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) + + // If the sum is unset, it indicates the _sum metric point should be + // omitted + if pt.HasSum() { + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: pt.Sum(), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + sum.Value = math.Float64frombits(value.StaleNaN) + } + + sumlabels := createLabels(baseName+sumStr, baseLabels) + c.addSample(sum, sumlabels) + + } + + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.Count()), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + count.Value = math.Float64frombits(value.StaleNaN) + } + + countlabels := createLabels(baseName+countStr, baseLabels) + c.addSample(count, countlabels) + + // cumulative count for conversion to cumulative histogram + var cumulativeCount uint64 + + var bucketBounds []bucketBoundsData + + // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 + for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { + bound := pt.ExplicitBounds().At(i) + cumulativeCount += pt.BucketCounts().At(i) + bucket := &prompb.Sample{ + Value: float64(cumulativeCount), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + bucket.Value = math.Float64frombits(value.StaleNaN) + } + boundStr := strconv.FormatFloat(bound, 'f', -1, 64) + labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr) + ts := c.addSample(bucket, labels) + + bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound}) + } + // add le=+Inf bucket + infBucket := &prompb.Sample{ + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + infBucket.Value = math.Float64frombits(value.StaleNaN) + } else { + infBucket.Value = float64(pt.Count()) + } + infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr) + ts := c.addSample(infBucket, infLabels) + + bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)}) + c.addExemplars(pt, bucketBounds) + + startTimestamp := pt.StartTimestamp() + if settings.ExportCreatedMetric && startTimestamp != 0 { + labels := createLabels(baseName+createdSuffix, baseLabels) + c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp()) + } + } +} + +type exemplarType interface { + pmetric.ExponentialHistogramDataPoint | pmetric.HistogramDataPoint | pmetric.NumberDataPoint + Exemplars() pmetric.ExemplarSlice +} + +func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { + promExemplars := 
make([]prompb.Exemplar, 0, pt.Exemplars().Len()) + for i := 0; i < pt.Exemplars().Len(); i++ { + exemplar := pt.Exemplars().At(i) + exemplarRunes := 0 + + promExemplar := prompb.Exemplar{ + Value: exemplar.DoubleValue(), + Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { + val := hex.EncodeToString(traceID[:]) + exemplarRunes += utf8.RuneCountInString(prometheustranslator.ExemplarTraceIDKey) + utf8.RuneCountInString(val) + promLabel := prompb.Label{ + Name: prometheustranslator.ExemplarTraceIDKey, + Value: val, + } + promExemplar.Labels = append(promExemplar.Labels, promLabel) + } + if spanID := exemplar.SpanID(); !spanID.IsEmpty() { + val := hex.EncodeToString(spanID[:]) + exemplarRunes += utf8.RuneCountInString(prometheustranslator.ExemplarSpanIDKey) + utf8.RuneCountInString(val) + promLabel := prompb.Label{ + Name: prometheustranslator.ExemplarSpanIDKey, + Value: val, + } + promExemplar.Labels = append(promExemplar.Labels, promLabel) + } + + attrs := exemplar.FilteredAttributes() + labelsFromAttributes := make([]prompb.Label, 0, attrs.Len()) + attrs.Range(func(key string, value pcommon.Value) bool { + val := value.AsString() + exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(val) + promLabel := prompb.Label{ + Name: key, + Value: val, + } + + labelsFromAttributes = append(labelsFromAttributes, promLabel) + + return true + }) + if exemplarRunes <= maxExemplarRunes { + // only append filtered attributes if it does not cause exemplar + // labels to exceed the max number of runes + promExemplar.Labels = append(promExemplar.Labels, labelsFromAttributes...) + } + + promExemplars = append(promExemplars, promExemplar) + } + + return promExemplars +} + +// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics +func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { + var ts pcommon.Timestamp + // handle individual metric based on type + //exhaustive:enforce + switch metric.Type() { + case pmetric.MetricTypeGauge: + dataPoints := metric.Gauge().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pmetric.MetricTypeSum: + dataPoints := metric.Sum().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pmetric.MetricTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pmetric.MetricTypeExponentialHistogram: + dataPoints := metric.ExponentialHistogram().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pmetric.MetricTypeSummary: + dataPoints := metric.Summary().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + } + return ts +} + +func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp { + if a > b { + return a + } + return b +} + +func (c *prometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, + settings Settings, baseName string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + timestamp := convertTimeStamp(pt.Timestamp()) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) + + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + 
Value: pt.Sum(), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + sum.Value = math.Float64frombits(value.StaleNaN) + } + // sum and count of the summary should append suffix to baseName + sumlabels := createLabels(baseName+sumStr, baseLabels) + c.addSample(sum, sumlabels) + + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.Count()), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + count.Value = math.Float64frombits(value.StaleNaN) + } + countlabels := createLabels(baseName+countStr, baseLabels) + c.addSample(count, countlabels) + + // process each percentile/quantile + for i := 0; i < pt.QuantileValues().Len(); i++ { + qt := pt.QuantileValues().At(i) + quantile := &prompb.Sample{ + Value: qt.Value(), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + quantile.Value = math.Float64frombits(value.StaleNaN) + } + percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) + qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr) + c.addSample(quantile, qtlabels) + } + + startTimestamp := pt.StartTimestamp() + if settings.ExportCreatedMetric && startTimestamp != 0 { + createdLabels := createLabels(baseName+createdSuffix, baseLabels) + c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) + } + } +} + +// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. +// If extras are provided, corresponding label pairs are also added to the returned slice. +// If extras is uneven length, the last (unpaired) extra will be ignored. +func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label { + extraLabelCount := len(extras) / 2 + labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name + copy(labels, baseLabels) + + n := len(extras) + n -= n % 2 + for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 { + labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + } + + labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name}) + return labels +} + +// getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false. +// Otherwise it creates a new one and returns that, and true. +func (c *prometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) { + h := timeSeriesSignature(lbls) + ts := c.unique[h] + if ts != nil { + if isSameMetric(ts, lbls) { + // We already have this metric + return ts, false + } + + // Look for a matching conflict + for _, cTS := range c.conflicts[h] { + if isSameMetric(cTS, lbls) { + // We already have this metric + return cTS, false + } + } + + // New conflict + ts = &prompb.TimeSeries{ + Labels: lbls, + } + c.conflicts[h] = append(c.conflicts[h], ts) + return ts, true + } + + // This metric is new + ts = &prompb.TimeSeries{ + Labels: lbls, + } + c.unique[h] = ts + return ts, true +} + +// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist. +// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp, +// both converted to milliseconds. 
+func (c *prometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) { + ts, created := c.getOrCreateTimeSeries(lbls) + if created { + ts.Samples = []prompb.Sample{ + { + // convert ns to ms + Value: float64(convertTimeStamp(startTimestamp)), + Timestamp: convertTimeStamp(timestamp), + }, + } + } +} + +// addResourceTargetInfo converts the resource to the target info metric. +func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, converter *prometheusConverter) { + if settings.DisableTargetInfo || timestamp == 0 { + return + } + + attributes := resource.Attributes() + identifyingAttrs := []string{ + conventions.AttributeServiceNamespace, + conventions.AttributeServiceName, + conventions.AttributeServiceInstanceID, + } + nonIdentifyingAttrsCount := attributes.Len() + for _, a := range identifyingAttrs { + _, haveAttr := attributes.Get(a) + if haveAttr { + nonIdentifyingAttrsCount-- + } + } + if nonIdentifyingAttrsCount == 0 { + // If we only have job + instance, then target_info isn't useful, so don't add it. + return + } + + name := prometheustranslator.TargetInfoMetricName + if len(settings.Namespace) > 0 { + name = settings.Namespace + "_" + name + } + + labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name) + haveIdentifier := false + for _, l := range labels { + if l.Name == model.JobLabel || l.Name == model.InstanceLabel { + haveIdentifier = true + break + } + } + + if !haveIdentifier { + // We need at least one identifying label to generate target_info. + return + } + + sample := &prompb.Sample{ + Value: float64(1), + // convert ns to ms + Timestamp: convertTimeStamp(timestamp), + } + converter.addSample(sample, labels) +} + +// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms +func convertTimeStamp(timestamp pcommon.Timestamp) int64 { + return timestamp.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/histograms.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/histograms.go new file mode 100644 index 00000000..35ec2108 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/histograms.go @@ -0,0 +1,196 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewrite // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite" + +import ( + "fmt" + "math" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const defaultZeroThreshold = 1e-128 + +func (c *prometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, + resource pcommon.Resource, settings Settings, baseName string) error { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + lbls := createAttributes( + resource, + pt.Attributes(), + settings.ExternalLabels, + nil, + true, + model.MetricNameLabel, + baseName, + ) + ts, _ := c.getOrCreateTimeSeries(lbls) + + 
histogram, err := exponentialToNativeHistogram(pt) + if err != nil { + return err + } + ts.Histograms = append(ts.Histograms, histogram) + + exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt) + ts.Exemplars = append(ts.Exemplars, exemplars...) + } + + return nil +} + +// exponentialToNativeHistogram translates OTel Exponential Histogram data point +// to Prometheus Native Histogram. +func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) { + scale := p.Scale() + if scale < -4 { + return prompb.Histogram{}, + fmt.Errorf("cannot convert exponential to native histogram."+ + " Scale must be >= -4, was %d", scale) + } + + var scaleDown int32 + if scale > 8 { + scaleDown = scale - 8 + scale = 8 + } + + pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown) + nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown) + + h := prompb.Histogram{ + // The counter reset detection must be compatible with Prometheus to + // safely set ResetHint to NO. This is not ensured currently. + // Sending a sample that triggers counter reset but with ResetHint==NO + // would lead to Prometheus panic as it does not double check the hint. + // Thus we're explicitly saying UNKNOWN here, which is always safe. + // TODO: using created time stamp should be accurate, but we + // need to know here if it was used for the detection. + // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 + // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 + ResetHint: prompb.Histogram_UNKNOWN, + Schema: scale, + + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()}, + // TODO use zero_threshold, if set, see + // https://github.com/open-telemetry/opentelemetry-proto/pull/441 + ZeroThreshold: defaultZeroThreshold, + + PositiveSpans: pSpans, + PositiveDeltas: pDeltas, + NegativeSpans: nSpans, + NegativeDeltas: nDeltas, + + Timestamp: convertTimeStamp(p.Timestamp()), + } + + if p.Flags().NoRecordedValue() { + h.Sum = math.Float64frombits(value.StaleNaN) + h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + } else { + if p.HasSum() { + h.Sum = p.Sum() + } + h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + } + return h, nil +} + +// convertBucketsLayout translates OTel Exponential Histogram dense buckets +// representation to Prometheus Native Histogram sparse bucket representation. +// +// The translation logic is taken from the client_golang `histogram.go#makeBuckets` +// function, see `makeBuckets` https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go +// The bucket indexes conversion was adjusted, since OTel exp. histogram bucket +// index 0 corresponds to the range (1, base] while Prometheus bucket index 0 +// to the range (base 1]. +// +// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. 
+func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]prompb.BucketSpan, []int64) { + bucketCounts := buckets.BucketCounts() + if bucketCounts.Len() == 0 { + return nil, nil + } + + var ( + spans []prompb.BucketSpan + deltas []int64 + count int64 + prevCount int64 + ) + + appendDelta := func(count int64) { + spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + // Let the compiler figure out that this is const during this function by + // moving it into a local variable. + numBuckets := bucketCounts.Len() + + // The offset is scaled and adjusted by 1 as described above. + bucketIdx := buckets.Offset()>>scaleDown + 1 + spans = append(spans, prompb.BucketSpan{ + Offset: bucketIdx, + Length: 0, + }) + + for i := 0; i < numBuckets; i++ { + // The offset is scaled and adjusted by 1 as described above. + nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 + if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. + count += int64(bucketCounts.At(i)) + continue + } + if count == 0 { + count = int64(bucketCounts.At(i)) + continue + } + + gap := nextBucketIdx - bucketIdx - 1 + if gap > 2 { + // We have to create a new span, because we have found a gap + // of more than two buckets. The constant 2 is copied from the logic in + // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 + spans = append(spans, prompb.BucketSpan{ + Offset: gap, + Length: 0, + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < gap; j++ { + appendDelta(0) + } + } + appendDelta(count) + count = int64(bucketCounts.At(i)) + bucketIdx = nextBucketIdx + } + // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. + gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx + if gap > 2 { + // We have to create a new span, because we have found a gap + // of more than two buckets. The constant 2 is copied from the logic in + // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 + spans = append(spans, prompb.BucketSpan{ + Offset: gap, + Length: 0, + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. 
+ for j := int32(0); j < gap; j++ { + appendDelta(0) + } + } + appendDelta(count) + + return spans, deltas +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metadata.yaml b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metadata.yaml new file mode 100644 index 00000000..79f2d095 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metadata.yaml @@ -0,0 +1,3 @@ +status: + codeowners: + active: [Aneurysm9] \ No newline at end of file diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metrics_to_prw.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metrics_to_prw.go new file mode 100644 index 00000000..05d62498 --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/metrics_to_prw.go @@ -0,0 +1,201 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewrite // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite" + +import ( + "errors" + "fmt" + "sort" + "strconv" + + "github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/multierr" + + prometheustranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" +) + +type Settings struct { + Namespace string + ExternalLabels map[string]string + DisableTargetInfo bool + ExportCreatedMetric bool + AddMetricSuffixes bool + SendMetadata bool +} + +// FromMetrics converts pmetric.Metrics to Prometheus remote write format. +func FromMetrics(md pmetric.Metrics, settings Settings) (map[string]*prompb.TimeSeries, error) { + c := newPrometheusConverter() + errs := c.fromMetrics(md, settings) + tss := c.timeSeries() + out := make(map[string]*prompb.TimeSeries, len(tss)) + for i := range tss { + out[strconv.Itoa(i)] = &tss[i] + } + + return out, errs +} + +// prometheusConverter converts from OTel write format to Prometheus write format. +type prometheusConverter struct { + unique map[uint64]*prompb.TimeSeries + conflicts map[uint64][]*prompb.TimeSeries +} + +func newPrometheusConverter() *prometheusConverter { + return &prometheusConverter{ + unique: map[uint64]*prompb.TimeSeries{}, + conflicts: map[uint64][]*prompb.TimeSeries{}, + } +} + +// fromMetrics converts pmetric.Metrics to Prometheus remote write format. 
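+// Metrics with an unsupported temporality/type combination, or with no data
+// points, are skipped and reported through the returned error.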
+func (c *prometheusConverter) fromMetrics(md pmetric.Metrics, settings Settings) (errs error) { + resourceMetricsSlice := md.ResourceMetrics() + for i := 0; i < resourceMetricsSlice.Len(); i++ { + resourceMetrics := resourceMetricsSlice.At(i) + resource := resourceMetrics.Resource() + scopeMetricsSlice := resourceMetrics.ScopeMetrics() + // keep track of the most recent timestamp in the ResourceMetrics for + // use with the "target" info metric + var mostRecentTimestamp pcommon.Timestamp + for j := 0; j < scopeMetricsSlice.Len(); j++ { + metricSlice := scopeMetricsSlice.At(j).Metrics() + + // TODO: decide if instrumentation library information should be exported as labels + for k := 0; k < metricSlice.Len(); k++ { + metric := metricSlice.At(k) + mostRecentTimestamp = maxTimestamp(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) + + if !isValidAggregationTemporality(metric) { + errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name())) + continue + } + + promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + + // handle individual metrics based on type + //exhaustive:enforce + switch metric.Type() { + case pmetric.MetricTypeGauge: + dataPoints := metric.Gauge().DataPoints() + if dataPoints.Len() == 0 { + errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break + } + c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName) + case pmetric.MetricTypeSum: + dataPoints := metric.Sum().DataPoints() + if dataPoints.Len() == 0 { + errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break + } + c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName) + case pmetric.MetricTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + if dataPoints.Len() == 0 { + errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break + } + c.addHistogramDataPoints(dataPoints, resource, settings, promName) + case pmetric.MetricTypeExponentialHistogram: + dataPoints := metric.ExponentialHistogram().DataPoints() + if dataPoints.Len() == 0 { + errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break + } + errs = multierr.Append(errs, c.addExponentialHistogramDataPoints( + dataPoints, + resource, + settings, + promName, + )) + case pmetric.MetricTypeSummary: + dataPoints := metric.Summary().DataPoints() + if dataPoints.Len() == 0 { + errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break + } + c.addSummaryDataPoints(dataPoints, resource, settings, promName) + default: + errs = multierr.Append(errs, errors.New("unsupported metric type")) + } + } + } + addResourceTargetInfo(resource, settings, mostRecentTimestamp, c) + } + + return +} + +// timeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format. 
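+// Unique series are appended first, followed by any series whose label-set
+// hashes collided and were therefore kept in the conflicts map.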
+func (c *prometheusConverter) timeSeries() []prompb.TimeSeries { + conflicts := 0 + for _, ts := range c.conflicts { + conflicts += len(ts) + } + allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts) + for _, ts := range c.unique { + allTS = append(allTS, *ts) + } + for _, cTS := range c.conflicts { + for _, ts := range cTS { + allTS = append(allTS, *ts) + } + } + + return allTS +} + +func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool { + if len(ts.Labels) != len(lbls) { + return false + } + for i, l := range ts.Labels { + if l.Name != ts.Labels[i].Name || l.Value != ts.Labels[i].Value { + return false + } + } + return true +} + +// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value, +// the exemplar is added to the bucket bound's time series, provided that the time series' has samples. +func (c *prometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) { + if len(bucketBounds) == 0 { + return + } + + exemplars := getPromExemplars(dataPoint) + if len(exemplars) == 0 { + return + } + + sort.Sort(byBucketBoundsData(bucketBounds)) + for _, exemplar := range exemplars { + for _, bound := range bucketBounds { + if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound { + bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar) + break + } + } + } +} + +// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it. +// If there is no corresponding TimeSeries already, it's created. +// The corresponding TimeSeries is returned. +// If either lbls is nil/empty or sample is nil, nothing is done. +func (c *prometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries { + if sample == nil || len(lbls) == 0 { + // This shouldn't happen + return nil + } + + ts, _ := c.getOrCreateTimeSeries(lbls) + ts.Samples = append(ts.Samples, *sample) + return ts +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/number_data_points.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/number_data_points.go new file mode 100644 index 00000000..d128359f --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/number_data_points.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewrite // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite" + +import ( + "math" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func (c *prometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, settings Settings, name string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + labels := createAttributes( + resource, + pt.Attributes(), + settings.ExternalLabels, + nil, + true, + model.MetricNameLabel, + name, + ) + sample := &prompb.Sample{ + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), + } + switch pt.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + sample.Value = float64(pt.IntValue()) + 
case pmetric.NumberDataPointValueTypeDouble: + sample.Value = pt.DoubleValue() + } + if pt.Flags().NoRecordedValue() { + sample.Value = math.Float64frombits(value.StaleNaN) + } + c.addSample(sample, labels) + } +} + +func (c *prometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + lbls := createAttributes( + resource, + pt.Attributes(), + settings.ExternalLabels, + nil, + true, + model.MetricNameLabel, + name, + ) + sample := &prompb.Sample{ + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), + } + switch pt.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + sample.Value = float64(pt.IntValue()) + case pmetric.NumberDataPointValueTypeDouble: + sample.Value = pt.DoubleValue() + } + if pt.Flags().NoRecordedValue() { + sample.Value = math.Float64frombits(value.StaleNaN) + } + ts := c.addSample(sample, lbls) + if ts != nil { + exemplars := getPromExemplars[pmetric.NumberDataPoint](pt) + ts.Exemplars = append(ts.Exemplars, exemplars...) + } + + // add created time series if needed + if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() { + startTimestamp := pt.StartTimestamp() + if startTimestamp == 0 { + return + } + + createdLabels := make([]prompb.Label, len(lbls)) + copy(createdLabels, lbls) + for i, l := range createdLabels { + if l.Name == model.MetricNameLabel { + createdLabels[i].Value = name + createdSuffix + break + } + } + c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) + } + } +} diff --git a/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go new file mode 100644 index 00000000..046a713c --- /dev/null +++ b/src/otel-collector/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheusremotewrite // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite" + +import ( + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/pdata/pmetric" + + prometheustranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" +) + +func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMetadata_MetricType { + // metric metadata can be used to support Prometheus types that don't exist + // in OpenTelemetry. 
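+	// For example, a non-monotonic Sum is reported as GAUGE unless its metadata
+	// type is "info" or "stateset", and a Gauge whose metadata type is "unknown"
+	// is reported as UNKNOWN.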
+ typeFromMetadata, hasTypeFromMetadata := otelMetric.Metadata().Get(prometheustranslator.MetricMetadataTypeKey) + switch otelMetric.Type() { + case pmetric.MetricTypeGauge: + if hasTypeFromMetadata && typeFromMetadata.Str() == string(model.MetricTypeUnknown) { + return prompb.MetricMetadata_UNKNOWN + } + return prompb.MetricMetadata_GAUGE + case pmetric.MetricTypeSum: + if otelMetric.Sum().IsMonotonic() { + return prompb.MetricMetadata_COUNTER + } + if hasTypeFromMetadata && typeFromMetadata.Str() == string(model.MetricTypeInfo) { + return prompb.MetricMetadata_INFO + } + if hasTypeFromMetadata && typeFromMetadata.Str() == string(model.MetricTypeStateset) { + return prompb.MetricMetadata_STATESET + } + return prompb.MetricMetadata_GAUGE + case pmetric.MetricTypeHistogram: + return prompb.MetricMetadata_HISTOGRAM + case pmetric.MetricTypeSummary: + return prompb.MetricMetadata_SUMMARY + case pmetric.MetricTypeExponentialHistogram: + return prompb.MetricMetadata_HISTOGRAM + } + return prompb.MetricMetadata_UNKNOWN +} + +func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb.MetricMetadata { + resourceMetricsSlice := md.ResourceMetrics() + + metadataLength := 0 + for i := 0; i < resourceMetricsSlice.Len(); i++ { + scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() + for j := 0; j < scopeMetricsSlice.Len(); j++ { + metadataLength += scopeMetricsSlice.At(j).Metrics().Len() + } + } + + var metadata = make([]*prompb.MetricMetadata, 0, metadataLength) + for i := 0; i < resourceMetricsSlice.Len(); i++ { + resourceMetrics := resourceMetricsSlice.At(i) + scopeMetricsSlice := resourceMetrics.ScopeMetrics() + + for j := 0; j < scopeMetricsSlice.Len(); j++ { + scopeMetrics := scopeMetricsSlice.At(j) + for k := 0; k < scopeMetrics.Metrics().Len(); k++ { + metric := scopeMetrics.Metrics().At(k) + entry := prompb.MetricMetadata{ + Type: otelMetricTypeToPromMetricType(metric), + MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes), + Help: metric.Description(), + } + metadata = append(metadata, &entry) + } + } + } + + return metadata +} diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/LICENSE b/src/otel-collector/vendor/github.com/prometheus/prometheus/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/NOTICE b/src/otel-collector/vendor/github.com/prometheus/prometheus/NOTICE new file mode 100644 index 00000000..8605c258 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/NOTICE @@ -0,0 +1,108 @@ +The Prometheus systems and service monitoring server +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (https://soundcloud.com/). + + +The following components are included in this product: + +Bootstrap +https://getbootstrap.com +Copyright 2011-2014 Twitter, Inc. +Licensed under the MIT License + +bootstrap3-typeahead.js +https://github.com/bassjobsen/Bootstrap-3-Typeahead +Original written by @mdo and @fat +Copyright 2014 Bass Jobsen @bassjobsen +Licensed under the Apache License, Version 2.0 + +fuzzy +https://github.com/mattyork/fuzzy +Original written by @mattyork +Copyright 2012 Matt York +Licensed under the MIT License + +bootstrap-datetimepicker.js +https://github.com/Eonasdan/bootstrap-datetimepicker +Copyright 2015 Jonathan Peterson (@Eonasdan) +Licensed under the MIT License + +moment.js +https://github.com/moment/moment/ +Copyright JS Foundation and other contributors +Licensed under the MIT License + +Rickshaw +https://github.com/shutterstock/rickshaw +Copyright 2011-2014 by Shutterstock Images, LLC +See https://github.com/shutterstock/rickshaw/blob/master/LICENSE for license details + +mustache.js +https://github.com/janl/mustache.js +Copyright 2009 Chris Wanstrath (Ruby) +Copyright 2010-2014 Jan Lehnardt (JavaScript) +Copyright 2010-2015 The mustache.js community +Licensed under the MIT License + +jQuery +https://jquery.org +Copyright jQuery Foundation and other contributors +Licensed under the MIT License + +Protocol Buffers for Go with Gadgets +https://github.com/gogo/protobuf/ +Copyright (c) 2013, The GoGo Authors. +See source code for license details. + +Go support for leveled logs, analogous to +https://code.google.com/p/google-glog/ +Copyright 2013 Google Inc. +Licensed under the Apache License, Version 2.0 + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 + +DNS library in Go +https://miek.nl/2014/august/16/go-dns-package/ +Copyright 2009 The Go Authors, 2011 Miek Gieben +See https://github.com/miekg/dns/blob/master/LICENSE for license details. + +LevelDB key/value database in Go +https://github.com/syndtr/goleveldb +Copyright 2012 Suryandaru Triandana +See https://github.com/syndtr/goleveldb/blob/master/LICENSE for license details. + +gosnappy - a fork of code.google.com/p/snappy-go +https://github.com/syndtr/gosnappy +Copyright 2011 The Snappy-Go Authors +See https://github.com/syndtr/gosnappy/blob/master/LICENSE for license details. + +go-zookeeper - Native ZooKeeper client for Go +https://github.com/samuel/go-zookeeper +Copyright (c) 2013, Samuel Stauffer +See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details. + +Time series compression algorithm from Facebook's Gorilla paper +https://github.com/dgryski/go-tsz +Copyright (c) 2015,2016 Damian Gryski +See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details. + +The Go programming language +https://go.dev/ +Copyright (c) 2009 The Go Authors +See https://go.dev/LICENSE for license details. 
+ +The Codicon icon font from Microsoft +https://github.com/microsoft/vscode-codicons +Copyright (c) Microsoft Corporation and other contributors +See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details. + +We also use code from a large number of npm packages. For details, see: +- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json +- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json +- The individual package licenses as copied from the node_modules directory can be found in + the npm_licenses.tar.bz2 archive in release tarballs and Docker images. diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go b/src/otel-collector/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go new file mode 100644 index 00000000..93458f64 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go @@ -0,0 +1,34 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timestamp + +import ( + "math" + "time" +) + +// FromTime returns a new millisecond timestamp from a time. +func FromTime(t time.Time) int64 { + return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond) +} + +// Time returns a new time.Time object from a millisecond timestamp. +func Time(ts int64) time.Time { + return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC() +} + +// FromFloatSeconds returns a millisecond timestamp from float seconds. +func FromFloatSeconds(ts float64) int64 { + return int64(math.Round(ts * 1000)) +} diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/model/value/value.go b/src/otel-collector/vendor/github.com/prometheus/prometheus/model/value/value.go new file mode 100644 index 00000000..655ce852 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/model/value/value.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package value + +import ( + "math" +) + +const ( + // NormalNaN is a quiet NaN. This is also math.NaN(). + NormalNaN uint64 = 0x7ff8000000000001 + + // StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0. + // This value is chosen with many leading 0s, so we have scope to store more + // complicated values in the future. 
It is 2 rather than 1 to make + // it easier to distinguish from the NormalNaN by a human when debugging. + StaleNaN uint64 = 0x7ff0000000000002 +) + +// IsStaleNaN returns true when the provided NaN value is a stale marker. +func IsStaleNaN(v float64) bool { + return math.Float64bits(v) == StaleNaN +} diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/README.md b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/README.md new file mode 100644 index 00000000..a33d7bfb --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/README.md @@ -0,0 +1,9 @@ +The compiled protobufs are version controlled and you won't normally need to +re-compile them when building Prometheus. + +If however you have modified the defs and do need to re-compile, run +`make proto` from the parent dir. + +In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in +your PATH. + diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.lock b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.lock new file mode 100644 index 00000000..30b0f084 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.lock @@ -0,0 +1,10 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: gogo + repository: protobuf + branch: main + commit: 4df00b267f944190a229ce3695781e99 + digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw= + create_time: 2021-08-10T00:14:28.345069Z diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.yaml b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.yaml new file mode 100644 index 00000000..0f7ec134 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/buf.yaml @@ -0,0 +1,21 @@ +version: v1 +name: buf.build/prometheus/prometheus +lint: + ignore_only: + ENUM_VALUE_PREFIX: + - remote.proto + - types.proto + - io/prometheus/client/metrics.proto + ENUM_ZERO_VALUE_SUFFIX: + - remote.proto + - types.proto + - io/prometheus/client/metrics.proto + PACKAGE_DIRECTORY_MATCH: + - remote.proto + - types.proto + PACKAGE_VERSION_SUFFIX: + - remote.proto + - types.proto + - io/prometheus/client/metrics.proto +deps: + - buf.build/gogo/protobuf diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/custom.go b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/custom.go new file mode 100644 index 00000000..13d6e0f0 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -0,0 +1,39 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prompb + +import ( + "sync" +) + +func (m Sample) T() int64 { return m.Timestamp } +func (m Sample) V() float64 { return m.Value } + +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { + size := r.Size() + data, ok := p.Get().(*[]byte) + if ok && cap(*data) >= size { + n, err := r.MarshalToSizedBuffer((*data)[:size]) + if err != nil { + return nil, err + } + return (*data)[:n], nil + } + return r.Marshal() +} diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go new file mode 100644 index 00000000..19318878 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go @@ -0,0 +1,1702 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: remote.proto + +package prompb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ReadRequest_ResponseType int32 + +const ( + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. + // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + ReadRequest_SAMPLES ReadRequest_ResponseType = 0 + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. 
+ // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + ReadRequest_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1 +) + +var ReadRequest_ResponseType_name = map[int32]string{ + 0: "SAMPLES", + 1: "STREAMED_XOR_CHUNKS", +} + +var ReadRequest_ResponseType_value = map[string]int32{ + "SAMPLES": 0, + "STREAMED_XOR_CHUNKS": 1, +} + +func (x ReadRequest_ResponseType) String() string { + return proto.EnumName(ReadRequest_ResponseType_name, int32(x)) +} + +func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{1, 0} +} + +type WriteRequest struct { + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` + Metadata []MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{0} +} +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m *WriteRequest) GetMetadata() []MetricMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +// ReadRequest represents a remote read request. +type ReadRequest struct { + Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. 
+ AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{1} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetQueries() []*Query { + if m != nil { + return m.Queries + } + return nil +} + +func (m *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType { + if m != nil { + return m.AcceptedResponseTypes + } + return nil +} + +// ReadResponse is a response when response_type equals SAMPLES. +type ReadResponse struct { + // In same order as the request's queries. + Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{2} +} +func (m *ReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +type Query struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` + Hints *ReadHints `protobuf:"bytes,4,opt,name=hints,proto3" json:"hints,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{3} +} +func (m *Query) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(m, src) +} +func (m *Query) XXX_Size() int { + return m.Size() +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo + +func (m *Query) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *Query) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *Query) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +func (m *Query) GetHints() *ReadHints { + if m != nil { + return m.Hints + } + return nil +} + +type QueryResult struct { + // Samples within a time series must be ordered by time. + Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{4} +} +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(m, src) +} +func (m *QueryResult) XXX_Size() int { + return m.Size() +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResult proto.InternalMessageInfo + +func (m *QueryResult) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. +type ChunkedReadResponse struct { + ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"` + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. 
+ QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} } +func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) } +func (*ChunkedReadResponse) ProtoMessage() {} +func (*ChunkedReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{5} +} +func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkedReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkedReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkedReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkedReadResponse.Merge(m, src) +} +func (m *ChunkedReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ChunkedReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkedReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkedReadResponse proto.InternalMessageInfo + +func (m *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries { + if m != nil { + return m.ChunkedSeries + } + return nil +} + +func (m *ChunkedReadResponse) GetQueryIndex() int64 { + if m != nil { + return m.QueryIndex + } + return 0 +} + +func init() { + proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value) + proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest") + proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse") + proto.RegisterType((*Query)(nil), "prometheus.Query") + proto.RegisterType((*QueryResult)(nil), "prometheus.QueryResult") + proto.RegisterType((*ChunkedReadResponse)(nil), "prometheus.ChunkedReadResponse") +} + +func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } + +var fileDescriptor_eefc82927d57d89b = []byte{ + // 496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xee, 0x26, 0x69, 0x13, 0x8d, 0x43, 0x14, 0xb6, 0x2d, 0x09, 0x39, 0xa4, 0x91, 0xc5, 0x21, + 0x52, 0x51, 0x10, 0xa1, 0xe2, 0xd4, 0x03, 0x69, 0x89, 0x54, 0xa0, 0xe6, 0x67, 0x13, 0x04, 0x42, + 0x48, 0xd6, 0xc6, 0x1e, 0x35, 0x16, 0xf5, 0x4f, 0x77, 0xd7, 0x52, 0xf3, 0x16, 0x3c, 0x13, 0xa7, + 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x2d, 0x5c, 0xb8, 0xad, 0xbf, 0x3f, + 0xcf, 0xcc, 0xce, 0x42, 0x53, 0x60, 0x18, 0x2b, 0x1c, 0x25, 0x22, 0x56, 0x31, 0x85, 0x44, 0xc4, + 0x21, 0xaa, 0x25, 0xa6, 0xb2, 0x67, 0xa9, 0x55, 0x82, 0x32, 0x27, 0x7a, 0x7b, 0x17, 0xf1, 0x45, + 0xac, 0x8f, 0x8f, 0xb2, 0x53, 0x8e, 0xda, 0x5f, 0x09, 0x34, 0x3f, 0x88, 0x40, 0x21, 0xc3, 0xab, + 0x14, 0xa5, 0xa2, 0xc7, 0x00, 0x2a, 0x08, 0x51, 0xa2, 0x08, 0x50, 0x76, 0xc9, 0xa0, 0x3a, 0xb4, + 0xc6, 0xf7, 0x46, 0x7f, 0x42, 0x47, 0xf3, 0x20, 0xc4, 0x99, 0x66, 0x4f, 0x6a, 0x37, 0x3f, 0x0f, + 0xb6, 0x98, 0xa1, 0xa7, 0xc7, 0xd0, 0x08, 0x51, 0x71, 0x9f, 0x2b, 0xde, 0xad, 0x6a, 0x6f, 0xcf, + 0xf4, 0x3a, 0xa8, 0x44, 0xe0, 0x39, 0x85, 0xa2, 0xf0, 0x6f, 0x1c, 0x2f, 0x6b, 0x8d, 0x4a, 0xbb, + 0x6a, 0x7f, 0x27, 0x60, 0x31, 0xe4, 0x7e, 0x59, 0xd1, 0x21, 
0xd4, 0xaf, 0x52, 0xb3, 0x9c, 0xbb, + 0x66, 0xe4, 0xbb, 0x14, 0xc5, 0x8a, 0x95, 0x0a, 0xfa, 0x19, 0x3a, 0xdc, 0xf3, 0x30, 0x51, 0xe8, + 0xbb, 0x02, 0x65, 0x12, 0x47, 0x12, 0x5d, 0x3d, 0x86, 0x6e, 0x65, 0x50, 0x1d, 0xb6, 0xc6, 0x0f, + 0x4c, 0xb3, 0xf1, 0x9b, 0x11, 0x2b, 0xd4, 0xf3, 0x55, 0x82, 0x6c, 0xbf, 0x0c, 0x31, 0x51, 0x69, + 0x1f, 0x41, 0xd3, 0x04, 0xa8, 0x05, 0xf5, 0xd9, 0xc4, 0x79, 0x7b, 0x3e, 0x9d, 0xb5, 0xb7, 0x68, + 0x07, 0x76, 0x67, 0x73, 0x36, 0x9d, 0x38, 0xd3, 0xe7, 0xee, 0xc7, 0x37, 0xcc, 0x3d, 0x3d, 0x7b, + 0xff, 0xfa, 0xd5, 0xac, 0x4d, 0xec, 0x49, 0xe6, 0xe2, 0x9b, 0x28, 0xfa, 0x18, 0xea, 0x02, 0x65, + 0x7a, 0xa9, 0xca, 0x86, 0x3a, 0xff, 0x36, 0xa4, 0x79, 0x56, 0xea, 0xec, 0x6f, 0x04, 0xb6, 0x35, + 0x41, 0x1f, 0x02, 0x95, 0x8a, 0x0b, 0xe5, 0xea, 0xa9, 0x2b, 0x1e, 0x26, 0x6e, 0x98, 0xe5, 0x90, + 0x61, 0x95, 0xb5, 0x35, 0x33, 0x2f, 0x09, 0x47, 0xd2, 0x21, 0xb4, 0x31, 0xf2, 0x6f, 0x6b, 0x2b, + 0x5a, 0xdb, 0xc2, 0xc8, 0x37, 0x95, 0x47, 0xd0, 0x08, 0xb9, 0xf2, 0x96, 0x28, 0x64, 0x71, 0x73, + 0x5d, 0xb3, 0xaa, 0x73, 0xbe, 0xc0, 0x4b, 0x27, 0x17, 0xb0, 0x8d, 0x92, 0x1e, 0xc2, 0xf6, 0x32, + 0x88, 0x94, 0xec, 0xd6, 0x06, 0x64, 0x68, 0x8d, 0xf7, 0xff, 0x1e, 0xee, 0x59, 0x46, 0xb2, 0x5c, + 0x63, 0x4f, 0xc1, 0x32, 0x9a, 0xa3, 0x4f, 0xff, 0x7f, 0xd3, 0xcc, 0x1d, 0xb3, 0xaf, 0x61, 0xf7, + 0x74, 0x99, 0x46, 0x5f, 0xb2, 0xcb, 0x31, 0xa6, 0xfa, 0x0c, 0x5a, 0x5e, 0x0e, 0xbb, 0xb7, 0x22, + 0xef, 0x9b, 0x91, 0x85, 0xb1, 0x48, 0xbd, 0xe3, 0x99, 0x9f, 0xf4, 0x00, 0xac, 0x6c, 0x8d, 0x56, + 0x6e, 0x10, 0xf9, 0x78, 0x5d, 0xcc, 0x09, 0x34, 0xf4, 0x22, 0x43, 0x4e, 0xf6, 0x6e, 0xd6, 0x7d, + 0xf2, 0x63, 0xdd, 0x27, 0xbf, 0xd6, 0x7d, 0xf2, 0x69, 0x27, 0xcb, 0x4d, 0x16, 0x8b, 0x1d, 0xfd, + 0x92, 0x9e, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x13, 0x18, 0x12, 0x0a, 0x88, 0x03, 0x00, 0x00, +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metadata) > 0 { + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.AcceptedResponseTypes) > 0 { + dAtA2 := make([]byte, len(m.AcceptedResponseTypes)*10) + var j1 int + for _, num := range m.AcceptedResponseTypes { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintRemote(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x12 + } + if len(m.Queries) > 0 { + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Query) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.EndTimestampMs != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + 
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ChunkedReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkedReadResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkedReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.QueryIndex != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.QueryIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChunkedSeries) > 0 { + for iNdEx := len(m.ChunkedSeries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChunkedSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { + offset -= sovRemote(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if len(m.AcceptedResponseTypes) > 0 { + l = 0 + for _, e := range m.AcceptedResponseTypes { + l += sovRemote(uint64(e)) + } + n += 1 + sovRemote(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovRemote(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovRemote(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRemote(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *QueryResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
return n +} + +func (m *ChunkedReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChunkedSeries) > 0 { + for _, e := range m.ChunkedSeries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.QueryIndex != 0 { + n += 1 + sovRemote(uint64(m.QueryIndex)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRemote(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRemote(x uint64) (n int) { + return sovRemote(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata, MetricMetadata{}) + if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &Query{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType == 0 { + var v ReadRequest_ResponseType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= ReadRequest_ResponseType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.AcceptedResponseTypes) == 0 { + m.AcceptedResponseTypes = make([]ReadRequest_ResponseType, 0, elementCount) + } + for iNdEx < postIndex { + var v ReadRequest_ResponseType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= ReadRequest_ResponseType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedResponseTypes", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Query) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Query: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Query: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &ReadHints{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, &TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkedReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkedReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkedReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkedSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkedSeries = append(m.ChunkedSeries, &ChunkedSeries{}) + if err := m.ChunkedSeries[len(m.ChunkedSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryIndex", wireType) + } + m.QueryIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRemote(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRemote + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRemote + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRemote + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRemote = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRemote = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRemote = fmt.Errorf("proto: unexpected end of group") +) diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.proto new file mode 100644 index 00000000..b4f82f5f --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/remote.proto @@ -0,0 +1,88 @@ +// Copyright 2016 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompb"; + +import "types.proto"; +import "gogoproto/gogo.proto"; + +message WriteRequest { + repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; + // Cortex uses this field to determine the source of the write request. + // We reserve it to avoid any compatibility issues. + reserved 2; + repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; +} + +// ReadRequest represents a remote read request. +message ReadRequest { + repeated Query queries = 1; + + enum ResponseType { + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. 
+ // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + SAMPLES = 0; + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. + // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + STREAMED_XOR_CHUNKS = 1; + } + + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. + repeated ResponseType accepted_response_types = 2; +} + +// ReadResponse is a response when response_type equals SAMPLES. +message ReadResponse { + // In same order as the request's queries. + repeated QueryResult results = 1; +} + +message Query { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated prometheus.LabelMatcher matchers = 3; + prometheus.ReadHints hints = 4; +} + +message QueryResult { + // Samples within a time series must be ordered by time. + repeated prometheus.TimeSeries timeseries = 1; +} + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. +message ChunkedReadResponse { + repeated prometheus.ChunkedSeries chunked_series = 1; + + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. + int64 query_index = 2; +} diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.pb.go new file mode 100644 index 00000000..93883daa --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -0,0 +1,4440 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types.proto + +package prompb + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricMetadata_MetricType int32 + +const ( + MetricMetadata_UNKNOWN MetricMetadata_MetricType = 0 + MetricMetadata_COUNTER MetricMetadata_MetricType = 1 + MetricMetadata_GAUGE MetricMetadata_MetricType = 2 + MetricMetadata_HISTOGRAM MetricMetadata_MetricType = 3 + MetricMetadata_GAUGEHISTOGRAM MetricMetadata_MetricType = 4 + MetricMetadata_SUMMARY MetricMetadata_MetricType = 5 + MetricMetadata_INFO MetricMetadata_MetricType = 6 + MetricMetadata_STATESET MetricMetadata_MetricType = 7 +) + +var MetricMetadata_MetricType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "COUNTER", + 2: "GAUGE", + 3: "HISTOGRAM", + 4: "GAUGEHISTOGRAM", + 5: "SUMMARY", + 6: "INFO", + 7: "STATESET", +} + +var MetricMetadata_MetricType_value = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +func (x MetricMetadata_MetricType) String() string { + return proto.EnumName(MetricMetadata_MetricType_name, int32(x)) +} + +func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_UNKNOWN Histogram_ResetHint = 0 + Histogram_YES Histogram_ResetHint = 1 + Histogram_NO Histogram_ResetHint = 2 + Histogram_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "UNKNOWN", + 1: "YES", + 2: "NO", + 3: "GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "UNKNOWN": 0, + "YES": 1, + "NO": 2, + "GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3, 0} +} + +type LabelMatcher_Type int32 + +const ( + LabelMatcher_EQ LabelMatcher_Type = 0 + LabelMatcher_NEQ LabelMatcher_Type = 1 + LabelMatcher_RE LabelMatcher_Type = 2 + LabelMatcher_NRE LabelMatcher_Type = 3 +) + +var LabelMatcher_Type_name = map[int32]string{ + 0: "EQ", + 1: "NEQ", + 2: "RE", + 3: "NRE", +} + +var LabelMatcher_Type_value = map[string]int32{ + "EQ": 0, + "NEQ": 1, + "RE": 2, + "NRE": 3, +} + +func (x LabelMatcher_Type) String() string { + return proto.EnumName(LabelMatcher_Type_name, int32(x)) +} + +func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{8, 0} +} + +// We require this to match chunkenc.Encoding. +type Chunk_Encoding int32 + +const ( + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 + Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3 +) + +var Chunk_Encoding_name = map[int32]string{ + 0: "UNKNOWN", + 1: "XOR", + 2: "HISTOGRAM", + 3: "FLOAT_HISTOGRAM", +} + +var Chunk_Encoding_value = map[string]int32{ + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, + "FLOAT_HISTOGRAM": 3, +} + +func (x Chunk_Encoding) String() string { + return proto.EnumName(Chunk_Encoding_name, int32(x)) +} + +func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{10, 0} +} + +type MetricMetadata struct { + // Represents the metric type, these match the set from Prometheus. + // Refer to github.com/prometheus/common/model/metadata.go for details. 
+ Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` + MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` + Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } +func (m *MetricMetadata) String() string { return proto.CompactTextString(m) } +func (*MetricMetadata) ProtoMessage() {} +func (*MetricMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} +func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricMetadata.Merge(m, src) +} +func (m *MetricMetadata) XXX_Size() int { + return m.Size() +} +func (m *MetricMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MetricMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo + +func (m *MetricMetadata) GetType() MetricMetadata_MetricType { + if m != nil { + return m.Type + } + return MetricMetadata_UNKNOWN +} + +func (m *MetricMetadata) GetMetricFamilyName() string { + if m != nil { + return m.MetricFamilyName + } + return "" +} + +func (m *MetricMetadata) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricMetadata) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +type Sample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. 
+ Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{1} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +type Exemplar struct { + // Optional, can be empty. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. 
+type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. 
+ Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) 
GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +// TimeSeries represents samples and labels for a single time series. +type TimeSeries struct { + // For a timeseries to be valid, and for the samples and exemplars + // to be ingested by the remote system properly, the labels field is required. 
+ Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{5} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +func (m *TimeSeries) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +type Label struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{6} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(m, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) +} + +var xxx_messageInfo_Label proto.InternalMessageInfo + +func (m *Label) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Label) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Labels struct { + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Labels) Reset() { *m = Labels{} } +func (m *Labels) String() string { return proto.CompactTextString(m) } 
+func (*Labels) ProtoMessage() {} +func (*Labels) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{7} +} +func (m *Labels) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Labels.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Labels) XXX_Merge(src proto.Message) { + xxx_messageInfo_Labels.Merge(m, src) +} +func (m *Labels) XXX_Size() int { + return m.Size() +} +func (m *Labels) XXX_DiscardUnknown() { + xxx_messageInfo_Labels.DiscardUnknown(m) +} + +var xxx_messageInfo_Labels proto.InternalMessageInfo + +func (m *Labels) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +// Matcher specifies a rule, which can match or set of labels or not. +type LabelMatcher struct { + Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{8} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(m, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo + +func (m *LabelMatcher) GetType() LabelMatcher_Type { + if m != nil { + return m.Type + } + return LabelMatcher_EQ +} + +func (m *LabelMatcher) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelMatcher) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type ReadHints struct { + StepMs int64 `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"` + Func string `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"` + StartMs int64 `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"` + EndMs int64 `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"` + Grouping []string `protobuf:"bytes,5,rep,name=grouping,proto3" json:"grouping,omitempty"` + By bool `protobuf:"varint,6,opt,name=by,proto3" json:"by,omitempty"` + RangeMs int64 `protobuf:"varint,7,opt,name=range_ms,json=rangeMs,proto3" json:"range_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadHints) Reset() { *m = ReadHints{} } +func (m *ReadHints) 
String() string { return proto.CompactTextString(m) } +func (*ReadHints) ProtoMessage() {} +func (*ReadHints) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{9} +} +func (m *ReadHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadHints.Merge(m, src) +} +func (m *ReadHints) XXX_Size() int { + return m.Size() +} +func (m *ReadHints) XXX_DiscardUnknown() { + xxx_messageInfo_ReadHints.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadHints proto.InternalMessageInfo + +func (m *ReadHints) GetStepMs() int64 { + if m != nil { + return m.StepMs + } + return 0 +} + +func (m *ReadHints) GetFunc() string { + if m != nil { + return m.Func + } + return "" +} + +func (m *ReadHints) GetStartMs() int64 { + if m != nil { + return m.StartMs + } + return 0 +} + +func (m *ReadHints) GetEndMs() int64 { + if m != nil { + return m.EndMs + } + return 0 +} + +func (m *ReadHints) GetGrouping() []string { + if m != nil { + return m.Grouping + } + return nil +} + +func (m *ReadHints) GetBy() bool { + if m != nil { + return m.By + } + return false +} + +func (m *ReadHints) GetRangeMs() int64 { + if m != nil { + return m.RangeMs + } + return 0 +} + +// Chunk represents a TSDB chunk. +// Time range [min, max] is inclusive. +type Chunk struct { + MinTimeMs int64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"` + MaxTimeMs int64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"` + Type Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.Chunk_Encoding" json:"type,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (m *Chunk) String() string { return proto.CompactTextString(m) } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{10} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +func (m *Chunk) GetMinTimeMs() int64 { + if m != nil { + return m.MinTimeMs + } + return 0 +} + +func (m *Chunk) GetMaxTimeMs() int64 { + if m != nil { + return m.MaxTimeMs + } + return 0 +} + +func (m *Chunk) GetType() Chunk_Encoding { + if m != nil { + return m.Type + } + return Chunk_UNKNOWN +} + +func (m *Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// ChunkedSeries represents single, encoded time series. 
+type ChunkedSeries struct { + // Labels should be sorted. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // Chunks will be in start time order and may overlap. + Chunks []Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } +func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } +func (*ChunkedSeries) ProtoMessage() {} +func (*ChunkedSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{11} +} +func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkedSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkedSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkedSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkedSeries.Merge(m, src) +} +func (m *ChunkedSeries) XXX_Size() int { + return m.Size() +} +func (m *ChunkedSeries) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkedSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkedSeries proto.InternalMessageInfo + +func (m *ChunkedSeries) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ChunkedSeries) GetChunks() []Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +func init() { + proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) + proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) + proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) + proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") + proto.RegisterType((*Sample)(nil), "prometheus.Sample") + proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") + proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") + proto.RegisterType((*Label)(nil), "prometheus.Label") + proto.RegisterType((*Labels)(nil), "prometheus.Labels") + proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher") + proto.RegisterType((*ReadHints)(nil), "prometheus.ReadHints") + proto.RegisterType((*Chunk)(nil), "prometheus.Chunk") + proto.RegisterType((*ChunkedSeries)(nil), "prometheus.ChunkedSeries") +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } + +var fileDescriptor_d938547f84707355 = []byte{ + // 1092 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, + 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, + 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 
0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, + 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, + 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, + 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, + 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, + 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, + 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, + 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, + 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, + 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, + 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, + 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, + 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, + 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, + 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, + 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, + 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, + 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, + 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, + 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, + 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, + 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, + 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, + 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, + 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, + 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, + 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, + 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, + 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, + 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, + 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, + 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, + 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, + 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, + 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, + 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, + 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 
0x27, 0xa5, + 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, + 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, + 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, + 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, + 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, + 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, + 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, + 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, + 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, + 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, + 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, + 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, + 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, + 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, + 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, + 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, + 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, + 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, + 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, + 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, + 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, + 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, + 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, + 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, + 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, + 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, + 0x13, 0x09, 0x00, 0x00, +} + +func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x22 + } + if len(m.MetricFamilyName) > 0 { + i -= len(m.MetricFamilyName) + copy(dAtA[i:], 
m.MetricFamilyName) + i = encodeVarintTypes(dAtA, i, uint64(len(m.MetricFamilyName))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = 
uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Labels) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Labels) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RangeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RangeMs)) + i-- + dAtA[i] = 0x38 + } + if m.By { + i-- + if m.By { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Grouping) > 0 { + for iNdEx := len(m.Grouping) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Grouping[iNdEx]) + copy(dAtA[i:], m.Grouping[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Grouping[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.EndMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.EndMs)) + i-- + dAtA[i] = 0x20 + } + if m.StartMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.StartMs)) + i-- + dAtA[i] = 0x18 + } + if len(m.Func) > 0 { + i -= len(m.Func) + copy(dAtA[i:], m.Func) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Func))) + i-- + dAtA[i] = 0x12 + } + if m.StepMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.StepMs)) + i-- + 
dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.MaxTimeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTimeMs)) + i-- + dAtA[i] = 0x10 + } + if m.MinTimeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MinTimeMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChunkedSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkedSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkedSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MetricMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.MetricFamilyName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Label) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Labels) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StepMs != 0 { + n += 1 + sovTypes(uint64(m.StepMs)) + } + l = len(m.Func) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.StartMs != 0 { + n += 1 + sovTypes(uint64(m.StartMs)) + } + if m.EndMs != 0 { + n += 1 + sovTypes(uint64(m.EndMs)) + } + if len(m.Grouping) > 0 { + for _, s := range m.Grouping { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.By { + n += 2 + } + if m.RangeMs != 0 { + n += 1 + sovTypes(uint64(m.RangeMs)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTimeMs != 0 { + n += 1 + sovTypes(uint64(m.MinTimeMs)) + } + if m.MaxTimeMs != 0 { + n += 1 + sovTypes(uint64(m.MaxTimeMs)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChunkedSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MetricMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricMetadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricFamilyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricFamilyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + 
if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Label) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Labels) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Labels: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Labels: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= LabelMatcher_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StepMs", wireType) + } + m.StepMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StepMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Func", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Func = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartMs", wireType) + } + m.StartMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndMs", wireType) + } + m.EndMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Grouping", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Grouping = append(m.Grouping, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field By", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + m.By = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeMs", wireType) + } + m.RangeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RangeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType) + } + m.MinTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType) + } + m.MaxTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Chunk_Encoding(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkedSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkedSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkedSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.proto b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.proto new file mode 100644 index 00000000..61fc1e01 --- /dev/null +++ b/src/otel-collector/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompb"; + +import "gogoproto/gogo.proto"; + +message MetricMetadata { + enum MetricType { + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; + GAUGEHISTOGRAM = 4; + SUMMARY = 5; + INFO = 6; + STATESET = 7; + } + + // Represents the metric type, these match the set from Prometheus. + // Refer to github.com/prometheus/common/model/metadata.go for details. + MetricType type = 1; + string metric_family_name = 2; + string help = 4; + string unit = 5; +} + +message Sample { + double value = 1; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 2; +} + +message Exemplar { + // Optional, can be empty. 
+ repeated Label labels = 1 [(gogoproto.nullable) = false]; + double value = 2; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 3; +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. + NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + +// TimeSeries represents samples and labels for a single time series. 
+message TimeSeries { + // For a timeseries to be valid, and for the samples and exemplars + // to be ingested by the remote system properly, the labels field is required. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; +} + +message Label { + string name = 1; + string value = 2; +} + +message Labels { + repeated Label labels = 1 [(gogoproto.nullable) = false]; +} + +// Matcher specifies a rule, which can match or set of labels or not. +message LabelMatcher { + enum Type { + EQ = 0; + NEQ = 1; + RE = 2; + NRE = 3; + } + Type type = 1; + string name = 2; + string value = 3; +} + +message ReadHints { + int64 step_ms = 1; // Query step size in milliseconds. + string func = 2; // String representation of surrounding function or aggregation. + int64 start_ms = 3; // Start time in milliseconds. + int64 end_ms = 4; // End time in milliseconds. + repeated string grouping = 5; // List of label names used in aggregation. + bool by = 6; // Indicate whether it is without or by. + int64 range_ms = 7; // Range vector selector range in milliseconds. +} + +// Chunk represents a TSDB chunk. +// Time range [min, max] is inclusive. +message Chunk { + int64 min_time_ms = 1; + int64 max_time_ms = 2; + + // We require this to match chunkenc.Encoding. + enum Encoding { + UNKNOWN = 0; + XOR = 1; + HISTOGRAM = 2; + FLOAT_HISTOGRAM = 3; + } + Encoding type = 3; + bytes data = 4; +} + +// ChunkedSeries represents single, encoded time series. +message ChunkedSeries { + // Labels should be sorted. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + // Chunks will be in start time order and may overlap. + repeated Chunk chunks = 2 [(gogoproto.nullable) = false]; +} diff --git a/src/otel-collector/vendor/github.com/tidwall/gjson/LICENSE b/src/otel-collector/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 00000000..58f5819a --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/otel-collector/vendor/github.com/tidwall/gjson/README.md b/src/otel-collector/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 00000000..f4898478 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,488 @@ +
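The exponential bucket schema documented in the Histogram message of types.proto above is easier to see with concrete numbers. The sketch below is illustrative only and not part of the vendored sources; the helper name `bucketUpperBound` and the sample schema values are made up. It applies the stated rule that each bucket boundary is the previous boundary times 2^(2^-schema), with 1 always a boundary:

```go
package main

import (
	"fmt"
	"math"
)

// bucketUpperBound returns the upper boundary of bucket `index` for a given
// native-histogram schema, per the comment in types.proto: each boundary is
// the previous one times 2^(2^-schema), and 1 is always a boundary (so bucket
// index 0 is taken here to have upper bound 1).
func bucketUpperBound(schema, index int32) float64 {
	base := math.Exp2(math.Exp2(float64(-schema))) // 2^(2^-schema)
	return math.Pow(base, float64(index))
}

func main() {
	// schema 0: base 2, so boundaries 2, 4, 8, 16, ...
	// schema 3: base 2^(1/8), about 1.0905, i.e. eight buckets per power of two.
	for _, schema := range []int32{0, 3} {
		fmt.Printf("schema=%d:", schema)
		for index := int32(1); index <= 4; index++ {
			fmt.Printf(" %.4f", bucketUpperBound(schema, index))
		}
		fmt.Println()
	}
}
```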

+GJSON
+(logo; badges: GoDoc, GJSON Playground)
+
+get json values quickly
+
+ +GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document. +It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines). + +Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool. + +Getting Started +=============== + +## Installing + +To start using GJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/gjson +``` + +This will retrieve the library. + +## Get a value +Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately. + +```go +package main + +import "github.com/tidwall/gjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value := gjson.Get(json, "name.last") + println(value.String()) +} +``` + +This will print: + +``` +Prichard +``` +*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* + +## Path Syntax + +Below is a quick overview of the path syntax, for more complete information please +check out [GJSON Syntax](SYNTAX.md). + +A path is a series of keys separated by a dot. +A key may contain special wildcard characters '\*' and '?'. +To access an array value use the index as the key. +To get the number of elements in an array or to access a child path, use the '#' character. +The dot and wildcard characters can be escaped with '\\'. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#(...)`, or find all +matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` +comparison operators and the simple pattern matching `%` (like) and `!%` +(not like) operators. + +``` +friends.#(last=="Murphy").first >> "Dale" +friends.#(last=="Murphy")#.first >> ["Dale","Jane"] +friends.#(age>45)#.last >> ["Craig","Murphy"] +friends.#(first%"D*").last >> "Murphy" +friends.#(first!%"D*").last >> "Craig" +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new +[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility, +`#[...]` will continue to work until the next major release.* + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. +Arrays and Objects are returned as their raw json types. 
+ +The `Result` type holds one of these: + +``` +bool, for JSON booleans +float64, for JSON numbers +string, for JSON string literals +nil, for JSON null +``` + +To directly access the value: + +```go +result.Type // can be String, Number, True, False, Null, or JSON +result.Str // holds the string +result.Num // holds the float64 number +result.Raw // holds the raw json +result.Index // index of raw value in original json, zero means index unknown +result.Indexes // indexes of all the elements that match on a path containing the '#' query character. +``` + +There are a variety of handy functions that work on a result: + +```go +result.Exists() bool +result.Value() interface{} +result.Int() int64 +result.Uint() uint64 +result.Float() float64 +result.String() string +result.Bool() bool +result.Time() time.Time +result.Array() []gjson.Result +result.Map() map[string]gjson.Result +result.Get(path string) Result +result.ForEach(iterator func(key, value Result) bool) +result.Less(token Result, caseSensitive bool) bool +``` + +The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: + +```go +boolean >> bool +number >> float64 +string >> string +null >> nil +array >> []interface{} +object >> map[string]interface{} +``` + +The `result.Array()` function returns back an array of values. +If the result represents a non-existent value, then an empty array will be returned. +If the result is not a JSON array, the return value will be an array containing one result. + +### 64-bit integers + +The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers. + +```go +result.Int() int64 // -9223372036854775808 to 9223372036854775807 +result.Uint() int64 // 0 to 18446744073709551615 +``` + +## Modifiers and path chaining + +New in version 1.2 is support for modifier functions and path chaining. + +A modifier is a path component that performs custom processing on the +json. + +Multiple paths can be "chained" together using the pipe character. +This is useful for getting results from a modified query. + +For example, using the built-in `@reverse` modifier on the above json document, +we'll get `children` array and reverse the order: + +``` +"children|@reverse" >> ["Jack","Alex","Sara"] +"children|@reverse|0" >> "Jack" +``` + +There are currently the following built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from a json document. +- `@pretty`: Make the json document more human readable. +- `@this`: Returns the current element. It can be used to retrieve the root element. +- `@valid`: Ensure the json document is valid. +- `@flatten`: Flattens an array. +- `@join`: Joins multiple objects into a single object. +- `@keys`: Returns an array of keys for an object. +- `@values`: Returns an array of values for an object. + +### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON +document or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. 
+ +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier that makes the entire json document upper +or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +``` + +``` +"children|@case:upper" >> ["SARA","ALEX","JACK"] +"children|@case:lower|@reverse" >> ["jack","alex","sara"] +``` + +## JSON Lines + +There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multilined document as an array. + +For example: + +``` +{"name": "Gilbert", "age": 61} +{"name": "Alexa", "age": 34} +{"name": "May", "age": 57} +{"name": "Deloise", "age": 44} +``` + +``` +..# >> 4 +..1 >> {"name": "Alexa", "age": 34} +..3 >> {"name": "Deloise", "age": 44} +..#.name >> ["Gilbert","Alexa","May","Deloise"] +..#(name="May").age >> 57 +``` + +The `ForEachLines` function will iterate through JSON lines. + +```go +gjson.ForEachLine(json, func(line gjson.Result) bool{ + println(line.String()) + return true +}) +``` + +## Get nested array values + +Suppose you want all the last names from the following json: + +```json +{ + "programmers": [ + { + "firstName": "Janet", + "lastName": "McLaughlin", + }, { + "firstName": "Elliotte", + "lastName": "Hunter", + }, { + "firstName": "Jason", + "lastName": "Harold", + } + ] +} +``` + +You would use the path "programmers.#.lastName" like such: + +```go +result := gjson.Get(json, "programmers.#.lastName") +for _, name := range result.Array() { + println(name.String()) +} +``` + +You can also query an object inside an array: + +```go +name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`) +println(name.String()) // prints "Elliotte" +``` + +## Iterate through an object or array + +The `ForEach` function allows for quickly iterating through an object or array. +The key and value are passed to the iterator function for objects. +Only the value is passed for arrays. +Returning `false` from an iterator will stop iteration. + +```go +result := gjson.Get(json, "programmers") +result.ForEach(func(key, value gjson.Result) bool { + println(value.String()) + return true // keep iterating +}) +``` + +## Simple Parse and Get + +There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. + +For example, all of these will return the same result: + +```go +gjson.Parse(json).Get("name").Get("last") +gjson.Get(json, "name").Get("last") +gjson.Get(json, "name.last") +``` + +## Check for the existence of a value + +Sometimes you just want to know if a value exists. + +```go +value := gjson.Get(json, "name.last") +if !value.Exists() { + println("no last name") +} else { + println(value.String()) +} + +// Or as one step +if gjson.Get(json, "name.last").Exists() { + println("has a last name") +} +``` + +## Validate JSON + +The `Get*` and `Parse*` functions expects that the json is well-formed. 
Bad json will not panic, but it may return back unexpected results. + +If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON. + +```go +if !gjson.Valid(json) { + return errors.New("invalid json") +} +value := gjson.Get(json, "name.last") +``` + +## Unmarshal to a map + +To unmarshal to a `map[string]interface{}`: + +```go +m, ok := gjson.Parse(json).Value().(map[string]interface{}) +if !ok { + // not a map +} +``` + +## Working with Bytes + +If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`. + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +``` + +If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +var raw []byte +if result.Index > 0 { + raw = json[result.Index:result.Index+len(result.Raw)] +} else { + raw = []byte(result.Raw) +} +``` + +This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. + +## Get multiple values at once + +The `GetMany` function can be used to get multiple values at the same time. + +```go +results := gjson.GetMany(json, "name.first", "name.last", "age") +``` + +The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. + +## Performance + +Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +[jsonparser](https://github.com/buger/jsonparser), +and [json-iterator](https://github.com/json-iterator/go) + +``` +BenchmarkGJSONGet-16 11644512 311 ns/op 0 B/op 0 allocs/op +BenchmarkGJSONUnmarshalMap-16 1122678 3094 ns/op 1920 B/op 26 allocs/op +BenchmarkJSONUnmarshalMap-16 516681 6810 ns/op 2944 B/op 69 allocs/op +BenchmarkJSONUnmarshalStruct-16 697053 5400 ns/op 928 B/op 13 allocs/op +BenchmarkJSONDecoder-16 330450 10217 ns/op 3845 B/op 160 allocs/op +BenchmarkFFJSONLexer-16 1424979 2585 ns/op 880 B/op 8 allocs/op +BenchmarkEasyJSONLexer-16 3000000 729 ns/op 501 B/op 5 allocs/op +BenchmarkJSONParserGet-16 3000000 366 ns/op 21 B/op 0 allocs/op +BenchmarkJSONIterator-16 3000000 869 ns/op 693 B/op 14 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated through one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +*These benchmarks were run on a MacBook Pro 16" 2.4 GHz Intel Core i9 using Go 1.17 and can be found [here](https://github.com/tidwall/gjson-benchmarks).* diff --git a/src/otel-collector/vendor/github.com/tidwall/gjson/SYNTAX.md 
b/src/otel-collector/vendor/github.com/tidwall/gjson/SYNTAX.md new file mode 100644 index 00000000..9bc18c88 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -0,0 +1,319 @@ +# GJSON Path Syntax + +A GJSON Path is a text string syntax that describes a search pattern for quickly retreiving values from a JSON payload. + +This document is designed to explain the structure of a GJSON Path through examples. + +- [Path structure](#path-structure) +- [Basic](#basic) +- [Wildcards](#wildcards) +- [Escape Character](#escape-character) +- [Arrays](#arrays) +- [Queries](#queries) +- [Dot vs Pipe](#dot-vs-pipe) +- [Modifiers](#modifiers) +- [Multipaths](#multipaths) + +The definitive implemenation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson). +Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online. + + +## Path structure + +A GJSON Path is intended to be easily expressed as a series of components seperated by a `.` character. + +Along with `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, and `?`. + +## Example + +Given this JSON + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` + +The following GJSON Paths evaluate to the accompanying values. + +### Basic + +In many cases you'll just want to retreive values by object name or array index. + +```go +name.last "Anderson" +name.first "Tom" +age 37 +children ["Sara","Alex","Jack"] +children.0 "Sara" +children.1 "Alex" +friends.1 {"first": "Roger", "last": "Craig", "age": 68} +friends.1.first "Roger" +``` + +### Wildcards + +A key may contain the special wildcard characters `*` and `?`. +The `*` will match on any zero+ characters, and `?` matches on any one character. + +```go +child*.2 "Jack" +c?ildren.0 "Sara" +``` + +### Escape character + +Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`. + +```go +fav\.movie "Deer Hunter" +``` + +You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in you source code. + +```go +// Go +val := gjson.Get(json, "fav\\.movie") // must escape the slash +val := gjson.Get(json, `fav\.movie`) // no need to escape the slash +``` + +```rust +// Rust +let val = gjson::get(json, "fav\\.movie") // must escape the slash +let val = gjson::get(json, r#"fav\.movie"#) // no need to escape the slash +``` + + +### Arrays + +The `#` character allows for digging into JSON Arrays. + +To get the length of an array you'll just use the `#` all by itself. + +```go +friends.# 3 +friends.#.age [44,68,47] +``` + +### Queries + +You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`. +Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators, +and the simple pattern matching `%` (like) and `!%` (not like) operators. + +```go +friends.#(last=="Murphy").first "Dale" +friends.#(last=="Murphy")#.first ["Dale","Jane"] +friends.#(age>45)#.last ["Craig","Murphy"] +friends.#(first%"D*").last "Murphy" +friends.#(first!%"D*").last "Craig" +``` + +To query for a non-object value in an array, you can forgo the string to the right of the operator. 
+ +```go +children.#(!%"*a*") "Alex" +children.#(%"*a*")# ["Sara","Jack"] +``` + +Nested queries are allowed. + +```go +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new [multipath](#multipaths) +syntax. For backwards compatibility, `#[...]` will continue to work until the +next major release.* + +The `~` (tilde) operator will convert a value to a boolean before comparison. + +For example, using the following JSON: + +```json +{ + "vals": [ + { "a": 1, "b": true }, + { "a": 2, "b": true }, + { "a": 3, "b": false }, + { "a": 4, "b": "0" }, + { "a": 5, "b": 0 }, + { "a": 6, "b": "1" }, + { "a": 7, "b": 1 }, + { "a": 8, "b": "true" }, + { "a": 9, "b": false }, + { "a": 10, "b": null }, + { "a": 11 } + ] +} +``` + +You can now query for all true(ish) or false(ish) values: + +``` +vals.#(b==~true)#.a >> [1,2,6,7,8] +vals.#(b==~false)#.a >> [3,4,5,9,10,11] +``` + +The last value which was non-existent is treated as `false` + +### Dot vs Pipe + +The `.` is standard separator, but it's also possible to use a `|`. +In most cases they both end up returning the same results. +The cases where`|` differs from `.` is when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries). + +Here are some examples + +```go +friends.0.first "Dale" +friends|0.first "Dale" +friends.0|first "Dale" +friends|0|first "Dale" +friends|# 3 +friends.# 3 +friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +friends.#(last="Murphy")#.first ["Dale","Jane"] +friends.#(last="Murphy")#|first +friends.#(last="Murphy")#.0 [] +friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44} +friends.#(last="Murphy")#.# [] +friends.#(last="Murphy")#|# 2 +``` + +Let's break down a few of these. + +The path `friends.#(last="Murphy")#` all by itself results in + +```json +[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +``` + +The `.first` suffix will process the `first` path on each array element *before* returning the results. Which becomes + +```json +["Dale","Jane"] +``` + +But the `|first` suffix actually processes the `first` path *after* the previous result. +Since the previous result is an array, not an object, it's not possible to process +because `first` does not exist. + +Yet, `|0` suffix returns + +```json +{"first": "Dale", "last": "Murphy", "age": 44} +``` + +Because `0` is the first index of the previous result. + +### Modifiers + +A modifier is a path component that performs custom processing on the JSON. + +For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array: + +```go +children.@reverse ["Jack","Alex","Sara"] +children.@reverse.0 "Jack" +``` + +There are currently the following built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from JSON. +- `@pretty`: Make the JSON more human readable. +- `@this`: Returns the current element. It can be used to retrieve the root element. +- `@valid`: Ensure the json document is valid. +- `@flatten`: Flattens an array. +- `@join`: Joins multiple objects into a single object. +- `@keys`: Returns an array of keys for an object. +- `@values`: Returns an array of values for an object. + +#### Modifier arguments + +A modifier may accept an optional argument. 
The argument can be a valid JSON payload or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +#### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier which makes the entire JSON payload upper or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +"children.@case:upper" ["SARA","ALEX","JACK"] +"children.@case:lower.@reverse" ["jack","alex","sara"] +``` + +*Note: Custom modifiers are not yet available in the Rust version* + +### Multipaths + +Starting with v1.3.0, GJSON added the ability to join multiple paths together +to form new documents. Wrapping comma-separated paths between `[...]` or +`{...}` will result in a new array or object, respectively. + +For example, using the given multipath + +``` +{name.first,age,"the_murphys":friends.#(last="Murphy")#.first} +``` + +Here we selected the first name, age, and the first name for friends with the +last name "Murphy". + +You'll notice that an optional key can be provided, in this case +"the_murphys", to force assign a key to a value. Otherwise, the name of the +actual field will be used, in this case "first". If a name cannot be +determined, then "_" is used. + +This results in + +``` +{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]} +``` + + diff --git a/src/otel-collector/vendor/github.com/tidwall/gjson/gjson.go b/src/otel-collector/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 00000000..279649ee --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,2973 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "encoding/json" + "strconv" + "strings" + "time" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/tidwall/match" + "github.com/tidwall/pretty" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). 
+type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int + // Indexes of all the elements that match on a path containing the '#' + // query character. + Indexes []int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "" + case False: + return "false" + case Number: + if len(t.Raw) == 0 { + // calculated result + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + var i int + if t.Raw[0] == '-' { + i++ + } + for ; i < len(t.Raw); i++ { + if t.Raw[i] < '0' || t.Raw[i] > '9' { + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + } + return t.Raw + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + b, _ := strconv.ParseBool(strings.ToLower(t.Str)) + return b + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseInt(t.Str) + return n + case Number: + // try to directly convert the float64 to int64 + i, ok := safeInt(t.Num) + if ok { + return i + } + // now try to parse the raw string + i, ok = parseInt(t.Raw) + if ok { + return i + } + // fallback to a standard conversion + return int64(t.Num) + } +} + +// Uint returns an unsigned integer representation. +func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseUint(t.Str) + return n + case Number: + // try to directly convert the float64 to uint64 + i, ok := safeInt(t.Num) + if ok && i >= 0 { + return uint64(i) + } + // now try to parse the raw string + u, ok := parseUint(t.Raw) + if ok { + return u + } + // fallback to a standard conversion + return uint64(t.Num) + } +} + +// Float returns an float64 representation. +func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Time returns a time.Time representation. +func (t Result) Time() time.Time { + res, _ := time.Parse(time.RFC3339, t.String()) + return res +} + +// Array returns back an array of values. +// If the result represents a null value or is non-existent, then an empty +// array will be returned. +// If the result is not a JSON array, the return value will be an +// array containing one result. +func (t Result) Array() []Result { + if t.Type == Null { + return []Result{} + } + if !t.IsArray() { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// IsObject returns true if the result value is a JSON object. +func (t Result) IsObject() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{' +} + +// IsArray returns true if the result value is a JSON array. +func (t Result) IsArray() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '[' +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be +// iterated. If the result is an Object, the iterator will pass the key and +// value of each item. 
If the result is an Array, the iterator will only pass +// the value of each item. If the result is not a JSON array or object, the +// iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var keys bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + keys = true + break + } else if json[i] == '[' { + i++ + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + for ; i < len(json); i++ { + if keys { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + value.Index = s + if !iterator(key, value) { + return + } + } +} + +// Map returns back a map of values. The result should be a JSON object. +// If the result is not a JSON object, the return value will be an empty map. +func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +// Get searches result for the specified path. +// The result should be a JSON array or object. +func (t Result) Get(path string) Result { + return Get(t.Raw, path) +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + value.Str = "" + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + value.Str, value.Num = "", 0 + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + value.Num = 0 + } + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + if _, ok := r.oi[key.Str]; !ok { + r.oi[key.Str] = value.Value() + } + } else { + if _, ok := r.o[key.Str]; !ok { + r.o[key.Str] = value + } + } + } + count++ + } 
else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + return +} + +// Parse parses the json and returns a result. +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. +func Parse(json string) Result { + var value Result + for i := 0; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + return Result{} + } + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + break + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' or '(' or '"' + // squash the value, ignoring all nested arrays and objects. + var i, depth int + if json[0] != '"' { + i, depth = 1, 1 + } + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + if depth == 0 { + if i >= len(json) { + return json + } + return json[:i+1] + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. 
+ continue + } + if json[i] < ']' { + // probably a valid number + continue + } + if json[i] == 'e' || json[i] == 'E' { + // allow for exponential numbers + continue + } + // likely a ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return json[:i+1], unescape(json[1:i]) + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. +// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// map[string]interface{}, for JSON objects +// []interface{}, for JSON arrays +// +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || + json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + pipe string + piped bool + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + all bool + path string + op string + value string + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' 
{ + r.part = path[:i] + if !r.arrch && i < len(path)-1 && isDotPiperChar(path[i+1]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' || path[1] == '(' { + // query + r.query.on = true + qpath, op, value, _, fi, vesc, ok := + parseQuery(path[i:]) + if !ok { + // bad query, end now + break + } + if len(value) >= 2 && value[0] == '"' && + value[len(value)-1] == '"' { + value = value[1 : len(value)-1] + if vesc { + value = unescape(value) + } + } + r.query.path = qpath + r.query.op = op + r.query.value = value + + i = fi - 1 + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + } + } + continue + } + } + r.part = path + r.path = "" + return +} + +// splitQuery takes a query and splits it into three parts: +// path, op, middle, and right. +// So for this query: +// #(first_name=="Murphy").last +// Becomes +// first_name # path +// =="Murphy" # middle +// .last # right +// Or, +// #(service_roles.#(=="one")).cap +// Becomes +// service_roles.#(=="one") # path +// # middle +// .cap # right +func parseQuery(query string) ( + path, op, value, remain string, i int, vesc, ok bool, +) { + if len(query) < 2 || query[0] != '#' || + (query[1] != '(' && query[1] != '[') { + return "", "", "", "", i, false, false + } + i = 2 + j := 0 // start of value part + depth := 1 + for ; i < len(query); i++ { + if depth == 1 && j == 0 { + switch query[i] { + case '!', '=', '<', '>', '%': + // start of the value part + j = i + continue + } + } + if query[i] == '\\' { + i++ + } else if query[i] == '[' || query[i] == '(' { + depth++ + } else if query[i] == ']' || query[i] == ')' { + depth-- + if depth == 0 { + break + } + } else if query[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(query); i++ { + if query[i] == '\\' { + vesc = true + i++ + } else if query[i] == '"' { + break + } + } + } + } + if depth > 0 { + return "", "", "", "", i, false, false + } + if j > 0 { + path = trim(query[2:j]) + value = trim(query[j:i]) + remain = query[i+1:] + // parse the compare op from the value + var opsz int + switch { + case len(value) == 1: + opsz = 1 + case value[0] == '!' && value[1] == '=': + opsz = 2 + case value[0] == '!' && value[1] == '%': + opsz = 2 + case value[0] == '<' && value[1] == '=': + opsz = 2 + case value[0] == '>' && value[1] == '=': + opsz = 2 + case value[0] == '=' && value[1] == '=': + value = value[1:] + opsz = 1 + case value[0] == '<': + opsz = 1 + case value[0] == '>': + opsz = 1 + case value[0] == '=': + opsz = 1 + case value[0] == '%': + opsz = 1 + } + op = value[:opsz] + value = trim(value[opsz:]) + } else { + path = trim(query[2:i]) + remain = query[i+1:] + } + return path, op, value, remain, i + 1, vesc, true +} + +func trim(s string) string { +left: + if len(s) > 0 && s[0] <= ' ' { + s = s[1:] + goto left + } +right: + if len(s) > 0 && s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + goto right + } + return s +} + +// peek at the next byte and see if it's a '@', '[', or '{'. 
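+// A '@' introduces a piped modifier and a '[' or '{' introduces a multipath selector,
+// so a path such as "children.@reverse" pipes the "children" array into @reverse.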
+func isDotPiperChar(c byte) bool { + return !DisableModifiers && (c == '@' || c == '[' || c == '{') +} + +type objectPathResult struct { + part string + path string + pipe string + piped bool + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + if i < len(path)-1 && isDotPiperChar(path[i+1]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + if i < len(path)-1 && isDotPiperChar(path[i+1]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + } + r.more = true + return + } else if path[i] == '|' { + r.part = string(epart) + r.pipe = path[i+1:] + r.piped = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' or '(' + // squash the value, ignoring all nested arrays and objects. + // the first '[' or '{' or '(' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. 
+ i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + key, kesc, ok = c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = matchLimit(unescape(key), rp.part) + } else { + pmatch = matchLimit(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = pmatch && !rp.more + for ; i < len(c.json); i++ { + var num bool + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case 'n': + if i+1 < len(c.json) && c.json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + } + if num { + i, val = parseNumber(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + } + break + } + } + return i, false +} + +// matchLimit will limit the complexity of the match operation to avoid ReDos +// attacks from arbritary inputs. +// See the github.com/tidwall/match.MatchLimit function for more information. 
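+// The hard-coded limit of 10,000 passed below bounds how much work a single wildcard
+// match is allowed to perform.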
+func matchLimit(str, pattern string) bool { + matched, _ := match.MatchLimit(str, pattern, 10000) + return matched +} + +func queryMatches(rp *arrayPathResult, value Result) bool { + rpv := rp.query.value + if len(rpv) > 0 && rpv[0] == '~' { + // convert to bool + rpv = rpv[1:] + if value.Bool() { + value = Result{Type: True} + } else { + value = Result{Type: False} + } + } + if !value.Exists() { + return false + } + if rp.query.op == "" { + // the query is only looking for existence, such as: + // friends.#(name) + // which makes sure that the array "friends" has an element of + // "name" that exists + return true + } + switch value.Type { + case String: + switch rp.query.op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return matchLimit(value.Str, rpv) + case "!%": + return !matchLimit(value.Str, rpv) + } + case Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + switch rp.query.op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num != rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c *parseContext, i int, path string) (int, bool) { + var pmatch, vesc, ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + var queryIndexes []int + rp := parseArrayPath(path) + if !rp.arrch { + n, ok := parseUint(rp.part) + if !ok { + partidx = -1 + } else { + partidx = int(n) + } + } + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + + procQuery := func(qval Result) bool { + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } + } + var tmp parseContext + tmp.value = qval + fillIndex(c.json, &tmp) + parentIndex := tmp.value.Index + var res Result + if qval.Type == JSON { + res = qval.Get(rp.query.path) + } else { + if rp.query.path != "" { + return false + } + res = qval + } + if queryMatches(&rp, res) { + if rp.more { + left, right, ok := splitPossiblePipe(rp.path) + if ok { + rp.path = left + c.pipe = right + c.piped = true + } + res = qval.Get(rp.path) + } else { + res = qval + } + if rp.query.all { + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + if raw != "" { + if len(multires) > 1 { + multires = append(multires, ',') + } + multires = append(multires, raw...) 
+ queryIndexes = append(queryIndexes, res.Index+parentIndex) + } + } else { + c.value = res + return true + } + } + return false + } + for i < len(c.json)+1 { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; ; i++ { + var ch byte + if i > len(c.json) { + break + } else if i == len(c.json) { + ch = ']' + } else { + ch = c.json[i] + } + var num bool + switch ch { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if rp.query.on { + var qval Result + if vesc { + qval.Str = unescape(val[1 : len(val)-1]) + } else { + qval.Str = val[1 : len(val)-1] + } + qval.Raw = val + qval.Type = String + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case 'n': + if i+1 < len(c.json) && c.json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + switch vc { + case 't': + qval.Type = True + case 'f': + qval.Type = False + } + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + case ']': + if rp.arrch && rp.part == "#" { + if rp.alogok { + left, right, ok := splitPossiblePipe(rp.alogkey) + if ok { + rp.alogkey = left + c.pipe = right + c.piped = true + } + var indexes = make([]int, 0, 64) + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + idx := alog[j] + for idx < len(c.json) { + switch c.json[idx] { + case ' ', '\t', '\r', '\n': + idx++ + continue + } + break + } + if idx < len(c.json) && c.json[idx] != ']' { + _, res, ok := parseAny(c.json, idx, true) + parentIndex := res.Index + if ok { + res := res.Get(rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + jsons = append(jsons, []byte(raw)...) 
+ indexes = append(indexes, + res.Index+parentIndex) + k++ + } + } + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + c.value.Indexes = indexes + return i + 1, true + } + if rp.alogok { + break + } + + c.value.Type = Number + c.value.Num = float64(h - 1) + c.value.Raw = strconv.Itoa(h - 1) + c.calcd = true + return i + 1, true + } + if !c.value.Exists() { + if len(multires) > 0 { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + Indexes: queryIndexes, + } + } else if rp.query.all { + c.value = Result{ + Raw: "[]", + Type: JSON, + } + } + } + return i + 1, false + } + if num { + i, val = parseNumber(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + qval.Type = Number + qval.Num, _ = strconv.ParseFloat(val, 64) + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + } + break + } + } + return i, false +} + +func splitPossiblePipe(path string) (left, right string, ok bool) { + // take a quick peek for the pipe character. If found we'll split the piped + // part of the path into the c.pipe field and shorten the rp. + var possible bool + for i := 0; i < len(path); i++ { + if path[i] == '|' { + possible = true + break + } + } + if !possible { + return + } + + if len(path) > 0 && path[0] == '{' { + squashed := squash(path[1:]) + if len(squashed) < len(path)-1 { + squashed = path[:len(squashed)+1] + remain := path[len(squashed):] + if remain[0] == '|' { + return squashed, remain[1:], true + } + } + return + } + + // split the left and right side of the path with the pipe character as + // the delimiter. This is a little tricky because we'll need to basically + // parse the entire path. + for i := 0; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '.' { + if i == len(path)-1 { + return + } + if path[i+1] == '#' { + i += 2 + if i == len(path) { + return + } + if path[i] == '[' || path[i] == '(' { + var start, end byte + if path[i] == '[' { + start, end = '[', ']' + } else { + start, end = '(', ')' + } + // inside selector, balance brackets + i++ + depth := 1 + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == start { + depth++ + } else if path[i] == end { + depth-- + if depth == 0 { + break + } + } else if path[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '"' { + break + } + } + } + } + } + } + } else if path[i] == '|' { + return path[:i], path[i+1:], true + } + } + return +} + +// ForEachLine iterates through lines of JSON as specified by the JSON Lines +// format (http://jsonlines.org/). +// Each line is returned as a GJSON Result. +func ForEachLine(json string, iterator func(line Result) bool) { + var res Result + var i int + for { + i, res, _ = parseAny(json, i, true) + if !res.Exists() { + break + } + if !iterator(res) { + return + } + } +} + +type subSelector struct { + name string + path string +} + +// parseSubSelectors returns the subselectors belonging to a '[path1,path2]' or +// '{"field1":path1,"field2":path2}' type subSelection. It's expected that the +// first character in path is either '[' or '{', and has already been checked +// prior to calling this function. 
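+// For example, "{name.first,age}" yields two subselectors ("name.first" and "age"),
+// and "[children.0,children.1]" is split the same way for an array-style result.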
+func parseSubSelectors(path string) (sels []subSelector, out string, ok bool) { + modifer := 0 + depth := 1 + colon := 0 + start := 1 + i := 1 + pushSel := func() { + var sel subSelector + if colon == 0 { + sel.path = path[start:i] + } else { + sel.name = path[start:colon] + sel.path = path[colon+1 : i] + } + sels = append(sels, sel) + colon = 0 + start = i + 1 + } + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '@': + if modifer == 0 && i > 0 && (path[i-1] == '.' || path[i-1] == '|') { + modifer = i + } + case ':': + if modifer == 0 && colon == 0 && depth == 1 { + colon = i + } + case ',': + if depth == 1 { + pushSel() + } + case '"': + i++ + loop: + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '"': + break loop + } + } + case '[', '(', '{': + depth++ + case ']', ')', '}': + depth-- + if depth == 0 { + pushSel() + path = path[i+1:] + return sels, path, true + } + } + } + return +} + +// nameOfLast returns the name of the last component +func nameOfLast(path string) string { + for i := len(path) - 1; i >= 0; i-- { + if path[i] == '|' || path[i] == '.' { + if i > 0 { + if path[i-1] == '\\' { + continue + } + } + return path[i+1:] + } + } + return path +} + +func isSimpleName(component string) bool { + for i := 0; i < len(component); i++ { + if component[i] < ' ' { + return false + } + switch component[i] { + case '[', ']', '{', '}', '(', ')', '#', '|': + return false + } + } + return true +} + +func appendJSONString(dst []byte, s string) []byte { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] == '\\' || s[i] == '"' || s[i] > 126 { + d, _ := json.Marshal(s) + return append(dst, string(d)...) + } + } + dst = append(dst, '"') + dst = append(dst, s...) + dst = append(dst, '"') + return dst +} + +type parseContext struct { + json string + value Result + pipe string + piped bool + calcd bool + lines bool +} + +// Get searches json for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// When the value is found it's returned immediately. +// +// A path is a series of keys separated by a dot. +// A key may contain special wildcard characters '*' and '?'. +// To access an array value use the index as the key. +// To get the number of elements in an array or to access a child path, use +// the '#' character. +// The dot and wildcard character can be escaped with '\'. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children" >> ["Sara","Alex","Jack"] +// "children.#" >> 3 +// "children.1" >> "Alex" +// "child*.2" >> "Jack" +// "c?ildren.0" >> "Sara" +// "friends.#.first" >> ["James","Roger"] +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. 
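+//
+// Queries are also supported. Using the document above:
+//
+// "friends.#(last=="Murphy").first" >> "James"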
+func Get(json, path string) Result { + if len(path) > 1 { + if !DisableModifiers { + if path[0] == '@' { + // possible modifier + var ok bool + var npath string + var rjson string + npath, rjson, ok = execModifier(json, path) + if ok { + path = npath + if len(path) > 0 && (path[0] == '|' || path[0] == '.') { + res := Get(rjson, path[1:]) + res.Index = 0 + res.Indexes = nil + return res + } + return Parse(rjson) + } + } + } + if path[0] == '[' || path[0] == '{' { + // using a subselector path + kind := path[0] + var ok bool + var subs []subSelector + subs, path, ok = parseSubSelectors(path) + if ok { + if len(path) == 0 || (path[0] == '|' || path[0] == '.') { + var b []byte + b = append(b, kind) + var i int + for _, sub := range subs { + res := Get(json, sub.path) + if res.Exists() { + if i > 0 { + b = append(b, ',') + } + if kind == '{' { + if len(sub.name) > 0 { + if sub.name[0] == '"' && Valid(sub.name) { + b = append(b, sub.name...) + } else { + b = appendJSONString(b, sub.name) + } + } else { + last := nameOfLast(sub.path) + if isSimpleName(last) { + b = appendJSONString(b, last) + } else { + b = appendJSONString(b, "_") + } + } + b = append(b, ':') + } + var raw string + if len(res.Raw) == 0 { + raw = res.String() + if len(raw) == 0 { + raw = "null" + } + } else { + raw = res.Raw + } + b = append(b, raw...) + i++ + } + } + b = append(b, kind+2) + var res Result + res.Raw = string(b) + res.Type = JSON + if len(path) > 0 { + res = res.Get(path[1:]) + } + res.Index = 0 + return res + } + } + } + } + var i int + var c = &parseContext{json: json} + if len(path) >= 2 && path[0] == '.' && path[1] == '.' { + c.lines = true + parseArray(c, 0, path[2:]) + } else { + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + } + if c.piped { + res := c.value.Get(c.pipe) + res.Index = 0 + return res + } + fillIndex(json, c) + return c.value +} + +// GetBytes searches json for the specified path. 
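+// It avoids copying the input by casting the byte slice to a string internally.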
+// If working with bytes, this method preferred over Get(string(data), path) +func GetBytes(json []byte, path string) Result { + return getBytes(json, path) +} + +// runeit returns the rune from the the \uXXXX +func runeit(json string) rune { + n, _ := strconv.ParseUint(json[:4], 16, 64) + return rune(n) +} + +// unescape unescapes a string +func unescape(json string) string { + var str = make([]byte, 0, len(json)) + for i := 0; i < len(json); i++ { + switch { + default: + str = append(str, json[i]) + case json[i] < ' ': + return string(str) + case json[i] == '\\': + i++ + if i >= len(json) { + return string(str) + } + switch json[i] { + default: + return string(str) + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + case '"': + str = append(str, '"') + case 'u': + if i+5 > len(json) { + return string(str) + } + r := runeit(json[i+1:]) + i += 5 + if utf16.IsSurrogate(r) { + // need another code + if len(json[i:]) >= 6 && json[i] == '\\' && + json[i+1] == 'u' { + // we expect it to be correct so just consume it + r = utf16.DecodeRune(r, runeit(json[i+2:])) + i += 6 + } + } + // provide enough space to encode the largest utf8 possible + str = append(str, 0, 0, 0, 0, 0, 0, 0, 0) + n := utf8.EncodeRune(str[len(str)-8:], r) + str = str[:len(str)-8+n] + i-- // backtrack index by one + } + } + } + return string(str) +} + +// Less return true if a token is less than another token. +// The caseSensitive paramater is used when the tokens are Strings. +// The order when comparing two different type is: +// +// Null < False < Number < String < True < JSON +// +func (t Result) Less(token Result, caseSensitive bool) bool { + if t.Type < token.Type { + return true + } + if t.Type > token.Type { + return false + } + if t.Type == String { + if caseSensitive { + return t.Str < token.Str + } + return stringLessInsensitive(t.Str, token.Str) + } + if t.Type == Number { + return t.Num < token.Num + } + return t.Raw < token.Raw +} + +func stringLessInsensitive(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// parseAny parses the next value from a json string. +// A Result is returned when the hit param is set. 
+// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + var tmp parseContext + tmp.value = res + fillIndex(json, &tmp) + return i, tmp.value, true + } + if json[i] <= ' ' { + continue + } + var num bool + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case 'n': + if i+1 < len(json) && json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + } + if num { + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + } + + } + return i, res, false +} + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = Get(json, path) + } + return res +} + +// GetManyBytes searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. 
+func GetManyBytes(json []byte, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = GetBytes(json, path) + } + return res +} + +func validpayload(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + i, ok = validany(data, i) + if !ok { + return i, false + } + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + } + } + return i, true + case ' ', '\t', '\n', '\r': + continue + } + } + return i, false +} +func validany(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '{': + return validobject(data, i+1) + case '[': + return validarray(data, i+1) + case '"': + return validstring(data, i+1) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return validnumber(data, i+1) + case 't': + return validtrue(data, i+1) + case 'f': + return validfalse(data, i+1) + case 'n': + return validnull(data, i+1) + } + } + return i, false +} +func validobject(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '}': + return i + 1, true + case '"': + key: + if i, ok = validstring(data, i+1); !ok { + return i, false + } + if i, ok = validcolon(data, i); !ok { + return i, false + } + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, '}'); !ok { + return i, false + } + if data[i] == '}' { + return i + 1, true + } + i++ + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '"': + goto key + } + } + return i, false + } + } + return i, false +} +func validcolon(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ':': + return i + 1, true + } + } + return i, false +} +func validcomma(data []byte, i int, end byte) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ',': + return i, true + case end: + return i, true + } + } + return i, false +} +func validarray(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + for ; i < len(data); i++ { + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, ']'); !ok { + return i, false + } + if data[i] == ']' { + return i + 1, true + } + } + case ' ', '\t', '\n', '\r': + continue + case ']': + return i + 1, true + } + } + return i, false +} +func validstring(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + if data[i] < ' ' { + return i, false + } else if data[i] == '\\' { + i++ + if i == len(data) { + return i, false + } + switch data[i] { + default: + return i, false + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + case 'u': + for j := 0; j < 4; j++ { + i++ + if i >= len(data) { + return i, false + } + if !((data[i] >= '0' && data[i] <= '9') || + (data[i] >= 'a' && data[i] <= 'f') || + (data[i] >= 'A' && data[i] <= 'F')) { + return i, false + } + } + } + } else if data[i] == '"' { + return i + 1, true + } + } + return i, false +} +func validnumber(data []byte, i int) (outi int, ok bool) { + i-- + // sign + if data[i] == '-' { + i++ + if i == len(data) { 
+ return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + } + // int + if i == len(data) { + return i, false + } + if data[i] == '0' { + i++ + } else { + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // frac + if i == len(data) { + return i, true + } + if data[i] == '.' { + i++ + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // exp + if i == len(data) { + return i, true + } + if data[i] == 'e' || data[i] == 'E' { + i++ + if i == len(data) { + return i, false + } + if data[i] == '+' || data[i] == '-' { + i++ + } + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + return i, true +} + +func validtrue(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' && + data[i+2] == 'e' { + return i + 3, true + } + return i, false +} +func validfalse(data []byte, i int) (outi int, ok bool) { + if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' && + data[i+2] == 's' && data[i+3] == 'e' { + return i + 4, true + } + return i, false +} +func validnull(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' && + data[i+2] == 'l' { + return i + 3, true + } + return i, false +} + +// Valid returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +// +func Valid(json string) bool { + _, ok := validpayload(stringBytes(json), 0) + return ok +} + +// ValidBytes returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +// +// If working with bytes, this method preferred over ValidBytes(string(data)) +// +func ValidBytes(json []byte) bool { + _, ok := validpayload(json, 0) + return ok +} + +func parseUint(s string) (n uint64, ok bool) { + var i int + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + uint64(s[i]-'0') + } else { + return 0, false + } + } + return n, true +} + +func parseInt(s string) (n int64, ok bool) { + var i int + var sign bool + if len(s) > 0 && s[0] == '-' { + sign = true + i++ + } + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + int64(s[i]-'0') + } else { + return 0, false + } + } + if sign { + return n * -1, true + } + return n, true +} + +// safeInt validates a given JSON number +// ensures it lies within the minimum and maximum representable JSON numbers +func safeInt(f float64) (n int64, ok bool) { + // https://tc39.es/ecma262/#sec-number.min_safe_integer + // https://tc39.es/ecma262/#sec-number.max_safe_integer + if f < -9007199254740991 || f > 9007199254740991 { + return 0, false + } + return int64(f), true +} + +// execModifier parses the path to find a matching modifier function. 
+// then input expects that the path already starts with a '@' +func execModifier(json, path string) (pathOut, res string, ok bool) { + name := path[1:] + var hasArgs bool + for i := 1; i < len(path); i++ { + if path[i] == ':' { + pathOut = path[i+1:] + name = path[1:i] + hasArgs = len(pathOut) > 0 + break + } + if path[i] == '|' { + pathOut = path[i:] + name = path[1:i] + break + } + if path[i] == '.' { + pathOut = path[i:] + name = path[1:i] + break + } + } + if fn, ok := modifiers[name]; ok { + var args string + if hasArgs { + var parsedArgs bool + switch pathOut[0] { + case '{', '[', '"': + res := Parse(pathOut) + if res.Exists() { + args = squash(pathOut) + pathOut = pathOut[len(args):] + parsedArgs = true + } + } + if !parsedArgs { + idx := strings.IndexByte(pathOut, '|') + if idx == -1 { + args = pathOut + pathOut = "" + } else { + args = pathOut[:idx] + pathOut = pathOut[idx:] + } + } + } + return pathOut, fn(json, args), true + } + return pathOut, res, false +} + +// unwrap removes the '[]' or '{}' characters around json +func unwrap(json string) string { + json = trim(json) + if len(json) >= 2 && (json[0] == '[' || json[0] == '{') { + json = json[1 : len(json)-1] + } + return json +} + +// DisableModifiers will disable the modifier syntax +var DisableModifiers = false + +var modifiers = map[string]func(json, arg string) string{ + "pretty": modPretty, + "ugly": modUgly, + "reverse": modReverse, + "this": modThis, + "flatten": modFlatten, + "join": modJoin, + "valid": modValid, + "keys": modKeys, + "values": modValues, +} + +// AddModifier binds a custom modifier command to the GJSON syntax. +// This operation is not thread safe and should be executed prior to +// using all other gjson function. +func AddModifier(name string, fn func(json, arg string) string) { + modifiers[name] = fn +} + +// ModifierExists returns true when the specified modifier exists. +func ModifierExists(name string, fn func(json, arg string) string) bool { + _, ok := modifiers[name] + return ok +} + +// cleanWS remove any non-whitespace from string +func cleanWS(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case ' ', '\t', '\n', '\r': + continue + default: + var s2 []byte + for i := 0; i < len(s); i++ { + switch s[i] { + case ' ', '\t', '\n', '\r': + s2 = append(s2, s[i]) + } + } + return string(s2) + } + } + return s +} + +// @pretty modifier makes the json look nice. +func modPretty(json, arg string) string { + if len(arg) > 0 { + opts := *pretty.DefaultOptions + Parse(arg).ForEach(func(key, value Result) bool { + switch key.String() { + case "sortKeys": + opts.SortKeys = value.Bool() + case "indent": + opts.Indent = cleanWS(value.String()) + case "prefix": + opts.Prefix = cleanWS(value.String()) + case "width": + opts.Width = int(value.Int()) + } + return true + }) + return bytesString(pretty.PrettyOptions(stringBytes(json), &opts)) + } + return bytesString(pretty.Pretty(stringBytes(json))) +} + +// @this returns the current element. Can be used to retrieve the root element. +func modThis(json, arg string) string { + return json +} + +// @ugly modifier removes all whitespace. +func modUgly(json, arg string) string { + return bytesString(pretty.Ugly(stringBytes(json))) +} + +// @reverse reverses array elements or root object members. 
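+// ["Sara","Alex","Jack"] -> ["Jack","Alex","Sara"]
+// {"first":"Tom","last":"Smith"} -> {"last":"Smith","first":"Tom"}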
+func modReverse(json, arg string) string { + res := Parse(json) + if res.IsArray() { + var values []Result + res.ForEach(func(_, value Result) bool { + values = append(values, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '[') + for i, j := len(values)-1, 0; i >= 0; i, j = i-1, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, values[i].Raw...) + } + out = append(out, ']') + return bytesString(out) + } + if res.IsObject() { + var keyValues []Result + res.ForEach(func(key, value Result) bool { + keyValues = append(keyValues, key, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '{') + for i, j := len(keyValues)-2, 0; i >= 0; i, j = i-2, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, keyValues[i+0].Raw...) + out = append(out, ':') + out = append(out, keyValues[i+1].Raw...) + } + out = append(out, '}') + return bytesString(out) + } + return json +} + +// @flatten an array with child arrays. +// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,[6,7]] +// The {"deep":true} arg can be provide for deep flattening. +// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,6,7] +// The original json is returned when the json is not an array. +func modFlatten(json, arg string) string { + res := Parse(json) + if !res.IsArray() { + return json + } + var deep bool + if arg != "" { + Parse(arg).ForEach(func(key, value Result) bool { + if key.String() == "deep" { + deep = value.Bool() + } + return true + }) + } + var out []byte + out = append(out, '[') + var idx int + res.ForEach(func(_, value Result) bool { + var raw string + if value.IsArray() { + if deep { + raw = unwrap(modFlatten(value.Raw, arg)) + } else { + raw = unwrap(value.Raw) + } + } else { + raw = value.Raw + } + raw = strings.TrimSpace(raw) + if len(raw) > 0 { + if idx > 0 { + out = append(out, ',') + } + out = append(out, raw...) + idx++ + } + return true + }) + out = append(out, ']') + return bytesString(out) +} + +// @keys extracts the keys from an object. +// {"first":"Tom","last":"Smith"} -> ["first","last"] +func modKeys(json, arg string) string { + v := Parse(json) + if !v.Exists() { + return "[]" + } + obj := v.IsObject() + var out strings.Builder + out.WriteByte('[') + var i int + v.ForEach(func(key, _ Result) bool { + if i > 0 { + out.WriteByte(',') + } + if obj { + out.WriteString(key.Raw) + } else { + out.WriteString("null") + } + i++ + return true + }) + out.WriteByte(']') + return out.String() +} + +// @values extracts the values from an object. +// {"first":"Tom","last":"Smith"} -> ["Tom","Smith"] +func modValues(json, arg string) string { + v := Parse(json) + if !v.Exists() { + return "[]" + } + if v.IsArray() { + return json + } + var out strings.Builder + out.WriteByte('[') + var i int + v.ForEach(func(_, value Result) bool { + if i > 0 { + out.WriteByte(',') + } + out.WriteString(value.Raw) + i++ + return true + }) + out.WriteByte(']') + return out.String() +} + +// @join multiple objects into a single object. +// [{"first":"Tom"},{"last":"Smith"}] -> {"first","Tom","last":"Smith"} +// The arg can be "true" to specify that duplicate keys should be preserved. +// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":37,"age":41} +// Without preserved keys: +// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":41} +// The original json is returned when the json is not an object. 
+func modJoin(json, arg string) string { + res := Parse(json) + if !res.IsArray() { + return json + } + var preserve bool + if arg != "" { + Parse(arg).ForEach(func(key, value Result) bool { + if key.String() == "preserve" { + preserve = value.Bool() + } + return true + }) + } + var out []byte + out = append(out, '{') + if preserve { + // Preserve duplicate keys. + var idx int + res.ForEach(func(_, value Result) bool { + if !value.IsObject() { + return true + } + if idx > 0 { + out = append(out, ',') + } + out = append(out, unwrap(value.Raw)...) + idx++ + return true + }) + } else { + // Deduplicate keys and generate an object with stable ordering. + var keys []Result + kvals := make(map[string]Result) + res.ForEach(func(_, value Result) bool { + if !value.IsObject() { + return true + } + value.ForEach(func(key, value Result) bool { + k := key.String() + if _, ok := kvals[k]; !ok { + keys = append(keys, key) + } + kvals[k] = value + return true + }) + return true + }) + for i := 0; i < len(keys); i++ { + if i > 0 { + out = append(out, ',') + } + out = append(out, keys[i].Raw...) + out = append(out, ':') + out = append(out, kvals[keys[i].String()].Raw...) + } + } + out = append(out, '}') + return bytesString(out) +} + +// @valid ensures that the json is valid before moving on. An empty string is +// returned when the json is not valid, otherwise it returns the original json. +func modValid(json, arg string) string { + if !Valid(json) { + return "" + } + return json +} + +// stringHeader instead of reflect.StringHeader +type stringHeader struct { + data unsafe.Pointer + len int +} + +// sliceHeader instead of reflect.SliceHeader +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +// getBytes casts the input json bytes to a string and safely returns the +// results as uniquely allocated data. This operation is intended to minimize +// copies and allocations for the large json string->[]byte. +func getBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + // safely get the string headers + rawhi := *(*stringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*stringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := sliceHeader{data: rawhi.data, len: rawhi.len, cap: rawhi.len} + strh := sliceHeader{data: strhi.data, len: strhi.len, cap: rawhi.len} + if strh.data == nil { + // str is nil + if rawh.data == nil { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.data == nil { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if uintptr(strh.data) >= uintptr(rawh.data) && + uintptr(strh.data)+uintptr(strh.len) <= + uintptr(rawh.data)+uintptr(rawh.len) { + // Str is a substring of Raw. 
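+			// Compute Str's offset into Raw so both fields can share the single copied buffer below.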
+ start := uintptr(strh.data) - uintptr(rawh.data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+uintptr(strh.len)] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + } + return result +} + +// fillIndex finds the position of Raw data and assigns it to the Index field +// of the resulting value. If the position cannot be found then Index zero is +// used instead. +func fillIndex(json string, c *parseContext) { + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*stringHeader)(unsafe.Pointer(&json)) + rhdr := *(*stringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(uintptr(rhdr.data) - uintptr(jhdr.data)) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } +} + +func stringBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + data: (*stringHeader)(unsafe.Pointer(&s)).data, + len: len(s), + cap: len(s), + })) +} + +func bytesString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/src/otel-collector/vendor/github.com/tidwall/gjson/logo.png b/src/otel-collector/vendor/github.com/tidwall/gjson/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..17a8bbe9d651e5d79cbd59a3645a152443440ad6 GIT binary patch literal 15936 zcmdseRa9I})9xU_0znfrFi3EBcXtB8-Q8tyw?MEE+&u)>;O-g-!QF!n?wrm0ecyln zi+`Pqb92@@b204MboZ|AuIj4isoIgsic)AO1SlX72u(&>{38ei4_w2jBEbT8g#0}> zfj{`J5}K~64(6^NM$Tp+5mN_aGq8-Ek%ieuGb2+^ry(I^6HUo1oax$2(u&{u+covw&X$WR|Y3j=W4v9v&Wy9&C&b&K688JUl#1%&bhTtPDU41{Y6zS0f06 zy$kt&Mi4i1F>$tXbhUD@2fvJHWbEMPDnJHE`mZV2IVvdp*TD8J|6V9y$(SHUj!Z0! 

Bg_)L$48&o}ack zt!HAQladuqH6nxcdZjxF6vTK*7%AV0IOX}JZ^u_OkMumMel#gIzv1VJUb|?$j71?Q zE%B-a&vFk0&Z20HLn$L5hH9dQ4Ef5bKHOX_RfA5_`gJvR%bW4}0DCGg0Va&YC>upU z1&8GC>xLY}LkxejF;yq2sOY~5Cqo4UPpDVfIk~O=P(Gb%k`up^XsPRv9)fXL9y}y5 z_nYE{jUJUly7pJ?$r_eiajL0B@N?t6efF1pS9lk2UGDMWIj>@eS3OH*kme)>qMs4c zXm_q!8hu`I8Dhv5J1?UvlYLJ*X6D(~=y^sb&mT@}>JxIp-$sTvzSdP`p2 zJSTpI%GX_eG4+h<`8LXOC{XRWB^Zd0<_&>b2eoF`!(!~j1?f~Xrey`D;63!OSo@1j zhVkNITfkbp3}EkdgcmMq1ZjCrPrG8@X$!y3nZFDG<8?k82gdmVJ<<=koN(R0uQJ&q&;Hm9we)+ z>EiW_<2L!S=W{bXQR{t-Xk&^=i3!xJ1GH*$ft^*`kFY2U5$7}?R(xpv1OE!wXhM-U z{|-m&g!{Wci5J~AR7xhAcz2%T1X}X;Lm5kqn$n>CdZG%!jlgyEgu}8cSE=Tna>KaW zE{i{1$MRasX61aWzh$Kz+gg_XXfmLI)gJjN8%-etjvKrZa0TIVu@FNzi_DBbgG}?X zc%`<^VRHLdaERnl?k6tkmy~V-6wDDz--2n{wg8GA9pEX|*>WDA)uFS%@&MN%-fruuv;9cz5hkb^M5ny{e|a>hsQrwX-D^ zNBO*f*^1b~?2NHQ2g~@Hiq$_V;Z8|Q@sz(OJ-_;@xOeF3^57a-&;bjXTR0^(9&O1`Qs5mDXKaZ{gYh~^a4>N0el@Q8ji@;G0VwtnJC=gh zkvhS5%hY0~-FeS=8*`P?hYjBB(TJ(h3vvLJ;q{z?7Jf~hQZOlh z8(M!wZ6}#fur(A{mkQvWp^(z{kJ9Qcf&hRH6HK!c5b1?W28wck^aQ{Ju^0{PD&(m< zLbu{`I17`!l>tz)uDi~j;Q*7#M!X;*rP#E#K`0MHVO=C_Y>Kpq3I%tz)KS21z(70w zu+S+efLBlwQ<3knLW3-aM&dW%uuv1r?kQqDWsn|wHc35K!jBl%t~9oLn(8w!(iF2Y zj)Ca3zC|-dT69O*c44Drzb zTWRcGB4pjuzq;tNLKJ7H7&3CTH`^5&fs^&Csh+4TdNT~n>o18P123FK1<6Ap7>9OE zH?~wjjt8TicxY-E;Ld@{^P%j9BpeC^HJlw8iW2I<*Vc#1VH5Y&k@$T23>riN5a^fE z1xtK3We+L~pTm`C@mKj&g@flbKtRjuc?i2+3nW^d516YT2Qx*|&}0%IIZ{BK)n@-A z7W$f>T8g%2qR?15Ezb7#Al1jSng=Um;<;^4CZ)UF4*B#s16}WxXDR&e;IFDemyJBP zQD>pCTMx2W)Iwg$ckc4ElkYeRp?hVt(&_O^6u-=WO4y!T>T70vx)M)1f17nmsqw{$ zZhYP=C<2j}m^Lg-ex&8RKf~0WrfrL!~NLQ^Xk?)No>Z1xiNn~MBa)Y#@)-369lm6 zDH#U3Pv&kw{mt3+_t3uW8=*{`^%?Rl0~ff@XZrjZDZCjasI5PtLP6)zBrRO$pYlfV z4DCdS&(8)uR*23AuP?jhTi@#6yL=G*b~7!5d3?2d_2DzzR3V+SVCZe@ULo|*Qz`CV z*j}+=>D|^uy$<{s0tgCLYba;t*4jWsj}CfRI?T4cS90us!fy8Lz;(qp7V}H&`+loKEpPZr6wt`2EqWMWj z6-EjjNWU@D@IQdNmlvT5>qUq{`9m maxcomp*len(str). +func MatchLimit(str, pattern string, maxcomp int) (matched, stopped bool) { + if pattern == "*" { + return true, false + } + counter := 0 + r := match(str, pattern, len(str), &counter, maxcomp) + if r == rStop { + return false, true + } + return r == rMatch, false +} + +type result int + +const ( + rNoMatch result = iota + rMatch + rStop +) + +func match(str, pat string, slen int, counter *int, maxcomp int) result { + // check complexity limit + if maxcomp > -1 { + if *counter > slen*maxcomp { + return rStop + } + *counter++ + } + + for len(pat) > 0 { + var wild bool + pc, ps := rune(pat[0]), 1 + if pc > 0x7f { + pc, ps = utf8.DecodeRuneInString(pat) + } + var sc rune + var ss int + if len(str) > 0 { + sc, ss = rune(str[0]), 1 + if sc > 0x7f { + sc, ss = utf8.DecodeRuneInString(str) + } + } + switch pc { + case '?': + if ss == 0 { + return rNoMatch + } + case '*': + // Ignore repeating stars. + for len(pat) > 1 && pat[1] == '*' { + pat = pat[1:] + } + + // If this star is the last character then it must be a match. + if len(pat) == 1 { + return rMatch + } + + // Match and trim any non-wildcard suffix characters. + var ok bool + str, pat, ok = matchTrimSuffix(str, pat) + if !ok { + return rNoMatch + } + + // Check for single star again. + if len(pat) == 1 { + return rMatch + } + + // Perform recursive wildcard search. 
+ r := match(str, pat[1:], slen, counter, maxcomp) + if r != rNoMatch { + return r + } + if len(str) == 0 { + return rNoMatch + } + wild = true + default: + if ss == 0 { + return rNoMatch + } + if pc == '\\' { + pat = pat[ps:] + pc, ps = utf8.DecodeRuneInString(pat) + if ps == 0 { + return rNoMatch + } + } + if sc != pc { + return rNoMatch + } + } + str = str[ss:] + if !wild { + pat = pat[ps:] + } + } + if len(str) == 0 { + return rMatch + } + return rNoMatch +} + +// matchTrimSuffix matches and trims any non-wildcard suffix characters. +// Returns the trimed string and pattern. +// +// This is called because the pattern contains extra data after the wildcard +// star. Here we compare any suffix characters in the pattern to the suffix of +// the target string. Basically a reverse match that stops when a wildcard +// character is reached. This is a little trickier than a forward match because +// we need to evaluate an escaped character in reverse. +// +// Any matched characters will be trimmed from both the target +// string and the pattern. +func matchTrimSuffix(str, pat string) (string, string, bool) { + // It's expected that the pattern has at least two bytes and the first byte + // is a wildcard star '*' + match := true + for len(str) > 0 && len(pat) > 1 { + pc, ps := utf8.DecodeLastRuneInString(pat) + var esc bool + for i := 0; ; i++ { + if pat[len(pat)-ps-i-1] != '\\' { + if i&1 == 1 { + esc = true + ps++ + } + break + } + } + if pc == '*' && !esc { + match = true + break + } + sc, ss := utf8.DecodeLastRuneInString(str) + if !((pc == '?' && !esc) || pc == sc) { + match = false + break + } + str = str[:len(str)-ss] + pat = pat[:len(pat)-ps] + } + return str, pat, match +} + +var maxRuneBytes = [...]byte{244, 143, 191, 191} + +// Allowable parses the pattern and determines the minimum and maximum allowable +// values that the pattern can represent. +// When the max cannot be determined, 'true' will be returned +// for infinite. +func Allowable(pattern string) (min, max string) { + if pattern == "" || pattern[0] == '*' { + return "", "" + } + + minb := make([]byte, 0, len(pattern)) + maxb := make([]byte, 0, len(pattern)) + var wild bool + for i := 0; i < len(pattern); i++ { + if pattern[i] == '*' { + wild = true + break + } + if pattern[i] == '?' { + minb = append(minb, 0) + maxb = append(maxb, maxRuneBytes[:]...) + } else { + minb = append(minb, pattern[i]) + maxb = append(maxb, pattern[i]) + } + } + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb) +} + +// IsPattern returns true if the string is a pattern. +func IsPattern(str string) bool { + for i := 0; i < len(str); i++ { + if str[i] == '*' || str[i] == '?' 
{ + return true + } + } + return false +} diff --git a/src/otel-collector/vendor/github.com/tidwall/pretty/LICENSE b/src/otel-collector/vendor/github.com/tidwall/pretty/LICENSE new file mode 100644 index 00000000..993b83f2 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/pretty/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/otel-collector/vendor/github.com/tidwall/pretty/README.md b/src/otel-collector/vendor/github.com/tidwall/pretty/README.md new file mode 100644 index 00000000..d3be5e54 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/pretty/README.md @@ -0,0 +1,122 @@ +# Pretty + +[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty) + +Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads. + +Getting Started +=============== + +## Installing + +To start using Pretty, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/pretty +``` + +This will retrieve the library. + +## Pretty + +Using this example: + +```json +{"name": {"first":"Tom","last":"Anderson"}, "age":37, +"children": ["Sara","Alex","Jack"], +"fav.movie": "Deer Hunter", "friends": [ + {"first": "Janet", "last": "Murphy", "age": 44} + ]} +``` + +The following code: +```go +result = pretty.Pretty(example) +``` + +Will format the json to: + +```json +{ + "name": { + "first": "Tom", + "last": "Anderson" + }, + "age": 37, + "children": ["Sara", "Alex", "Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + { + "first": "Janet", + "last": "Murphy", + "age": 44 + } + ] +} +``` + +## Color + +Color will colorize the json for outputing to the screen. + +```json +result = pretty.Color(json, nil) +``` + +Will add color to the result for printing to the terminal. +The second param is used for a customizing the style, and passing nil will use the default `pretty.TerminalStyle`. 
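
As a rough sketch of what a customized style might look like (based on the `Style` struct and `TerminalStyle` defined in `pretty.go` further down in this patch; the escape sequences chosen here are purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/tidwall/pretty"
)

func main() {
	example := []byte(`{"name":{"first":"Tom","last":"Anderson"},"age":37}`)

	// Passing nil falls back to pretty.TerminalStyle.
	fmt.Printf("%s\n", pretty.Color(pretty.Pretty(example), nil))

	// Each Style field holds an opening/closing escape-sequence pair that is
	// wrapped around the corresponding JSON token kind.
	custom := &pretty.Style{
		Key:    [2]string{"\x1b[1m", "\x1b[0m"},  // bold object keys
		String: [2]string{"\x1b[32m", "\x1b[0m"}, // green strings
		Number: [2]string{"\x1b[33m", "\x1b[0m"}, // yellow numbers
		True:   [2]string{"\x1b[36m", "\x1b[0m"},
		False:  [2]string{"\x1b[36m", "\x1b[0m"},
		Null:   [2]string{"\x1b[31m", "\x1b[0m"},
		Escape: [2]string{"\x1b[35m", "\x1b[0m"},
	}
	fmt.Printf("%s\n", pretty.Color(pretty.Pretty(example), custom))
}
```

Any field left at its zero value simply adds no escape codes around that token kind, so partial styles are fine.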
+ +## Ugly + +The following code: +```go +result = pretty.Ugly(example) +``` + +Will format the json to: + +```json +{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}``` +``` + +## Customized output + +There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options: + +```go +type Options struct { + // Width is an max column width for single line arrays + // Default is 80 + Width int + // Prefix is a prefix for all lines + // Default is an empty string + Prefix string + // Indent is the nested indentation + // Default is two spaces + Indent string + // SortKeys will sort the keys alphabetically + // Default is false + SortKeys bool +} +``` +## Performance + +Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods. +``` +BenchmarkPretty-16 1000000 1034 ns/op 720 B/op 2 allocs/op +BenchmarkPrettySortKeys-16 586797 1983 ns/op 2848 B/op 14 allocs/op +BenchmarkUgly-16 4652365 254 ns/op 240 B/op 1 allocs/op +BenchmarkUglyInPlace-16 6481233 183 ns/op 0 B/op 0 allocs/op +BenchmarkJSONIndent-16 450654 2687 ns/op 1221 B/op 0 allocs/op +BenchmarkJSONCompact-16 685111 1699 ns/op 442 B/op 0 allocs/op +``` + +*These benchmarks were run on a MacBook Pro 2.4 GHz 8-Core Intel Core i9.* + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +Pretty source code is available under the MIT [License](/LICENSE). + diff --git a/src/otel-collector/vendor/github.com/tidwall/pretty/pretty.go b/src/otel-collector/vendor/github.com/tidwall/pretty/pretty.go new file mode 100644 index 00000000..f3f756aa --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/pretty/pretty.go @@ -0,0 +1,674 @@ +package pretty + +import ( + "bytes" + "encoding/json" + "sort" + "strconv" +) + +// Options is Pretty options +type Options struct { + // Width is an max column width for single line arrays + // Default is 80 + Width int + // Prefix is a prefix for all lines + // Default is an empty string + Prefix string + // Indent is the nested indentation + // Default is two spaces + Indent string + // SortKeys will sort the keys alphabetically + // Default is false + SortKeys bool +} + +// DefaultOptions is the default options for pretty formats. +var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: " ", SortKeys: false} + +// Pretty converts the input json into a more human readable format where each +// element is on it's own line with clear indentation. +func Pretty(json []byte) []byte { return PrettyOptions(json, nil) } + +// PrettyOptions is like Pretty but with customized options. +func PrettyOptions(json []byte, opts *Options) []byte { + if opts == nil { + opts = DefaultOptions + } + buf := make([]byte, 0, len(json)) + if len(opts.Prefix) != 0 { + buf = append(buf, opts.Prefix...) + } + buf, _, _, _ = appendPrettyAny(buf, json, 0, true, + opts.Width, opts.Prefix, opts.Indent, opts.SortKeys, + 0, 0, -1) + if len(buf) > 0 { + buf = append(buf, '\n') + } + return buf +} + +// Ugly removes insignificant space characters from the input json byte slice +// and returns the compacted result. +func Ugly(json []byte) []byte { + buf := make([]byte, 0, len(json)) + return ugly(buf, json) +} + +// UglyInPlace removes insignificant space characters from the input json +// byte slice and returns the compacted result. This method reuses the +// input json buffer to avoid allocations. 
Do not use the original bytes +// slice upon return. +func UglyInPlace(json []byte) []byte { return ugly(json, json) } + +func ugly(dst, src []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] > ' ' { + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } + } + } + return dst +} + +func isNaNOrInf(src []byte) bool { + return src[0] == 'i' || //Inf + src[0] == 'I' || // inf + src[0] == '+' || // +Inf + src[0] == 'N' || // Nan + (src[0] == 'n' && len(src) > 1 && src[1] != 'u') // nan +} + +func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == '"' { + return appendPrettyString(buf, json, i, nl) + } + + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' || isNaNOrInf(json[i:]) { + return appendPrettyNumber(buf, json, i, nl) + } + if json[i] == '{' { + return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + if json[i] == '[' { + return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + switch json[i] { + case 't': + return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true + case 'f': + return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true + case 'n': + return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true + } + } + return buf, i, nl, true +} + +type pair struct { + kstart, kend int + vstart, vend int +} + +type byKeyVal struct { + sorted bool + json []byte + buf []byte + pairs []pair +} + +func (arr *byKeyVal) Len() int { + return len(arr.pairs) +} +func (arr *byKeyVal) Less(i, j int) bool { + if arr.isLess(i, j, byKey) { + return true + } + if arr.isLess(j, i, byKey) { + return false + } + return arr.isLess(i, j, byVal) +} +func (arr *byKeyVal) Swap(i, j int) { + arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i] + arr.sorted = true +} + +type byKind int + +const ( + byKey byKind = 0 + byVal byKind = 1 +) + +type jtype int + +const ( + jnull jtype = iota + jfalse + jnumber + jstring + jtrue + jjson +) + +func getjtype(v []byte) jtype { + if len(v) == 0 { + return jnull + } + switch v[0] { + case '"': + return jstring + case 'f': + return jfalse + case 't': + return jtrue + case 'n': + return jnull + case '[', '{': + return jjson + default: + return jnumber + } +} + +func (arr *byKeyVal) isLess(i, j int, kind byKind) bool { + k1 := arr.json[arr.pairs[i].kstart:arr.pairs[i].kend] + k2 := arr.json[arr.pairs[j].kstart:arr.pairs[j].kend] + var v1, v2 []byte + if kind == byKey { + v1 = k1 + v2 = k2 + } else { + v1 = bytes.TrimSpace(arr.buf[arr.pairs[i].vstart:arr.pairs[i].vend]) + v2 = bytes.TrimSpace(arr.buf[arr.pairs[j].vstart:arr.pairs[j].vend]) + if len(v1) >= len(k1)+1 { + v1 = bytes.TrimSpace(v1[len(k1)+1:]) + } + if len(v2) >= len(k2)+1 { + v2 = bytes.TrimSpace(v2[len(k2)+1:]) + } + } + t1 := getjtype(v1) + t2 := getjtype(v2) + if t1 < t2 { + return true + } + if t1 > t2 { + return false + } + if t1 == jstring { + s1 := parsestr(v1) + s2 := parsestr(v2) + return string(s1) < string(s2) + } + if t1 == jnumber { + n1, _ := strconv.ParseFloat(string(v1), 64) + n2, _ := strconv.ParseFloat(string(v2), 64) + return n1 < n2 + } + return string(v1) < string(v2) + +} + +func 
parsestr(s []byte) []byte { + for i := 1; i < len(s); i++ { + if s[i] == '\\' { + var str string + json.Unmarshal(s, &str) + return []byte(str) + } + if s[i] == '"' { + return s[1:i] + } + } + return nil +} + +func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + var ok bool + if width > 0 { + if pretty && open == '[' && max == -1 { + // here we try to create a single line array + max := width - (len(buf) - nl) + if max > 3 { + s1, s2 := len(buf), i + buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max) + if ok && len(buf)-s1 <= max { + return buf, i, nl, true + } + buf = buf[:s1] + i = s2 + } + } else if max != -1 && open == '{' { + return buf, i, nl, false + } + } + buf = append(buf, open) + i++ + var pairs []pair + if open == '{' && sortkeys { + pairs = make([]pair, 0, 8) + } + var n int + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == close { + if pretty { + if open == '{' && sortkeys { + buf = sortPairs(json, buf, pairs) + } + if n > 0 { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + } + if buf[len(buf)-1] != open { + buf = appendTabs(buf, prefix, indent, tabs) + } + } + buf = append(buf, close) + return buf, i + 1, nl, open != '{' + } + if open == '[' || json[i] == '"' { + if n > 0 { + buf = append(buf, ',') + if width != -1 && open == '[' { + buf = append(buf, ' ') + } + } + var p pair + if pretty { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + if open == '{' && sortkeys { + p.kstart = i + p.vstart = len(buf) + } + buf = appendTabs(buf, prefix, indent, tabs+1) + } + if open == '{' { + buf, i, nl, _ = appendPrettyString(buf, json, i, nl) + if sortkeys { + p.kend = i + } + buf = append(buf, ':') + if pretty { + buf = append(buf, ' ') + } + } + buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max) + if max != -1 && !ok { + return buf, i, nl, false + } + if pretty && open == '{' && sortkeys { + p.vend = len(buf) + if p.kstart > p.kend || p.vstart > p.vend { + // bad data. disable sorting + sortkeys = false + } else { + pairs = append(pairs, p) + } + } + i-- + n++ + } + } + return buf, i, nl, open != '{' +} +func sortPairs(json, buf []byte, pairs []pair) []byte { + if len(pairs) == 0 { + return buf + } + vstart := pairs[0].vstart + vend := pairs[len(pairs)-1].vend + arr := byKeyVal{false, json, buf, pairs} + sort.Stable(&arr) + if !arr.sorted { + return buf + } + nbuf := make([]byte, 0, vend-vstart) + for i, p := range pairs { + nbuf = append(nbuf, buf[p.vstart:p.vend]...) + if i < len(pairs)-1 { + nbuf = append(nbuf, ',') + nbuf = append(nbuf, '\n') + } + } + return append(buf[:vstart], nbuf...) 
+} + +func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] == '"' { + var sc int + for j := i - 1; j > s; j-- { + if json[j] == '\\' { + sc++ + } else { + break + } + } + if sc%2 == 1 { + continue + } + i++ + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' { + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendTabs(buf []byte, prefix, indent string, tabs int) []byte { + if len(prefix) != 0 { + buf = append(buf, prefix...) + } + if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' { + for i := 0; i < tabs; i++ { + buf = append(buf, ' ', ' ') + } + } else { + for i := 0; i < tabs; i++ { + buf = append(buf, indent...) + } + } + return buf +} + +// Style is the color style +type Style struct { + Key, String, Number [2]string + True, False, Null [2]string + Escape [2]string + Append func(dst []byte, c byte) []byte +} + +func hexp(p byte) byte { + switch { + case p < 10: + return p + '0' + default: + return (p - 10) + 'a' + } +} + +// TerminalStyle is for terminals +var TerminalStyle *Style + +func init() { + TerminalStyle = &Style{ + Key: [2]string{"\x1B[94m", "\x1B[0m"}, + String: [2]string{"\x1B[92m", "\x1B[0m"}, + Number: [2]string{"\x1B[93m", "\x1B[0m"}, + True: [2]string{"\x1B[96m", "\x1B[0m"}, + False: [2]string{"\x1B[96m", "\x1B[0m"}, + Null: [2]string{"\x1B[91m", "\x1B[0m"}, + Escape: [2]string{"\x1B[35m", "\x1B[0m"}, + Append: func(dst []byte, c byte) []byte { + if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') { + dst = append(dst, "\\u00"...) + dst = append(dst, hexp((c>>4)&0xF)) + return append(dst, hexp((c)&0xF)) + } + return append(dst, c) + }, + } +} + +// Color will colorize the json. The style parma is used for customizing +// the colors. Passing nil to the style param will use the default +// TerminalStyle. +func Color(src []byte, style *Style) []byte { + if style == nil { + style = TerminalStyle + } + apnd := style.Append + if apnd == nil { + apnd = func(dst []byte, c byte) []byte { + return append(dst, c) + } + } + type stackt struct { + kind byte + key bool + } + var dst []byte + var stack []stackt + for i := 0; i < len(src); i++ { + if src[i] == '"' { + key := len(stack) > 0 && stack[len(stack)-1].key + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + dst = apnd(dst, '"') + esc := false + uesc := 0 + for i = i + 1; i < len(src); i++ { + if src[i] == '\\' { + if key { + dst = append(dst, style.Key[1]...) + } else { + dst = append(dst, style.String[1]...) + } + dst = append(dst, style.Escape[0]...) + dst = apnd(dst, src[i]) + esc = true + if i+1 < len(src) && src[i+1] == 'u' { + uesc = 5 + } else { + uesc = 1 + } + } else if esc { + dst = apnd(dst, src[i]) + if uesc == 1 { + esc = false + dst = append(dst, style.Escape[1]...) + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + } else { + uesc-- + } + } else { + dst = apnd(dst, src[i]) + } + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + if esc { + dst = append(dst, style.Escape[1]...) + } else if key { + dst = append(dst, style.Key[1]...) 
+ } else { + dst = append(dst, style.String[1]...) + } + } else if src[i] == '{' || src[i] == '[' { + stack = append(stack, stackt{src[i], src[i] == '{'}) + dst = apnd(dst, src[i]) + } else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 { + stack = stack[:len(stack)-1] + dst = apnd(dst, src[i]) + } else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' { + stack[len(stack)-1].key = !stack[len(stack)-1].key + dst = apnd(dst, src[i]) + } else { + var kind byte + if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' || isNaNOrInf(src[i:]) { + kind = '0' + dst = append(dst, style.Number[0]...) + } else if src[i] == 't' { + kind = 't' + dst = append(dst, style.True[0]...) + } else if src[i] == 'f' { + kind = 'f' + dst = append(dst, style.False[0]...) + } else if src[i] == 'n' { + kind = 'n' + dst = append(dst, style.Null[0]...) + } else { + dst = apnd(dst, src[i]) + } + if kind != 0 { + for ; i < len(src); i++ { + if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' { + i-- + break + } + dst = apnd(dst, src[i]) + } + if kind == '0' { + dst = append(dst, style.Number[1]...) + } else if kind == 't' { + dst = append(dst, style.True[1]...) + } else if kind == 'f' { + dst = append(dst, style.False[1]...) + } else if kind == 'n' { + dst = append(dst, style.Null[1]...) + } + } + } + } + return dst +} + +// Spec strips out comments and trailing commas and convert the input to a +// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259 +// +// The resulting JSON will always be the same length as the input and it will +// include all of the same line breaks at matching offsets. This is to ensure +// the result can be later processed by a external parser and that that +// parser will report messages or errors with the correct offsets. +func Spec(src []byte) []byte { + return spec(src, nil) +} + +// SpecInPlace is the same as Spec, but this method reuses the input json +// buffer to avoid allocations. Do not use the original bytes slice upon return. 
+func SpecInPlace(src []byte) []byte { + return spec(src, src) +} + +func spec(src, dst []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] == '/' { + if i < len(src)-1 { + if src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src); i++ { + if src[i] == '\n' { + dst = append(dst, '\n') + break + } else if src[i] == '\t' || src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + if src[i+1] == '*' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src)-1; i++ { + if src[i] == '*' && src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i++ + break + } else if src[i] == '\n' || src[i] == '\t' || + src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + } + } + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } else if src[i] == '}' || src[i] == ']' { + for j := len(dst) - 2; j >= 0; j-- { + if dst[j] <= ' ' { + continue + } + if dst[j] == ',' { + dst[j] = ' ' + } + break + } + } + } + return dst +} diff --git a/src/otel-collector/vendor/github.com/tidwall/tinylru/LICENSE b/src/otel-collector/vendor/github.com/tidwall/tinylru/LICENSE new file mode 100644 index 00000000..3c0c6ad8 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/tinylru/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2020 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/src/otel-collector/vendor/github.com/tidwall/tinylru/README.md b/src/otel-collector/vendor/github.com/tidwall/tinylru/README.md new file mode 100644 index 00000000..0c67dbd2 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/tinylru/README.md @@ -0,0 +1,54 @@ +# `tinylru` + +[![GoDoc](https://godoc.org/github.com/tidwall/tinylru?status.svg)](https://godoc.org/github.com/tidwall/tinylru) + +A fast little LRU cache. + +## Getting Started + +### Installing + +To start using `tinylru`, install Go and run go get: + +``` +$ go get -u github.com/tidwall/tinylru +``` + +This will retrieve the library. + +### Usage + +```go +// Create an LRU cache +var cache tinylru.LRU + +// Set the cache size. This is the maximum number of items that the cache can +// hold before evicting old items. The default size is 256. +cache.Resize(1024) + +// Set a key. Returns the previous value and ok if a previous value exists. 
+prev, ok := cache.Set("hello", "world") + +// Get a key. Returns the value and ok if the value exists. +value, ok := cache.Get("hello") + +// Delete a key. Returns the deleted value and ok if a previous value exists. +prev, ok := tr.Delete("hello") +``` + +A `Set` function may evict old items when adding a new item while LRU is at +capacity. If you want to know what was evicted then use the `SetEvicted` +function. + +```go +// Set a key and return the evicted item, if any. +prev, ok, evictedKey, evictedValue, evicted := cache.SetEvicted("hello", "jello") +``` + +### Contact + +Josh Baker [@tidwall](https://twitter.com/tidwall) + +### License + +`tinylru` source code is available under the MIT License. diff --git a/src/otel-collector/vendor/github.com/tidwall/tinylru/lru.go b/src/otel-collector/vendor/github.com/tidwall/tinylru/lru.go new file mode 100644 index 00000000..682230ab --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/tinylru/lru.go @@ -0,0 +1,208 @@ +// Copyright 2020 Joshua J Baker. All rights reserved. +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package tinylru + +import "sync" + +// DefaultSize is the default maximum size of an LRU cache before older items +// get automatically evicted. +const DefaultSize = 256 + +type lruItem struct { + key interface{} // user-defined key + value interface{} // user-defined value + prev *lruItem // prev item in list. More recently used + next *lruItem // next item in list. Less recently used +} + +// LRU implements an LRU cache +type LRU struct { + mu sync.RWMutex // protect all things + size int // max number of items. + items map[interface{}]*lruItem // active items + head *lruItem // head of list + tail *lruItem // tail of list +} + +//go:noinline +func (lru *LRU) init() { + lru.items = make(map[interface{}]*lruItem) + lru.head = new(lruItem) + lru.tail = new(lruItem) + lru.head.next = lru.tail + lru.tail.prev = lru.head + if lru.size == 0 { + lru.size = DefaultSize + } +} + +func (lru *LRU) evict() *lruItem { + item := lru.tail.prev + lru.pop(item) + delete(lru.items, item.key) + return item +} + +func (lru *LRU) pop(item *lruItem) { + item.prev.next = item.next + item.next.prev = item.prev +} + +func (lru *LRU) push(item *lruItem) { + lru.head.next.prev = item + item.next = lru.head.next + item.prev = lru.head + lru.head.next = item +} + +// Resize sets the maximum size of an LRU cache. If this value is less than +// the number of items currently in the cache, then items will be evicted. +// Returns evicted items. +// This operation will panic if the size is less than one. +func (lru *LRU) Resize(size int) (evictedKeys []interface{}, + evictedValues []interface{}) { + if size <= 0 { + panic("invalid size") + } + + lru.mu.Lock() + defer lru.mu.Unlock() + for size < len(lru.items) { + item := lru.evict() + evictedKeys = append(evictedKeys, item.key) + evictedValues = append(evictedValues, item.value) + } + lru.size = size + return evictedKeys, evictedValues +} + +// Len returns the length of the lru cache +func (lru *LRU) Len() int { + lru.mu.RLock() + defer lru.mu.RUnlock() + return len(lru.items) +} + +// SetEvicted sets or replaces a value for a key. If this operation causes an +// eviction then the evicted item is returned. 
+func (lru *LRU) SetEvicted(key interface{}, value interface{}) ( + prev interface{}, replaced bool, evictedKey interface{}, + evictedValue interface{}, evicted bool) { + lru.mu.Lock() + defer lru.mu.Unlock() + if lru.items == nil { + lru.init() + } + item := lru.items[key] + if item == nil { + if len(lru.items) == lru.size { + item = lru.evict() + evictedKey, evictedValue, evicted = item.key, item.value, true + } else { + item = new(lruItem) + } + item.key = key + item.value = value + lru.push(item) + lru.items[key] = item + } else { + prev, replaced = item.value, true + item.value = value + if lru.head.next != item { + lru.pop(item) + lru.push(item) + } + } + return prev, replaced, evictedKey, evictedValue, evicted +} + +// Set or replace a value for a key. +func (lru *LRU) Set(key interface{}, value interface{}) (prev interface{}, + replaced bool) { + prev, replaced, _, _, _ = lru.SetEvicted(key, value) + return prev, replaced +} + +// Get a value for key +func (lru *LRU) Get(key interface{}) (value interface{}, ok bool) { + lru.mu.Lock() + defer lru.mu.Unlock() + item := lru.items[key] + if item == nil { + return nil, false + } + if lru.head.next != item { + lru.pop(item) + lru.push(item) + } + return item.value, true +} + +// Contains returns true if the key exists. +func (lru *LRU) Contains(key interface{}) bool { + lru.mu.RLock() + defer lru.mu.RUnlock() + _, ok := lru.items[key] + return ok +} + +// Peek returns the value for key value without updating +// the recently used status. +func (lru *LRU) Peek(key interface{}) (value interface{}, ok bool) { + lru.mu.RLock() + defer lru.mu.RUnlock() + + if item := lru.items[key]; item != nil { + return item.value, true + } + return nil, false +} + +// Delete a value for a key +func (lru *LRU) Delete(key interface{}) (prev interface{}, deleted bool) { + lru.mu.Lock() + defer lru.mu.Unlock() + item := lru.items[key] + if item == nil { + return nil, false + } + delete(lru.items, key) + lru.pop(item) + return item.value, true +} + +// Range iterates over all key/values in the order of most recently to +// least recently used items. +// It's not safe to call other LRU operations while ranging. +func (lru *LRU) Range(iter func(key interface{}, value interface{}) bool) { + lru.mu.RLock() + defer lru.mu.RUnlock() + if head := lru.head; head != nil { + item := head.next + for item != lru.tail { + if !iter(item.key, item.value) { + return + } + item = item.next + } + } +} + +// Reverse iterates over all key/values in the order of least recently to +// most recently used items. +// It's not safe to call other LRU operations while ranging. 
+func (lru *LRU) Reverse(iter func(key interface{}, value interface{}) bool) { + lru.mu.RLock() + defer lru.mu.RUnlock() + if tail := lru.tail; tail != nil { + item := tail.prev + for item != lru.head { + if !iter(item.key, item.value) { + return + } + item = item.prev + } + } +} diff --git a/src/otel-collector/vendor/github.com/tidwall/wal/.gitignore b/src/otel-collector/vendor/github.com/tidwall/wal/.gitignore new file mode 100644 index 00000000..b724714b --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/wal/.gitignore @@ -0,0 +1,3 @@ +testlog +.idea +vendor \ No newline at end of file diff --git a/src/otel-collector/vendor/github.com/tidwall/wal/LICENSE b/src/otel-collector/vendor/github.com/tidwall/wal/LICENSE new file mode 100644 index 00000000..22e6fe0a --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/wal/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2019 Joshua J Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/src/otel-collector/vendor/github.com/tidwall/wal/README.md b/src/otel-collector/vendor/github.com/tidwall/wal/README.md new file mode 100644 index 00000000..68cc0cd7 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/wal/README.md @@ -0,0 +1,79 @@ +# `wal` +[![GoDoc](https://godoc.org/github.com/tidwall/wal?status.svg)](https://godoc.org/github.com/tidwall/wal) + +A simple and fast write ahead log for Go. + +## Features + +- High durability +- Fast operations +- Monotonic indexes +- Batch writes +- Log truncation from front or back. + +## Getting Started + +### Installing + +To start using `wal`, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/wal +``` + +This will retrieve the library. + +### Example + +```go +// open a new log file +log, err := Open("mylog", nil) + +// write some entries +err = log.Write(1, []byte("first entry")) +err = log.Write(2, []byte("second entry")) +err = log.Write(3, []byte("third entry")) + +// read an entry +data, err := log.Read(1) +println(string(data)) // output: first entry + +// close the log +err = log.Close() +``` + +Batch writes: + +```go + +// write three entries as a batch +batch := new(Batch) +batch.Write(1, []byte("first entry")) +batch.Write(2, []byte("second entry")) +batch.Write(3, []byte("third entry")) + +err = log.WriteBatch(batch) +``` + +Truncating: + +```go +// write some entries +err = log.Write(1, []byte("first entry")) +... 
+err = log.Write(1000, []byte("thousandth entry")) + +// truncate the log from index starting 350 and ending with 950. +err = l.TruncateFront(350) +err = l.TruncateBack(950) +``` + + + +## Contact + +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +`wal` source code is available under the MIT [License](/LICENSE). diff --git a/src/otel-collector/vendor/github.com/tidwall/wal/wal.go b/src/otel-collector/vendor/github.com/tidwall/wal/wal.go new file mode 100644 index 00000000..42b8cd29 --- /dev/null +++ b/src/otel-collector/vendor/github.com/tidwall/wal/wal.go @@ -0,0 +1,917 @@ +package wal + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "unicode/utf8" + "unsafe" + + "github.com/tidwall/gjson" + "github.com/tidwall/tinylru" +) + +var ( + // ErrCorrupt is returns when the log is corrupt. + ErrCorrupt = errors.New("log corrupt") + + // ErrClosed is returned when an operation cannot be completed because + // the log is closed. + ErrClosed = errors.New("log closed") + + // ErrNotFound is returned when an entry is not found. + ErrNotFound = errors.New("not found") + + // ErrOutOfOrder is returned from Write() when the index is not equal to + // LastIndex()+1. It's required that log monotonically grows by one and has + // no gaps. Thus, the series 10,11,12,13,14 is valid, but 10,11,13,14 is + // not because there's a gap between 11 and 13. Also, 10,12,11,13 is not + // valid because 12 and 11 are out of order. + ErrOutOfOrder = errors.New("out of order") + + // ErrOutOfRange is returned from TruncateFront() and TruncateBack() when + // the index not in the range of the log's first and last index. Or, this + // may be returned when the caller is attempting to remove *all* entries; + // The log requires that at least one entry exists following a truncate. + ErrOutOfRange = errors.New("out of range") +) + +// LogFormat is the format of the log files. +type LogFormat byte + +const ( + // Binary format writes entries in binary. This is the default and, unless + // a good reason otherwise, should be used in production. + Binary LogFormat = 0 + // JSON format writes entries as JSON lines. This causes larger, human + // readable files. + JSON LogFormat = 1 +) + +// Options for Log +type Options struct { + // NoSync disables fsync after writes. This is less durable and puts the + // log at risk of data loss when there's a server crash. + NoSync bool + // SegmentSize of each segment. This is just a target value, actual size + // may differ. Default is 20 MB. + SegmentSize int + // LogFormat is the format of the log files. Default is Binary. + LogFormat LogFormat + // SegmentCacheSize is the maximum number of segments that will be held in + // memory for caching. Increasing this value may enhance performance for + // concurrent read operations. Default is 1 + SegmentCacheSize int + // NoCopy allows for the Read() operation to return the raw underlying data + // slice. This is an optimization to help minimize allocations. When this + // option is set, do not modify the returned data because it may affect + // other Read calls. Default false + NoCopy bool + // Perms represents the datafiles modes and permission bits + DirPerms os.FileMode + FilePerms os.FileMode +} + +// DefaultOptions for Open(). +var DefaultOptions = &Options{ + NoSync: false, // Fsync after every write + SegmentSize: 20971520, // 20 MB log segment files. 
+ LogFormat: Binary, // Binary format is small and fast. + SegmentCacheSize: 2, // Number of cached in-memory segments + NoCopy: false, // Make a new copy of data for every Read call. + DirPerms: 0750, // Permissions for the created directories + FilePerms: 0640, // Permissions for the created data files +} + +// Log represents a write ahead log +type Log struct { + mu sync.RWMutex + path string // absolute path to log directory + opts Options // log options + closed bool // log is closed + corrupt bool // log may be corrupt + segments []*segment // all known log segments + firstIndex uint64 // index of the first entry in log + lastIndex uint64 // index of the last entry in log + sfile *os.File // tail segment file handle + wbatch Batch // reusable write batch + scache tinylru.LRU // segment entries cache +} + +// segment represents a single segment file. +type segment struct { + path string // path of segment file + index uint64 // first index of segment + ebuf []byte // cached entries buffer + epos []bpos // cached entries positions in buffer +} + +type bpos struct { + pos int // byte position + end int // one byte past pos +} + +// Open a new write ahead log +func Open(path string, opts *Options) (*Log, error) { + if opts == nil { + opts = DefaultOptions + } + if opts.SegmentCacheSize <= 0 { + opts.SegmentCacheSize = DefaultOptions.SegmentCacheSize + } + if opts.SegmentSize <= 0 { + opts.SegmentSize = DefaultOptions.SegmentSize + } + if opts.DirPerms == 0 { + opts.DirPerms = DefaultOptions.DirPerms + } + if opts.FilePerms == 0 { + opts.FilePerms = DefaultOptions.FilePerms + } + + var err error + path, err = abs(path) + if err != nil { + return nil, err + } + l := &Log{path: path, opts: *opts} + l.scache.Resize(l.opts.SegmentCacheSize) + if err := os.MkdirAll(path, l.opts.DirPerms); err != nil { + return nil, err + } + if err := l.load(); err != nil { + return nil, err + } + return l, nil +} + +func abs(path string) (string, error) { + if path == ":memory:" { + return "", errors.New("in-memory log not supported") + } + return filepath.Abs(path) +} + +func (l *Log) pushCache(segIdx int) { + _, _, _, v, evicted := + l.scache.SetEvicted(segIdx, l.segments[segIdx]) + if evicted { + s := v.(*segment) + s.ebuf = nil + s.epos = nil + } +} + +// load all the segments. This operation also cleans up any START/END segments. +func (l *Log) load() error { + fis, err := ioutil.ReadDir(l.path) + if err != nil { + return err + } + startIdx := -1 + endIdx := -1 + for _, fi := range fis { + name := fi.Name() + if fi.IsDir() || len(name) < 20 { + continue + } + index, err := strconv.ParseUint(name[:20], 10, 64) + if err != nil || index == 0 { + continue + } + isStart := len(name) == 26 && strings.HasSuffix(name, ".START") + isEnd := len(name) == 24 && strings.HasSuffix(name, ".END") + if len(name) == 20 || isStart || isEnd { + if isStart { + startIdx = len(l.segments) + } else if isEnd && endIdx == -1 { + endIdx = len(l.segments) + } + l.segments = append(l.segments, &segment{ + index: index, + path: filepath.Join(l.path, name), + }) + } + } + if len(l.segments) == 0 { + // Create a new log + l.segments = append(l.segments, &segment{ + index: 1, + path: filepath.Join(l.path, segmentName(1)), + }) + l.firstIndex = 1 + l.lastIndex = 0 + l.sfile, err = os.OpenFile(l.segments[0].path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms) + return err + } + // Open existing log. Clean up log if START of END segments exists. 
+ if startIdx != -1 { + if endIdx != -1 { + // There should not be a START and END at the same time + return ErrCorrupt + } + // Delete all files leading up to START + for i := 0; i < startIdx; i++ { + if err := os.Remove(l.segments[i].path); err != nil { + return err + } + } + l.segments = append([]*segment{}, l.segments[startIdx:]...) + // Rename the START segment + orgPath := l.segments[0].path + finalPath := orgPath[:len(orgPath)-len(".START")] + err := os.Rename(orgPath, finalPath) + if err != nil { + return err + } + l.segments[0].path = finalPath + } + if endIdx != -1 { + // Delete all files following END + for i := len(l.segments) - 1; i > endIdx; i-- { + if err := os.Remove(l.segments[i].path); err != nil { + return err + } + } + l.segments = append([]*segment{}, l.segments[:endIdx+1]...) + if len(l.segments) > 1 && l.segments[len(l.segments)-2].index == + l.segments[len(l.segments)-1].index { + // remove the segment prior to the END segment because it shares + // the same starting index. + l.segments[len(l.segments)-2] = l.segments[len(l.segments)-1] + l.segments = l.segments[:len(l.segments)-1] + } + // Rename the END segment + orgPath := l.segments[len(l.segments)-1].path + finalPath := orgPath[:len(orgPath)-len(".END")] + err := os.Rename(orgPath, finalPath) + if err != nil { + return err + } + l.segments[len(l.segments)-1].path = finalPath + } + l.firstIndex = l.segments[0].index + // Open the last segment for appending + lseg := l.segments[len(l.segments)-1] + l.sfile, err = os.OpenFile(lseg.path, os.O_WRONLY, l.opts.FilePerms) + if err != nil { + return err + } + if _, err := l.sfile.Seek(0, 2); err != nil { + return err + } + // Load the last segment entries + if err := l.loadSegmentEntries(lseg); err != nil { + return err + } + l.lastIndex = lseg.index + uint64(len(lseg.epos)) - 1 + return nil +} + +// segmentName returns a 20-byte textual representation of an index +// for lexical ordering. This is used for the file names of log segments. +func segmentName(index uint64) string { + return fmt.Sprintf("%020d", index) +} + +// Close the log. +func (l *Log) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + if l.closed { + if l.corrupt { + return ErrCorrupt + } + return ErrClosed + } + if err := l.sfile.Sync(); err != nil { + return err + } + if err := l.sfile.Close(); err != nil { + return err + } + l.closed = true + if l.corrupt { + return ErrCorrupt + } + return nil +} + +// Write an entry to the log. +func (l *Log) Write(index uint64, data []byte) error { + l.mu.Lock() + defer l.mu.Unlock() + if l.corrupt { + return ErrCorrupt + } else if l.closed { + return ErrClosed + } + l.wbatch.Clear() + l.wbatch.Write(index, data) + return l.writeBatch(&l.wbatch) +} + +func (l *Log) appendEntry(dst []byte, index uint64, data []byte) (out []byte, + epos bpos) { + if l.opts.LogFormat == JSON { + return appendJSONEntry(dst, index, data) + } + return appendBinaryEntry(dst, data) +} + +// Cycle the old segment for a new segment. 
+func (l *Log) cycle() error { + if err := l.sfile.Sync(); err != nil { + return err + } + if err := l.sfile.Close(); err != nil { + return err + } + // cache the previous segment + l.pushCache(len(l.segments) - 1) + s := &segment{ + index: l.lastIndex + 1, + path: filepath.Join(l.path, segmentName(l.lastIndex+1)), + } + var err error + l.sfile, err = os.OpenFile(s.path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms) + if err != nil { + return err + } + l.segments = append(l.segments, s) + return nil +} + +func appendJSONEntry(dst []byte, index uint64, data []byte) (out []byte, + epos bpos) { + // {"index":number,"data":string} + mark := len(dst) + dst = append(dst, `{"index":"`...) + dst = strconv.AppendUint(dst, index, 10) + dst = append(dst, `","data":`...) + dst = appendJSONData(dst, data) + dst = append(dst, '}', '\n') + return dst, bpos{mark, len(dst)} +} + +func appendJSONData(dst []byte, s []byte) []byte { + if utf8.Valid(s) { + b, _ := json.Marshal(*(*string)(unsafe.Pointer(&s))) + dst = append(dst, '"', '+') + return append(dst, b[1:]...) + } + dst = append(dst, '"', '$') + dst = append(dst, base64.URLEncoding.EncodeToString(s)...) + return append(dst, '"') +} + +func appendBinaryEntry(dst []byte, data []byte) (out []byte, epos bpos) { + // data_size + data + pos := len(dst) + dst = appendUvarint(dst, uint64(len(data))) + dst = append(dst, data...) + return dst, bpos{pos, len(dst)} +} + +func appendUvarint(dst []byte, x uint64) []byte { + var buf [10]byte + n := binary.PutUvarint(buf[:], x) + dst = append(dst, buf[:n]...) + return dst +} + +// Batch of entries. Used to write multiple entries at once using WriteBatch(). +type Batch struct { + entries []batchEntry + datas []byte +} + +type batchEntry struct { + index uint64 + size int +} + +// Write an entry to the batch +func (b *Batch) Write(index uint64, data []byte) { + b.entries = append(b.entries, batchEntry{index, len(data)}) + b.datas = append(b.datas, data...) +} + +// Clear the batch for reuse. +func (b *Batch) Clear() { + b.entries = b.entries[:0] + b.datas = b.datas[:0] +} + +// WriteBatch writes the entries in the batch to the log in the order that they +// were added to the batch. The batch is cleared upon a successful return. +func (l *Log) WriteBatch(b *Batch) error { + l.mu.Lock() + defer l.mu.Unlock() + if l.corrupt { + return ErrCorrupt + } else if l.closed { + return ErrClosed + } + if len(b.entries) == 0 { + return nil + } + return l.writeBatch(b) +} + +func (l *Log) writeBatch(b *Batch) error { + // check that all indexes in batch are sane + for i := 0; i < len(b.entries); i++ { + if b.entries[i].index != l.lastIndex+uint64(i+1) { + return ErrOutOfOrder + } + } + // load the tail segment + s := l.segments[len(l.segments)-1] + if len(s.ebuf) > l.opts.SegmentSize { + // tail segment has reached capacity. Close it and create a new one. 
+ if err := l.cycle(); err != nil { + return err + } + s = l.segments[len(l.segments)-1] + } + + mark := len(s.ebuf) + datas := b.datas + for i := 0; i < len(b.entries); i++ { + data := datas[:b.entries[i].size] + var epos bpos + s.ebuf, epos = l.appendEntry(s.ebuf, b.entries[i].index, data) + s.epos = append(s.epos, epos) + if len(s.ebuf) >= l.opts.SegmentSize { + // segment has reached capacity, cycle now + if _, err := l.sfile.Write(s.ebuf[mark:]); err != nil { + return err + } + l.lastIndex = b.entries[i].index + if err := l.cycle(); err != nil { + return err + } + s = l.segments[len(l.segments)-1] + mark = 0 + } + datas = datas[b.entries[i].size:] + } + if len(s.ebuf)-mark > 0 { + if _, err := l.sfile.Write(s.ebuf[mark:]); err != nil { + return err + } + l.lastIndex = b.entries[len(b.entries)-1].index + } + if !l.opts.NoSync { + if err := l.sfile.Sync(); err != nil { + return err + } + } + b.Clear() + return nil +} + +// FirstIndex returns the index of the first entry in the log. Returns zero +// when log has no entries. +func (l *Log) FirstIndex() (index uint64, err error) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.corrupt { + return 0, ErrCorrupt + } else if l.closed { + return 0, ErrClosed + } + // We check the lastIndex for zero because the firstIndex is always one or + // more, even when there's no entries + if l.lastIndex == 0 { + return 0, nil + } + return l.firstIndex, nil +} + +// LastIndex returns the index of the last entry in the log. Returns zero when +// log has no entries. +func (l *Log) LastIndex() (index uint64, err error) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.corrupt { + return 0, ErrCorrupt + } else if l.closed { + return 0, ErrClosed + } + if l.lastIndex == 0 { + return 0, nil + } + return l.lastIndex, nil +} + +// findSegment performs a bsearch on the segments +func (l *Log) findSegment(index uint64) int { + i, j := 0, len(l.segments) + for i < j { + h := i + (j-i)/2 + if index >= l.segments[h].index { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} + +func (l *Log) loadSegmentEntries(s *segment) error { + data, err := ioutil.ReadFile(s.path) + if err != nil { + return err + } + ebuf := data + var epos []bpos + var pos int + for exidx := s.index; len(data) > 0; exidx++ { + var n int + if l.opts.LogFormat == JSON { + n, err = loadNextJSONEntry(data) + } else { + n, err = loadNextBinaryEntry(data) + } + if err != nil { + return err + } + data = data[n:] + epos = append(epos, bpos{pos, pos + n}) + pos += n + } + s.ebuf = ebuf + s.epos = epos + return nil +} + +func loadNextJSONEntry(data []byte) (n int, err error) { + // {"index":number,"data":string} + idx := bytes.IndexByte(data, '\n') + if idx == -1 { + return 0, ErrCorrupt + } + line := data[:idx] + dres := gjson.Get(*(*string)(unsafe.Pointer(&line)), "data") + if dres.Type != gjson.String { + return 0, ErrCorrupt + } + return idx + 1, nil +} + +func loadNextBinaryEntry(data []byte) (n int, err error) { + // data_size + data + size, n := binary.Uvarint(data) + if n <= 0 { + return 0, ErrCorrupt + } + if uint64(len(data)-n) < size { + return 0, ErrCorrupt + } + return n + int(size), nil +} + +// loadSegment loads the segment entries into memory, pushes it to the front +// of the lru cache, and returns it. +func (l *Log) loadSegment(index uint64) (*segment, error) { + // check the last segment first. 
+ lseg := l.segments[len(l.segments)-1]
+ if index >= lseg.index {
+ return lseg, nil
+ }
+ // check the most recent cached segment
+ var rseg *segment
+ l.scache.Range(func(_, v interface{}) bool {
+ s := v.(*segment)
+ if index >= s.index && index < s.index+uint64(len(s.epos)) {
+ rseg = s
+ }
+ return false
+ })
+ if rseg != nil {
+ return rseg, nil
+ }
+ // find in the segment array
+ idx := l.findSegment(index)
+ s := l.segments[idx]
+ if len(s.epos) == 0 {
+ // load the entries from disk
+ if err := l.loadSegmentEntries(s); err != nil {
+ return nil, err
+ }
+ }
+ // push the segment to the front of the cache
+ l.pushCache(idx)
+ return s, nil
+}
+
+// Read an entry from the log. Returns a byte slice containing the data entry.
+func (l *Log) Read(index uint64) (data []byte, err error) {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+ if l.corrupt {
+ return nil, ErrCorrupt
+ } else if l.closed {
+ return nil, ErrClosed
+ }
+ if index == 0 || index < l.firstIndex || index > l.lastIndex {
+ return nil, ErrNotFound
+ }
+ s, err := l.loadSegment(index)
+ if err != nil {
+ return nil, err
+ }
+ epos := s.epos[index-s.index]
+ edata := s.ebuf[epos.pos:epos.end]
+ if l.opts.LogFormat == JSON {
+ return readJSON(edata)
+ }
+ // binary read
+ size, n := binary.Uvarint(edata)
+ if n <= 0 {
+ return nil, ErrCorrupt
+ }
+ if uint64(len(edata)-n) < size {
+ return nil, ErrCorrupt
+ }
+ if l.opts.NoCopy {
+ data = edata[n : uint64(n)+size]
+ } else {
+ data = make([]byte, size)
+ copy(data, edata[n:])
+ }
+ return data, nil
+}
+
+//go:noinline
+func readJSON(edata []byte) ([]byte, error) {
+ var data []byte
+ s := gjson.Get(*(*string)(unsafe.Pointer(&edata)), "data").String()
+ if len(s) > 0 && s[0] == '$' {
+ var err error
+ data, err = base64.URLEncoding.DecodeString(s[1:])
+ if err != nil {
+ return nil, ErrCorrupt
+ }
+ } else if len(s) > 0 && s[0] == '+' {
+ data = make([]byte, len(s[1:]))
+ copy(data, s[1:])
+ } else {
+ return nil, ErrCorrupt
+ }
+ return data, nil
+}
+
+// ClearCache clears the segment cache
+func (l *Log) ClearCache() error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.corrupt {
+ return ErrCorrupt
+ } else if l.closed {
+ return ErrClosed
+ }
+ l.clearCache()
+ return nil
+}
+func (l *Log) clearCache() {
+ l.scache.Range(func(_, v interface{}) bool {
+ s := v.(*segment)
+ s.ebuf = nil
+ s.epos = nil
+ return true
+ })
+ l.scache = tinylru.LRU{}
+ l.scache.Resize(l.opts.SegmentCacheSize)
+}
+
+// TruncateFront truncates the front of the log by removing all entries that
+// are before the provided `index`. In other words the entry at
+// `index` becomes the first entry in the log.
+func (l *Log) TruncateFront(index uint64) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.corrupt {
+ return ErrCorrupt
+ } else if l.closed {
+ return ErrClosed
+ }
+ return l.truncateFront(index)
+}
+func (l *Log) truncateFront(index uint64) (err error) {
+ if index == 0 || l.lastIndex == 0 ||
+ index < l.firstIndex || index > l.lastIndex {
+ return ErrOutOfRange
+ }
+ if index == l.firstIndex {
+ // nothing to truncate
+ return nil
+ }
+ segIdx := l.findSegment(index)
+ var s *segment
+ s, err = l.loadSegment(index)
+ if err != nil {
+ return err
+ }
+ epos := s.epos[index-s.index:]
+ ebuf := s.ebuf[epos[0].pos:]
+ // Create a temp file that contains the truncated segment.
+ tempName := filepath.Join(l.path, "TEMP")
+ err = func() error {
+ f, err := os.OpenFile(tempName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err := f.Write(ebuf); err != nil {
+ return err
+ }
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return f.Close()
+ }()
+ // Rename the TEMP file to its START file name.
+ startName := filepath.Join(l.path, segmentName(index)+".START")
+ if err = os.Rename(tempName, startName); err != nil {
+ return err
+ }
+ // The log was truncated but still needs some file cleanup. Any errors
+ // following this message will not cause on-disk data corruption, but
+ // may cause an inconsistency with the current program, so we'll return
+ // ErrCorrupt so that the user can attempt a recovery by calling Close()
+ // followed by Open().
+ defer func() {
+ if v := recover(); v != nil {
+ err = ErrCorrupt
+ l.corrupt = true
+ }
+ }()
+ if segIdx == len(l.segments)-1 {
+ // Close the tail segment file
+ if err = l.sfile.Close(); err != nil {
+ return err
+ }
+ }
+ // Delete truncated segment files
+ for i := 0; i <= segIdx; i++ {
+ if err = os.Remove(l.segments[i].path); err != nil {
+ return err
+ }
+ }
+ // Rename the START file to the final truncated segment name.
+ newName := filepath.Join(l.path, segmentName(index))
+ if err = os.Rename(startName, newName); err != nil {
+ return err
+ }
+ s.path = newName
+ s.index = index
+ if segIdx == len(l.segments)-1 {
+ // Reopen the tail segment file
+ if l.sfile, err = os.OpenFile(newName, os.O_WRONLY, l.opts.FilePerms); err != nil {
+ return err
+ }
+ var n int64
+ if n, err = l.sfile.Seek(0, 2); err != nil {
+ return err
+ }
+ if n != int64(len(ebuf)) {
+ err = errors.New("invalid seek")
+ return err
+ }
+ // Load the last segment entries
+ if err = l.loadSegmentEntries(s); err != nil {
+ return err
+ }
+ }
+ l.segments = append([]*segment{}, l.segments[segIdx:]...)
+ l.firstIndex = index
+ l.clearCache()
+ return nil
+}
+
+// TruncateBack truncates the back of the log by removing all entries that
+// are after the provided `index`. In other words the entry at `index`
+// becomes the last entry in the log.
+func (l *Log) TruncateBack(index uint64) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.corrupt {
+ return ErrCorrupt
+ } else if l.closed {
+ return ErrClosed
+ }
+ return l.truncateBack(index)
+}
+
+func (l *Log) truncateBack(index uint64) (err error) {
+ if index == 0 || l.lastIndex == 0 ||
+ index < l.firstIndex || index > l.lastIndex {
+ return ErrOutOfRange
+ }
+ if index == l.lastIndex {
+ // nothing to truncate
+ return nil
+ }
+ segIdx := l.findSegment(index)
+ var s *segment
+ s, err = l.loadSegment(index)
+ if err != nil {
+ return err
+ }
+ epos := s.epos[:index-s.index+1]
+ ebuf := s.ebuf[:epos[len(epos)-1].end]
+ // Create a temp file that contains the truncated segment.
+ tempName := filepath.Join(l.path, "TEMP")
+ err = func() error {
+ f, err := os.OpenFile(tempName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err := f.Write(ebuf); err != nil {
+ return err
+ }
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return f.Close()
+ }()
+ // Rename the TEMP file to its END file name.
+ endName := filepath.Join(l.path, segmentName(s.index)+".END")
+ if err = os.Rename(tempName, endName); err != nil {
+ return err
+ }
+ // The log was truncated but still needs some file cleanup. Any errors
+ // following this message will not cause on-disk data corruption, but
+ // may cause an inconsistency with the current program, so we'll return
+ // ErrCorrupt so that the user can attempt a recovery by calling Close()
+ // followed by Open().
+ defer func() {
+ if v := recover(); v != nil {
+ err = ErrCorrupt
+ l.corrupt = true
+ }
+ }()
+
+ // Close the tail segment file
+ if err = l.sfile.Close(); err != nil {
+ return err
+ }
+ // Delete truncated segment files
+ for i := segIdx; i < len(l.segments); i++ {
+ if err = os.Remove(l.segments[i].path); err != nil {
+ return err
+ }
+ }
+ // Rename the END file to the final truncated segment name.
+ newName := filepath.Join(l.path, segmentName(s.index))
+ if err = os.Rename(endName, newName); err != nil {
+ return err
+ }
+ // Reopen the tail segment file
+ if l.sfile, err = os.OpenFile(newName, os.O_WRONLY, l.opts.FilePerms); err != nil {
+ return err
+ }
+ var n int64
+ n, err = l.sfile.Seek(0, 2)
+ if err != nil {
+ return err
+ }
+ if n != int64(len(ebuf)) {
+ err = errors.New("invalid seek")
+ return err
+ }
+ s.path = newName
+ l.segments = append([]*segment{}, l.segments[:segIdx+1]...)
+ l.lastIndex = index
+ l.clearCache()
+ if err = l.loadSegmentEntries(s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Sync performs an fsync on the log. This is not necessary when the
+// NoSync option is set to false.
+func (l *Log) Sync() error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.corrupt {
+ return ErrCorrupt
+ } else if l.closed {
+ return ErrClosed
+ }
+ return l.sfile.Sync()
+}
diff --git a/src/otel-collector/vendor/modules.txt b/src/otel-collector/vendor/modules.txt
index 314b64bf..089928e1 100644
--- a/src/otel-collector/vendor/modules.txt
+++ b/src/otel-collector/vendor/modules.txt
@@ -114,6 +114,10 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/
 ## explicit; go 1.21.0
 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter
 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata
+# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0
+## explicit; go 1.21.0
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata
 # github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.103.0
 ## explicit; go 1.21.0
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent
@@ -123,6 +127,9 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetr
 # github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.103.0
 ## explicit; go 1.21.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0
+## explicit; go 1.21.0
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
@@ -147,6 +154,11 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
+# github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e
+## explicit; go 1.21
+github.com/prometheus/prometheus/model/timestamp
+github.com/prometheus/prometheus/model/value
+github.com/prometheus/prometheus/prompb
 # github.com/rs/cors v1.10.1
 ## explicit; go 1.13
 github.com/rs/cors
@@ -170,6 +182,21 @@ github.com/spf13/pflag
 # github.com/stretchr/testify v1.9.0
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
+# github.com/tidwall/gjson v1.10.2
+## explicit; go 1.12
+github.com/tidwall/gjson
+# github.com/tidwall/match v1.1.1
+## explicit; go 1.15
+github.com/tidwall/match
+# github.com/tidwall/pretty v1.2.0
+## explicit; go 1.16
+github.com/tidwall/pretty
+# github.com/tidwall/tinylru v1.1.0
+## explicit; go 1.14
+github.com/tidwall/tinylru
+# github.com/tidwall/wal v1.1.7
+## explicit; go 1.13
+github.com/tidwall/wal
 # github.com/tklauser/go-sysconf v0.3.12
 ## explicit; go 1.13
 github.com/tklauser/go-sysconf
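
Note for reviewers: the vendored github.com/tidwall/wal package above is what backs the exporter's optional WAL support. The following standalone sketch is illustrative only and is not part of this patch; the directory path and entry indexes are made up. It shows how the package's Open, Batch/WriteBatch, Read, and TruncateFront calls seen in the vendored wal.go fit together.

package main

import (
	"fmt"
	"log"

	"github.com/tidwall/wal"
)

func main() {
	// Open (or create) a write-ahead log directory; nil selects wal.DefaultOptions.
	w, err := wal.Open("/tmp/example-wal", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Indexes must be contiguous and monotonically increasing, otherwise
	// WriteBatch returns ErrOutOfOrder.
	last, err := w.LastIndex()
	if err != nil {
		log.Fatal(err)
	}
	b := new(wal.Batch)
	b.Write(last+1, []byte("first payload"))
	b.Write(last+2, []byte("second payload"))
	if err := w.WriteBatch(b); err != nil { // the batch is cleared on success
		log.Fatal(err)
	}

	// Read an entry back by index.
	data, err := w.Read(last + 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("entry %d: %s\n", last+1, data)

	// Drop everything before last+2, making it the first entry in the log.
	if err := w.TruncateFront(last + 2); err != nil {
		log.Fatal(err)
	}
}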