diff --git a/.ci/docker/Makefile b/.ci/docker/Makefile index d9ed9f05bed..35c4071eeed 100644 --- a/.ci/docker/Makefile +++ b/.ci/docker/Makefile @@ -45,6 +45,7 @@ test-golang-mage: prepare-test ## Run the tests for the specific app cp -f ../../go.* golang-mage mkdir -p golang-mage/approvaltest && cp -f ../../approvaltest/go.* golang-mage/approvaltest mkdir -p golang-mage/systemtest && cp -f ../../systemtest/go.* golang-mage/systemtest + mkdir -p golang-mage/internal/otel_collector && cp -f ../../internal/otel_collector/go.* golang-mage/internal/otel_collector @DOCKERFILE=golang-mage bats-core/bin/bats --tap tests | tee target/results.tap @$(MAKE) -s convert-tests-results diff --git a/.ci/docker/golang-mage/Dockerfile b/.ci/docker/golang-mage/Dockerfile index 5e4515826a1..4b0cd4add99 100644 --- a/.ci/docker/golang-mage/Dockerfile +++ b/.ci/docker/golang-mage/Dockerfile @@ -7,10 +7,12 @@ WORKDIR $TOOLS COPY go.mod go.sum ./ COPY approvaltest/go.mod approvaltest/go.sum ./approvaltest/ COPY systemtest/go.mod systemtest/go.sum ./systemtest/ +COPY internal/otel_collector/go.mod internal/otel_collector/go.sum ./internal/otel_collector/ RUN go mod download RUN cd approvaltest && go mod download RUN cd systemtest && go mod download +RUN cd internal/otel_collector && go mod download RUN apt-get update -y -qq \ && apt-get install -y -qq python3 python3-pip python3-venv \ diff --git a/.gitignore b/.gitignore index 13d56edf4cf..6bc8b9e9380 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ /.ci/docker/golang-mage/go.* /.ci/docker/golang-mage/approvaltest/ /.ci/docker/golang-mage/systemtest/ +/.ci/docker/golang-mage/internal/otel_collector/ **/*.idea /build /data diff --git a/Makefile b/Makefile index e9b355b1696..033467a797c 100644 --- a/Makefile +++ b/Makefile @@ -136,7 +136,7 @@ NOTICE.txt: $(PYTHON) go.mod .PHONY: add-headers add-headers: $(GOLICENSER) ifndef CHECK_HEADERS_DISABLED - @$(GOLICENSER) -exclude x-pack + @$(GOLICENSER) -exclude x-pack -exclude internal/otel_collector @$(GOLICENSER) -license Elastic x-pack endif @@ -222,7 +222,7 @@ check-changelogs: $(PYTHON) .PHONY: check-headers check-headers: $(GOLICENSER) ifndef CHECK_HEADERS_DISABLED - @$(GOLICENSER) -d -exclude build -exclude x-pack + @$(GOLICENSER) -d -exclude build -exclude x-pack -exclude internal/otel_collector @$(GOLICENSER) -d -exclude build -license Elastic x-pack endif diff --git a/NOTICE.txt b/NOTICE.txt index fcdcba3ebff..3d2198eb09f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -4281,7 +4281,7 @@ Version: v0.17.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector@v0.17.0/LICENSE: +Contents of probable licence file LICENSE: Apache License diff --git a/go.mod b/go.mod index fc509982928..a1ef5ca1300 100644 --- a/go.mod +++ b/go.mod @@ -56,6 +56,7 @@ require ( go.elastic.co/apm/module/apmhttp v1.7.2 go.elastic.co/ecszap v0.3.0 // indirect go.elastic.co/fastjson v1.1.0 + go.elastic.co/go-licence-detector v0.5.0 go.opentelemetry.io/collector v0.17.0 go.uber.org/atomic v1.7.0 go.uber.org/multierr v1.6.0 // indirect @@ -89,3 +90,5 @@ replace ( k8s.io/apimachinery => k8s.io/apimachinery v0.19.4 k8s.io/client-go => k8s.io/client-go v0.19.4 ) + +replace go.opentelemetry.io/collector => ./internal/otel_collector diff --git a/go.sum b/go.sum index 8babc053192..0efc44c99b3 100644 --- a/go.sum +++ b/go.sum @@ -1228,6 +1228,8 @@ go.elastic.co/fastjson v1.0.0/go.mod 
h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHt go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.elastic.co/go-licence-detector v0.4.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU= +go.elastic.co/go-licence-detector v0.5.0 h1:YXPCyt9faKMdJ8uMrkcI4patk8WZ0ME5oaIhYBUsRU4= +go.elastic.co/go-licence-detector v0.5.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= diff --git a/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go b/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go new file mode 100644 index 00000000000..06be24b7e24 --- /dev/null +++ b/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "context" + + gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + "google.golang.org/grpc" + + collectorlog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1" + collectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + "go.opentelemetry.io/collector/receiver/otlpreceiver/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/trace" +) + +// RegisterTraceReceiver registers the trace receiver with a gRPC server and/or grpc-gateway mux, if non-nil. +func RegisterTraceReceiver(ctx context.Context, receiver *trace.Receiver, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { + if serverGRPC != nil { + collectortrace.RegisterTraceServiceServer(serverGRPC, receiver) + } + if gatewayMux != nil { + err := collectortrace.RegisterTraceServiceHandlerServer(ctx, gatewayMux, receiver) + if err != nil { + return err + } + // Also register an alias handler. This fixes bug https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + return collectortrace.RegisterTraceServiceHandlerServerAlias(ctx, gatewayMux, receiver) + } + return nil +} + +// RegisterMetricsReceiver registers the metrics receiver with a gRPC server and/or grpc-gateway mux, if non-nil. 
+func RegisterMetricsReceiver(ctx context.Context, receiver *metrics.Receiver, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { + if serverGRPC != nil { + collectormetrics.RegisterMetricsServiceServer(serverGRPC, receiver) + } + if gatewayMux != nil { + return collectormetrics.RegisterMetricsServiceHandlerServer(ctx, gatewayMux, receiver) + } + return nil +} + +// RegisterLogsReceiver registers the logs receiver with a gRPC server and/or grpc-gateway mux, if non-nil. +func RegisterLogsReceiver(ctx context.Context, receiver *logs.Receiver, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { + if serverGRPC != nil { + collectorlog.RegisterLogsServiceServer(serverGRPC, receiver) + } + if gatewayMux != nil { + return collectorlog.RegisterLogsServiceHandlerServer(ctx, gatewayMux, receiver) + } + return nil +} diff --git a/internal/.otel_collector_mixin/service/defaultcomponents/defaults.go b/internal/.otel_collector_mixin/service/defaultcomponents/defaults.go new file mode 100644 index 00000000000..23dbabfbd49 --- /dev/null +++ b/internal/.otel_collector_mixin/service/defaultcomponents/defaults.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package defaultcomponents composes the default set of components used by the otel service +package defaultcomponents + +import ( + "go.opentelemetry.io/collector/component" +) + +// Components returns the default set of components used by the +// OpenTelemetry collector. +func Components() (component.Factories, error) { + return component.Factories{}, nil +} diff --git a/internal/otel_collector/CHANGELOG.md b/internal/otel_collector/CHANGELOG.md new file mode 100644 index 00000000000..c91fbc7f6ba --- /dev/null +++ b/internal/otel_collector/CHANGELOG.md @@ -0,0 +1,653 @@ +# Changelog + +## Unreleased + +## v0.17.0 Beta + +## 💡 Enhancements 💡 + +- Default config environment variable expansion (#2231) +- `prometheusremotewrite` exporter: Add batched exports (#2249) +- `memorylimiter` processor: Introduce soft and hard limits (#2250) + +## 🧰 Bug fixes 🧰 + +- Fix nits in pdata usage (#2235) +- Convert status to not be a pointer in the Span (#2242) +- Report the error from `pprof.StartCPUProfile` (#2263) +- Rename `service.Application.SignalTestComplete` to `Shutdown` (#2277) + +## v0.16.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename Push functions to be consistent across signals in `exporterhelper` (#2203) + +## 💡 Enhancements 💡 + +- Change default OTLP/gRPC port number to 4317, also continue receiving on legacy port + 55680 during transition period (#2104). +- `kafka` exporter: Add support for exporting metrics as otlp Protobuf. 
(#1966)
+- Move scraper helpers to their own `scraperhelper` package (#2185)
+- Add `componenthelper` package to help build components (#2186)
+- Remove usage of custom init/stop in `scraper` and use start/shutdown from `component` (#2193)
+- Add more trace annotations, so zpages are more useful to determine failures (#2206)
+- Add support to skip TLS verification (#2202)
+- Expose non-nullable metric types (#2208)
+- Expose non-nullable elements from slices of pointers (#2200)
+
+## 🧰 Bug fixes 🧰
+
+- Change InstrumentationLibrary to be non-nullable (#2196)
+- Add support for slices to non-pointers, use non-nullable AnyValue (#2192)
+- Fix `--set` flag to work with `{}` in configs (#2162)
+
+## v0.15.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Remove legacy metrics; they were marked as legacy for ~12 months (#2105)
+
+## 💡 Enhancements 💡
+
+- Implement conversion between OpenCensus and OpenTelemetry Summary Metric (#2048)
+- Add ability to generate non-nullable messages (#2005)
+- Implement Summary Metric in Prometheus RemoteWrite Exporter (#2083)
+- Add `resource_to_telemetry_conversion` to exporter helper and expose exporter settings (#2060)
+- Add `CustomRoundTripper` function to httpclientconfig (#2085)
+- Allow for more logging options to be passed to `service` (#2132)
+- Add config parameters for `jaeger` receiver (#2068)
+- Map unset status code for `jaeger` translator as per spec (#2134)
+- Add more trace annotations to the queue-retry logic (#2136)
+- Add config settings for component telemetry (#2148)
+- Use net.SplitHostPort for IPv6 support in `prometheus` receiver (#2154)
+- Add --log-format command line option (defaults to "console") (#2177)
+
+## 🧰 Bug fixes 🧰
+
+- `logging` exporter: Add Logging for Summary Datapoint (#2084)
+- `hostmetrics` receiver: use correct TCP state labels on Unix systems (#2087)
+- Fix otlp_log receiver wrong use of trace measurement (#2117)
+- Fix "process/memory/rss" metric units (#2112)
+- Fix "process/cpu_seconds" metrics (#2113)
+- Add check for nil logger in exporterhelper functions (#2141)
+- `prometheus` receiver:
+  - Upgrade Prometheus version to fix race condition (#2121)
+  - Fix the scraper/discover manager coordination (#2089)
+  - Fix panic when adjusting buckets (#2168)
+
+## v0.14.0 Beta
+
+## 🚀 New components 🚀
+
+- `otlphttp` exporter which implements OTLP over HTTP protocol.
+ +## 🛑 Breaking changes 🛑 + +- Rename consumer.TraceConsumer to consumer.TracesConsumer #1974 +- Rename component.TraceReceiver to component.TracesReceiver #1975 +- Rename component.TraceProcessor to component.TracesProcessor #1976 +- Rename component.TraceExporter to component.TracesExporter #1975 +- Deprecate NopExporter, add NopConsumer (#1972) +- Deprecate SinkExporter, add SinkConsumer (#1973) +- Move `tailsampling` processor to contrib (#2012) +- Remove NewAttributeValueSlice (#2028) and mark NewAttributeValue as deprecated (#2022) +- Remove pdata.StringValue (#2021) +- Remove pdata.InitFromAttributeMap, use CopyTo if needed (#2042) +- Remove SetMapVal and SetArrayVal for pdata.AttributeValue (#2039) + +## 💡 Enhancements 💡 + +- `zipkin` exporter: Add queue retry to zipkin (#1971) +- `prometheus` exporter: Add `send_timestamps` option (#1951) +- `filter` processor: Add `expr` pdata.Metric filtering support (#1940, #1996) +- `attribute` processor: Add log support (#1934) +- `logging` exporter: Add index for histogram buckets count (#2009) +- `otlphttp` exporter: Add correct handling of server error responses (#2016) +- `prometheusremotewrite` exporter: + - Add user agent header to outgoing http request (#2000) + - Convert histograms to cumulative (#2049) + - Return permanent errors (#2053) + - Add external labels (#2044) +- `hostmetrics` receiver: Use scraper controller (#1949) +- Change Span/Trace ID to be byte array (#2001) +- Add `simple` metrics helper to facilitate building pdata.Metrics in receivers (#1540) +- Improve diagnostic logging for exporters (#2020) +- Add obsreport to receiverhelper scrapers (#1961) +- Update OTLP to 0.6.0 and use the new Span Status code (#2031) +- Add support of partial requests for logs and metrics to the exporterhelper (#2059) + +## 🧰 Bug fixes 🧰 + +- `logging` exporter: Added array serialization (#1994) +- `zipkin` receiver: Allow receiver to parse string tags (#1893) +- `batch` processor: Fix shutdown race (#1967) +- Guard for nil data points (#2055) + +## v0.13.0 Beta + +## 🛑 Breaking changes 🛑 + +- Host metric `system.disk.time` renamed to `system.disk.operation_time` (#1887) +- Use consumer for sender interface, remove unnecessary receiver address from Runner (#1941) +- Enable sending queue by default in all exporters configured to use it (#1924) +- Removed `groupbytraceprocessor` (#1891) +- Remove ability to configure collection interval per scraper (#1947) + +## 💡 Enhancements 💡 + +- Host Metrics receiver now reports both `system.disk.io_time` and `system.disk.operation_time` (#1887) +- Match spans against the instrumentation library and resource attributes (#928) +- Add `receiverhelper` for creating flexible "scraper" metrics receiver (#1886, #1890, #1945, #1946) +- Migrate `tailsampling` processor to new OTLP-based internal data model and add Composite Sampler (#1894) +- Metadata Generator: Change Metrics fields to implement an interface with new methods (#1912) +- Add unmarshalling for `pdata.Traces` (#1948) +- Add debug-level message on error for `jaeger` exporter (#1964) + +## 🧰 Bug fixes 🧰 + +- Fix bug where the service does not correctly start/stop the log exporters (#1943) +- Fix Queued Retry Unusable without Batch Processor (#1813) - (#1930) +- `prometheus` receiver: Log error message when `process_start_time_seconds` gauge is missing (#1921) +- Fix trace jaeger conversion to internal traces zero time bug (#1957) +- Fix panic in otlp traces to zipkin (#1963) +- Fix OTLP/HTTP receiver's path to be /v1/traces (#1979) + +## v0.12.0 
Beta
+
+## 🚀 New components 🚀
+
+- `configauth` package with the auth settings that can be used by receivers (#1807, #1808, #1809, #1810)
+- `perfcounters` package that uses perflib for host metrics receiver (#1835, #1836, #1868, #1869, #1870)
+
+## 💡 Enhancements 💡
+
+- Remove `queued_retry` and enable `otlp` metrics receiver in default config (#1823, #1838)
+- Add `limit_percentage` and `spike_limit_percentage` options to `memorylimiter` processor (#1622)
+- `hostmetrics` receiver:
+  - Collect additional labels from partitions in the filesystems scraper (#1858)
+  - Add filters for mount point and filesystem type (#1866)
+- Add cloud.provider semantic conventions (#1865)
+- `attribute` processor: Add log support (#1783)
+- Deprecate OpenCensus-based internal data structures (#1843)
+- Introduce SpanID data type, not yet used in Protobuf messages (#1854, #1855)
+- Enable `otlp` trace by default in the released docker image (#1883)
+- `tailsampling` processor: Combine batches of spans into a single batch (#1864)
+- `filter` processor: Update to use pdata (#1885)
+- Allow MSI upgrades (#1914)
+
+## 🧰 Bug fixes 🧰
+
+- `prometheus` receiver: Print a more informative message about 'up' metric value (#1826)
+- Use custom data type and custom JSON serialization for traceid (#1840)
+- Skip creation of redundant nil resource in translation from OC if there are no combined metrics (#1803)
+- `tailsampling` processor: Only send to next consumer once (#1735)
+- Report Windows pagefile usage in bytes (#1837)
+- Fix issue where Prometheus SD config cannot be parsed (#1877)
+
+## v0.11.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Rename service.Start() to Run() since it's a blocking call
+- Fix slice Append to accept by value the element in pdata
+- Change CreateTraceProcessor and CreateMetricsProcessor to use the same parameter order as receivers/logs processor and exporters.
+- Prevent accidental use of LogsToOtlp and LogsFromOtlp and the OTLP data structs (#1703)
+- Remove SetType from configmodels, ensure all registered factories set the type in config (#1798)
+- Move process telemetry to service/internal (#1794)
+
+## 💡 Enhancements 💡
+
+- Add map and array attribute value type support (#1656)
+- Add authentication support to kafka (#1632)
+- Implement InstrumentationLibrary translation to jaeger (#1645)
+- Add public functions to export pdata to ExportXServicesRequest Protobuf bytes (#1741)
+- Expose telemetry level in the configtelemetry (#1796)
+- Add configauth package (#1807)
+- Add config to docker image (#1792)
+
+## 🧰 Bug fixes 🧰
+
+- Use zap int argument for int values instead of conversion (#1779)
+- Add support for gzip encoded payload in OTLP/HTTP receiver (#1581)
+- Return proto status for OTLP receiver when failed (#1788)
+
+## v0.10.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- **Update OTLP to v0.5.0, incompatible metrics protocol.**
+- Remove support for propagating summary metrics in OtelCollector.
+  - This is a temporary change, and will affect mostly OpenCensus users who use metrics.
+
+## 💡 Enhancements 💡
+- Support zipkin proto in `kafka` receiver (#1646)
+- Prometheus Remote Write Exporter supporting Cortex (#1577, #1643)
+- Add deployment environment semantic convention (#1722)
+- Add logs support to `batch` and `resource` processors (#1723, #1729)
+
+## 🧰 Bug fixes 🧰
+- Identify config error when expected map is other value type (#1641)
+- Fix Kafka receiver closing ready channel multiple times (#1696)
+- Fix a panic issue while processing Zipkin spans with an empty service name (#1742)
+- Zipkin Receiver: Always set the endtime (#1750)
+
+## v0.9.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- **Remove old base factories**:
+  - `ReceiverFactoryBase` (#1583)
+  - `ProcessorFactoryBase` (#1596)
+  - `ExporterFactoryBase` (#1630)
+- Remove logs factories and merge with normal factories (#1569)
+- Remove `reconnection_delay` from OpenCensus exporter (#1516)
+- Remove `ConsumerOld` interfaces (#1631)
+
+## 🚀 New components 🚀
+- `prometheusremotewrite` exporter: Send metrics data in Prometheus TimeSeries format to Cortex or any Prometheus (#1544)
+- `kafka` receiver: Receive traces from Kafka (#1410)
+
+## 💡 Enhancements 💡
+- `kafka` exporter: Enable queueing, retry, timeout (#1455)
+- Add `Headers` field in HTTPClientSettings (#1552)
+- Change OpenCensus receiver (#1556) and exporter (#1571) to the new interfaces
+- Add semantic attribute for `telemetry.auto.version` (#1578)
+- Add uptime and RSS memory self-observability metrics (#1549)
+- Support conversion for OpenCensus `SameProcessAsParentSpan` (#1629)
+- Access application version in components (#1559)
+- Make Kafka payload encoding configurable (#1584)
+
+## 🧰 Bug fixes 🧰
+- Stop further processing if `filterprocessor` filters all data (#1500)
+- `processscraper`: Use same scrape time for all data points coming from same process (#1539)
+- Ensure that time conversion for 0 returns nil timestamps or Time where IsZero returns true (#1550)
+- Fix multiple exporters panic (#1563)
+- Allow `attribute` processor for external use (#1574)
+- Do not duplicate filesystem metrics for devices with many mount points (#1617)
+
+## v0.8.0 Beta
+
+## 🚀 New components 🚀
+
+- `groupbytrace` processor that waits for a trace to be completed (#1362)
+
+## 💡 Enhancements 💡
+
+- Migrate `zipkin` receiver/exporter to the new interfaces (#1484)
+- Migrate `prometheus` receiver/exporter to the new interfaces (#1477, #1515)
+- Add new FactoryUnmarshaler support to all components, deprecate old way (#1468)
+- Update `fileexporter` to write data in OTLP (#1488)
+- Add extension factory helper (#1485)
+- Host scrapers: Use same scrape time for all data points coming from same source (#1473)
+- Make logs SeverityNumber publicly available (#1496)
+- Add recently included conventions for k8s and container resources (#1519)
+- Add new config StartTimeMetricRegex to `prometheus` receiver (#1511)
+- Convert Zipkin receiver and exporter to use OTLP (#1446)
+
+## 🧰 Bug fixes 🧰
+
+- Infer OpenCensus resource type based on OpenTelemetry's semantic conventions (#1462)
+- Fix log adapter in `prometheus` receiver (#1493)
+- Avoid frequent errors for process telemetry on Windows (#1487)
+
+## v0.7.0 Beta
+
+## 🚀 New components 🚀
+
+- Receivers
+  - `fluentforward` runs a TCP server that accepts events via the [Fluent Forward protocol](https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1) (#1173)
+- Exporters
+  - `kafka` exports traces to Kafka (#1439)
+- Extensions
+  - **Experimental** `fluentbit` facilitates running a FluentBit subprocess of the collector (#1381)
+
+## 💡 Enhancements 💡
+
+- Updated `golang/protobuf` from v1.3.5 to v1.4.2 (#1308)
+- Updated `opencensus-proto` from v0.2.1 to v0.3.0 (#1308)
+- Added round_robin `balancer_name` as an option to gRPC client settings (#1353)
+- `hostmetrics` receiver
+  - Switch to using perf counters to get disk io metrics on Windows (#1340)
+  - Add device filter for file system (#1379) and disk (#1378) scrapers
+  - Record process physical & virtual memory stats separately (#1403)
+  - Scrape system.disk.time on Windows (#1408)
+  - Add disk.pending_operations metric (#1428)
+  - Add network interface label to network metrics (#1377)
+- Add `exporterhelper` (#1351) and `processorhelper` (#1359) factories
+- Update OTLP to latest version (#1384)
+- Disable timeout, retry on failure and sending queue for `logging` exporter (#1400)
+- Add support for retry and sending queue for `jaeger` exporter (#1401)
+- Add batch size bytes metric to `batch` processor (#1270)
+- `otlp` receiver: Add Log Support (#1444)
+- Allow to configure read/write buffer sizes for http Client (#1447)
+- Update DB conventions to latest and add exception conventions (#1452)
+
+## 🧰 Bug fixes 🧰
+
+- Fix `resource` processor for old metrics (#1412)
+- `jaeger` receiver: Do not try to stop if failed to start. Collector service will do that (#1434)
+
+## v0.6.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Renamed the metrics generated by `hostmetrics` receiver to match the (currently still pending) OpenTelemetry system metric conventions (#1261) (#1269)
+- Removed `vmmetrics` receiver (#1282)
+- Removed `cpu` scraper `report_per_cpu` config option (#1326)
+
+## 💡 Enhancements 💡
+
+- Added disk merged (#1267) and process count (#1268) metrics to `hostmetrics`
+- Log metric data points in `logging` exporter (#1258)
+- Changed the `batch` processor to not ignore the errors returned by the exporters (#1259)
+- Build and publish MSI (#1153) and DEB/RPM packages (#1278, #1335)
+- Added batch size metric to `batch` processor (#1241)
+- Added log support for `memorylimiter` processor (#1291) and `logging` exporter (#1298)
+- Always add tags for `observability`, other metrics may use them (#1312)
+- Added metrics support (#1313) and allow partial retries in `queued_retry` processor (#1297)
+- Update `resource` processor: introduce `attributes` config parameter to specify actions on attributes similar to `attributes` processor, old config interface is deprecated (#1315)
+- Update memory state labels for non-Linux OSs (#1325)
+- Ensure tcp connection value is provided for all states, even when count is 0 (#1329)
+- Set `batch` processor channel size to num cpus (#1330)
+- Add `send_batch_max_size` config parameter to `batch` processor enforcing hard limit on batch size (#1310)
+- Add support for including a per-RPC authentication to gRPC settings (#1250)
+
+## 🧰 Bug fixes 🧰
+
+- Fixed OTLP waitForReady, not set from config (#1254)
+- Fixed all translation diffs between OTLP and Jaeger (#1222)
+- Disabled `process` scraper for any non Linux/Windows OS (#1328)
+
+## v0.5.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- **Update OTLP to v0.4.0 (#1142)**: Collector will be incompatible with any other sender or receiver of OTLP protocol of different versions
+- Make "--new-metrics" command line flag the default (#1148)
+- Change `endpoint` to `url` in Zipkin exporter config (#1186)
+- Change `tls_credentials` to `tls_settings` in Jaeger receiver config (#1233)
+- OTLP receiver config change for `protocols` to support mTLS (#1223)
+- Remove
`export_resource_labels` flag from Zipkin exporter (#1163) + +## 🚀 New components 🚀 + +- Receivers + - Added process scraper to the `hostmetrics` receiver (#1047) + +## 💡 Enhancements 💡 + +- otlpexporter: send configured headers in request (#1130) +- Enable Collector to be run as a Windows service (#1120) +- Add config for HttpServer (#1196) +- Allow cors in HTTPServerSettings (#1211) +- Add a generic grpc server settings config, cleanup client config (#1183) +- Rely on gRPC to batch and loadbalance between connections instead of custom logic (#1212) +- Allow to tune the read/write buffers for gRPC clients (#1213) +- Allow to tune the read/write buffers for gRPC server (#1218) + +## 🧰 Bug fixes 🧰 + +- Handle overlapping metrics from different jobs in prometheus exporter (#1096) +- Fix handling of SpanKind INTERNAL in OTLP OC translation (#1143) +- Unify zipkin v1 and v2 annotation/tag parsing logic (#1002) +- mTLS: Add support to configure client CA and enforce ClientAuth (#1185) +- Fixed untyped Prometheus receiver bug (#1194) +- Do not embed ProtocolServerSettings in gRPC (#1210) +- Add Context to the missing CreateMetricsReceiver method (#1216) + +## v0.4.0 Beta + +Released 2020-06-16 + +## 🛑 Breaking changes 🛑 + +- `isEnabled` configuration option removed (#909) +- `thrift_tchannel` protocol moved from `jaeger` receiver to `jaeger_legacy` in contrib (#636) + +## ⚠️ Major changes ⚠️ + +- Switch from `localhost` to `0.0.0.0` by default for all receivers (#1006) +- Internal API Changes (only impacts contributors) + - Add context to `Start` and `Stop` methods in the component (#790) + - Rename `AttributeValue` and `AttributeMap` method names (#781) +(other breaking changes in the internal trace data types) + - Change entire repo to use the new vanityurl go.opentelemetry.io/collector (#977) + +## 🚀 New components 🚀 + +- Receivers + - `hostmetrics` receiver with CPU (#862), disk (#921), load (#974), filesystem (#926), memory (#911), network (#930), and virtual memory (#989) support +- Processors + - `batch` for batching received metrics (#1060) + - `filter` for filtering (dropping) received metrics (#1001) + +## 💡 Enhancements 💡 + +- `otlp` receiver implement HTTP X-Protobuf (#1021) +- Exporters: Support mTLS in gRPC exporters (#927) +- Extensions: Add `zpages` for service (servicez, pipelinez, extensions) (#894) + +## 🧰 Bug fixes 🧰 + +- Add missing logging for metrics at `debug` level (#1108) +- Fix setting internal status code in `jaeger` receivers (#1105) +- `zipkin` export fails on span without timestamp when used with `queued_retry` (#1068) +- Fix `zipkin` receiver status code conversion (#996) +- Remove extra send/receive annotations with using `zipkin` v1 (#960) +- Fix resource attribute mutation bug when exporting in `jaeger` proto (#907) +- Fix metric/spans count, add tests for nil entries in the slices (#787) + + +## 🧩 Components 🧩 + +### Traces + +| Receivers | Processors | Exporters | +|:----------:|:-----------:|:----------:| +| Jaeger | Attributes | File | +| OpenCensus | Batch | Jaeger | +| OTLP | Memory Limiter | Logging | +| Zipkin | Queued Retry | OpenCensus | +| | Resource | OTLP | +| | Sampling | Zipkin | +| | Span || + +### Metrics + +| Receivers | Processors | Exporters | +|:----------:|:-----------:|:----------:| +| HostMetrics | Batch | File | +| OpenCensus | Filter | Logging | +| OTLP | Memory Limiter | OpenCensus | +| Prometheus || OTLP | +| VM Metrics || Prometheus | + +### Extensions + +- Health Check +- Performance Profiler +- zPages + + +## v0.3.0 Beta + 
+Released 2020-03-30 + +### Breaking changes + +- Make prometheus receiver config loading strict. #697 +Prometheus receiver will now fail fast if the config contains unused keys in it. + +### Changes and fixes + +- Enable best effort serve by default of Prometheus Exporter (https://github.com/orijtech/prometheus-go-metrics-exporter/pull/6) +- Fix null pointer exception in the logging exporter #743 +- Remove unnecessary condition to have at least one processor #744 + +### Components + +| Receivers / Exporters | Processors | Extensions | +|:---------------------:|:-----------:|:-----------:| +| Jaeger | Attributes | Health Check | +| OpenCensus | Batch | Performance Profiler | +| OpenTelemetry | Memory Limiter | zPages | +| Zipkin | Queued Retry | | +| | Resource | | +| | Sampling | | +| | Span | | + + +## v0.2.8 Alpha + +Alpha v0.2.8 of OpenTelemetry Collector + +- Implemented OTLP receiver and exporter. +- Added ability to pass config to the service programmatically (useful for custom builds). +- Improved own metrics / observability. +- Refactored component and factory interface definitions (breaking change #683) + + +## v0.2.7 Alpha + +Alpha v0.2.7 of OpenTelemetry Collector + +- Improved error handling on shutdown +- Partial implementation of new metrics (new obsreport package) +- Include resource labels for Zipkin exporter +- New `HASH` action to attribute processor + + + +## v0.2.6 Alpha + +Alpha v0.2.6 of OpenTelemetry Collector. +- Update metrics prefix to `otelcol` and expose command line argument to modify the prefix value. +- Extend Span processor to have include/exclude span logic. +- Batch dropped span now emits zero when no spans are dropped. + + +## v0.2.5 Alpha + +Alpha v0.2.5 of OpenTelemetry Collector. + +- Regexp-based filtering of spans based on service names. +- Ability to choose strict or regexp matching for include/exclude filters. + + +## v0.2.4 Alpha + +Alpha v0.2.4 of OpenTelemetry Collector. + +- Regexp-based filtering of span names. +- Ability to extract attributes from span names and rename span. +- File exporter for debugging. +- Span processor is now enabled by default. + + +## v0.2.3 Alpha + +Alpha v0.2.3 of OpenTelemetry Collector. + +Changes: +21a70d6 Add a memory limiter processor (#498) +9778b16 Refactor Jaeger Receiver config (#490) +ec4ad0c Remove workers from OpenCensus receiver implementation (#497) +4e01fa3 Update k8s config to use opentelemetry docker image and configuration (#459) + + +## v0.2.2 Alpha + +Alpha v0.2.2 of OpenTelemetry Collector. + +Main changes visible to users since previous release: + +- Improved Testbed and added more E2E tests. +- Made component interfaces more uniform (this is a breaking change). + +Note: v0.2.1 never existed and is skipped since it was tainted in some dependencies. + + +## v0.2.0 Alpha + +Alpha v0.2 of OpenTelemetry Collector. 
+ +Docker image: omnition/opentelemetry-collector:v0.2.0 (we are working on getting this under an OpenTelemetry org) + +Main changes visible to users since previous release: + +* Rename from `service` to `collector`, the binary is now named `otelcol` + +* Configuration reorganized and using strict mode + +* Concurrency issues for pipelines transforming data addressed + +Commits: + +```terminal +0e505d5 Refactor config: pipelines now under service (#376) +402b80c Add Capabilities to Processor and use for Fanout cloning decision (#374) +b27d824 Use strict mode to read config (#375) +d769eb5 Fix concurrency handling when data is fanned out (#367) +dc6b290 Rename all github paths from opentelemtry-service to opentelemetry-collector (#371) +d038801 Rename otelsvc to otelcol (#365) +c264e0e Add Include/Exclude logic for Attributes Processor (#363) +8ce427a Pin a commit for Prometheus dependency in go.mod (#364) +2393774 Bump Jaeger version to 1.14.0 (latest) (#349) +63362d5 Update testbed modules (#360) +c0e2a27 Change dashes to underscores to separate words in config files (#357) +7609eaa Rename OpenTelemetry Service to Collector in docs and comments (#354) +bc5b299 Add common gRPC configuration settings (#340) +b38505c Remove network access popups on macos (#348) +f7727d1 Fixed loop variable pointer bug in jaeger translator (#341) +958beed Ensure that ConsumeMetricsData() is not passed empty metrics in the Prometheus receiver (#345) +0be295f Change log statement in Prometheus receiver from info to debug. (#344) +d205393 Add Owais to codeowners (#339) +8fa6afe Translate OC resource labels to Jaeger process tags (#325) +``` + + +## v0.0.2 Alpha + +Alpha release of OpenTelemetry Service. + +Docker image: omnition/opentelemetry-service:v0.0.2 (we are working on getting this under an OpenTelemetry org) + +Main changes visible to users since previous release: + +```terminal +8fa6afe Translate OC resource labels to Jaeger process tags (#325) +047b0f3 Allow environment variables in config (#334) +96c24a3 Add exclude/include spans option to attributes processor (#311) +4db0414 Allow metric processors to be specified in pipelines (#332) +c277569 Add observability instrumentation for Prometheus receiver (#327) +f47aa79 Add common configuration for receiver tls (#288) +a493765 Refactor extensions to new config format (#310) +41a7afa Add Span Processor logic +97a71b3 Use full name for the metrics and spans created for observability (#316) +fed4ed2 Add support to record metrics for metricsexporter (#315) +5edca32 Add include_filter configuration to prometheus receiver (#298) +0068d0a Passthrough CORS allowed origins (#260) +``` + + +## v0.0.1 Alpha + +This is the first alpha release of OpenTelemetry Service. 
+
+Docker image: omnition/opentelemetry-service:v0.0.1
+
+
+[v0.3.0]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.10...v0.3.0
+[v0.2.10]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.8...v0.2.10
+[v0.2.8]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.7...v0.2.8
+[v0.2.7]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.6...v0.2.7
+[v0.2.6]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.5...v0.2.6
+[v0.2.5]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.4...v0.2.5
+[v0.2.4]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.3...v0.2.4
+[v0.2.3]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.2...v0.2.3
+[v0.2.2]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.0...v0.2.2
+[v0.2.0]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.0.2...v0.2.0
+[v0.0.2]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.0.1...v0.0.2
+[v0.0.1]: https://github.com/open-telemetry/opentelemetry-collector/tree/v0.0.1
diff --git a/internal/otel_collector/CONTRIBUTING.md b/internal/otel_collector/CONTRIBUTING.md
new file mode 100644
index 00000000000..34c4cb22cea
--- /dev/null
+++ b/internal/otel_collector/CONTRIBUTING.md
@@ -0,0 +1,289 @@
+# Contributing Guide
+
+We'd love your help!
+
+## How to structure PRs to get expedient reviews?
+
+We recommend that any PR (unless it is trivial) be smaller than 500 lines
+(excluding go mod/sum changes) so that reviewers can do a thorough and
+reasonably fast review.
+
+### When adding a new component
+
+Consider submitting the following as separate PRs (more details about adding new components [here](#adding-new-components)):
+
+* The first PR should include the overall structure of the new component:
+  * Readme, configuration, and factory implementation, usually using the helper factory structs.
+  * This PR is usually trivial to review, so the size limit does not apply to it.
+* The second PR should include the concrete implementation of the component.
+If this PR is larger than the recommended size, consider splitting it into multiple PRs.
+* The last PR should enable the new component and add it to the `otelcontribcol` binary by updating the `components.go` file.
+The component must be enabled only after sufficient testing and once there is
+enough confidence in its stability and quality.
+
+### Refactoring Work
+
+Any refactoring work must be split into its own PR that does not include any behavior changes.
+This is important to avoid hidden changes in large and otherwise trivial refactoring PRs.
+
+## Reporting a bug or requesting a feature
+
+Reporting bugs is an important contribution. Please make sure to include:
+
+* Expected and actual behavior
+* The OpenTelemetry version you are running
+* If possible, steps to reproduce
+
+## How to contribute
+
+### Before you start
+
+Please read the project contribution
+[guide](https://github.com/open-telemetry/community/blob/master/CONTRIBUTING.md)
+for general practices in the OpenTelemetry project.
+
+Select a good issue from the links below (ordered by difficulty/complexity):
+
+* [Good First Issue](https://github.com/open-telemetry/opentelemetry-collector/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
+* [Up for Grabs](https://github.com/open-telemetry/opentelemetry-collector/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3Aup-for-grabs+)
+* [Help Wanted](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22)
+
+Comment on the issue that you want to work on so we can assign it to you and
+clarify anything related to it.
+
+If you would like to work on something that is not listed as an issue
+(e.g. a new feature or enhancement), please first read our [vision](docs/vision.md) and
+[roadmap](docs/roadmap.md) to make sure your proposal aligns with the goals of the
+Collector, then create an issue and describe your proposal. It is best to do this
+in advance so that maintainers can decide whether the proposal is a good fit for
+this repository. This helps avoid situations where you spend significant time
+on something that maintainers later decide does not belong in this repo.
+
+Follow the instructions below to create your PR.
+
+### Fork
+
+In the interest of keeping this repository clean and manageable, you should
+work from a fork. To create a fork, click the 'Fork' button at the top of the
+repository, then clone the fork locally using `git clone
+git@github.com:USERNAME/opentelemetry-service.git`.
+
+You should also add this repository as an "upstream" repo to your local copy,
+in order to keep it up to date. You can add this as a remote like so:
+
+`git remote add upstream https://github.com/open-telemetry/opentelemetry-collector.git`
+
+Verify that the upstream exists:
+
+`git remote -v`
+
+To update your fork, fetch the upstream repo's branches and commits, then merge
+your master with upstream's master:
+
+```
+git fetch upstream
+git checkout master
+git merge upstream/master
+```
+
+Remember to always work in a branch of your local copy, as you might otherwise
+have to contend with conflicts in master.
+
+Please also see the [GitHub
+workflow](https://github.com/open-telemetry/community/blob/master/CONTRIBUTING.md#github-workflow)
+section of the general project contributing guide.
+
+## Required Tools
+
+Working with the project sources requires the following tools:
+
+1. [git](https://git-scm.com/)
+2. [go](https://golang.org/) (version 1.14 and up)
+3. [make](https://www.gnu.org/software/make/)
+4. [docker](https://www.docker.com/)
+
+## Repository Setup
+
+Fork the repo, then check out the upstream repo into your GOPATH:
+
+```
+$ GO111MODULE="" go get -d go.opentelemetry.io/collector
+```
+
+Add your fork as an origin:
+
+```shell
+$ cd $(go env GOPATH)/src/go.opentelemetry.io/collector
+$ git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opentelemetry-service.git
+```
+
+Run tests, fmt and lint:
+
+```shell
+$ make install-tools # Only first time.
+$ make
+```
+
+*Note:* the default build target requires tools that are installed at `$(go env GOPATH)/bin`;
+ensure that `$(go env GOPATH)/bin` is included in your `PATH`.
+
+## Creating a PR
+
+Check out a new branch, make modifications, build locally, and push the branch to your fork
+to open a new PR:
+
+```shell
+$ git checkout -b feature
+# edit
+$ make
+$ make fmt
+$ git commit
+$ git push fork feature
+```
+
+## General Notes
+
+This project uses Go 1.14.* and CircleCI.
+
+CircleCI uses the Makefile with the `ci` target; it is recommended to
+run it before submitting your PR. It runs `gofmt -s` (simplify) and `golint`.
+
+The dependencies are managed with `go mod`. If you work with the sources under your
+`$GOPATH`, you need to set the environment variable `GO111MODULE=on`.
+
+## Coding Guidelines
+
+Although the OpenTelemetry project as a whole is still in the Alpha stage, we consider
+the OpenTelemetry Collector to be close to production quality, and the quality bar
+for contributions is set accordingly. Contributions must have readable code written
+with maintainability in mind (if in doubt, check [Effective Go](https://golang.org/doc/effective_go.html)
+for coding advice). The code must adhere to the following robustness principles that
+are important for software that runs autonomously and continuously without direct
+interaction with a human (such as this Collector).
+
+### Startup Error Handling
+
+Verify configuration during startup and fail fast if the configuration is invalid.
+This brings the problem to a human's attention, as it is more typical for humans
+to notice problems when the process is starting as opposed to problems that may arise
+some time (potentially a long time) after process startup. Monitoring systems are likely
+to automatically flag processes that exit with failure during startup, making it
+easier to notice the problem. The Collector should print a reasonable log message to
+explain the problem and exit with a non-zero code. It is acceptable to crash the process
+during startup if there is no good way to exit cleanly, but do your best to log the
+problem and exit with a meaningful process exit code.
+
+### Propagate Errors to the Caller
+
+Do not crash or exit outside the `main()` function, e.g. via `log.Fatal` or `os.Exit`,
+even during startup. Instead, return detailed errors to be handled appropriately
+by the caller. The code in packages other than `main` may be imported and used by
+third-party applications, and they should have full control over error handling
+and process termination.
+
+### Do not Crash after Startup
+
+Do not crash or exit the Collector process after the startup sequence is finished.
+A running Collector typically contains data that is received but not yet exported further
+(e.g. it is stored in the queues and other processors). Crashing or exiting the Collector
+process will lose this data, since typically the receiver has already acknowledged
+receipt of the data and the senders will not send it again.
+
+### Bad Input Handling
+
+Do not crash on bad input in receivers or elsewhere in the pipeline.
+[Crash-only software](https://en.wikipedia.org/wiki/Crash-only_software)
+is valid in certain cases; however, it is not a correct approach for the Collector (except
+during startup, see above). The reason is that many senders from which the Collector
+receives data have built-in automatic retries of the _same_ data if no
+acknowledgment is received from the Collector. If you crash on bad input,
+chances are high that after the Collector is restarted it will see the same
+data in the input and will crash again. This will likely result in an infinite
+crashing loop if you have automatic retries in place.
+
+Typically, bad input detected in a receiver should be reported back to the
+sender. If it is elsewhere in the pipeline, it may be too late to send a response
+to the sender (particularly in processors which are not synchronously processing
+data). In either case, it is recommended to keep a metric that counts bad input data.
+
+### Error Handling and Retries
+
+Be rigorous in error handling. Don't ignore errors. Think carefully about each
+error and decide if it is a fatal problem or a transient problem that may go away
+when retried. Fatal errors should be logged or recorded in an internal metric to
+provide visibility to users of the Collector. For transient errors, come up with a
+retry strategy and implement it. Typically you will
+want to implement retries with some sort of exponential back-off strategy. For
+connection or sending retries, use jitter for back-off intervals to avoid overwhelming
+your destination when the network is restored or the destination is recovered.
+[Exponential Backoff](https://github.com/cenkalti/backoff) is a good library that
+provides all this functionality (a sketch of this pattern appears at the end of
+these guidelines).
+
+### Logging
+
+Log your component startup and shutdown, including successful outcomes (but don't
+overdo it; keep the number of success messages to a minimum).
+This can help to understand the context of failures if they occur elsewhere after
+your code is successfully executed.
+
+Use logging carefully for events that can happen frequently to avoid flooding
+the logs. Avoid outputting logs per received or processed data item, since this can
+amount to a very large number of log entries (the Collector is designed to process
+many thousands of spans and metrics per second). For such high-frequency events,
+instead of logging, consider adding an internal metric and incrementing it when
+the event happens.
+
+Make log messages human readable and also include the data needed for easier
+understanding of what happened and in what context.
+
+### Resource Usage
+
+Limit the CPU, RAM, and other resources that the code can use. Do not write code
+that consumes resources in an uncontrolled manner. For example, if you have a queue
+that can contain unprocessed messages, always limit the size of the queue unless you
+have other ways to guarantee that the queue will be consumed faster than items are
+added to it.
+
+Performance-test the code both for normal use-cases under acceptable load and for
+abnormal use-cases when the load exceeds the acceptable level many times over. Ensure
+that your code performs predictably under abnormal use. For example, if the code
+needs to process received data and cannot keep up with the receiving rate, it is
+not acceptable to keep allocating more memory for received data until the Collector
+runs out of memory. Instead, have protections for these situations, e.g. when hitting
+resource limits, drop the data and record the fact that it was dropped in a metric
+that is exposed to users.
+
+### Graceful Shutdown
+
+The Collector does not yet support graceful shutdown, but we plan to add it. All components
+must be ready to shut down gracefully via the `Shutdown()` function that all component
+interfaces require. If components contain any temporary data, they need to process it
+and export it out of the Collector before shutdown completes. The shutdown process
+will have a maximum allowed duration, so put a limit on how long your shutdown
+operation can take.
+
+### Unit Tests
+
+Cover important functionality with unit tests. We require that contributions
+do not decrease the overall code coverage of the codebase; this is aligned with our
+goal to increase coverage over time. Keep track of the execution time of your unit
+tests and try to keep them as short as possible.
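+
+Below is a minimal sketch of the retry pattern recommended in the Error Handling
+and Retries section above. It assumes the `github.com/cenkalti/backoff/v4` API,
+and `sendBatch` is a hypothetical stand-in for a real export call:
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// sendBatch is a hypothetical export call that can fail transiently.
+func sendBatch() error { return errors.New("connection refused") }
+
+func main() {
+	b := backoff.NewExponentialBackOff()
+	b.InitialInterval = 500 * time.Millisecond
+	b.RandomizationFactor = 0.5         // jitter, so retries from many senders spread out
+	b.MaxElapsedTime = 30 * time.Second // give up after this much total waiting
+
+	err := backoff.Retry(func() error {
+		// For fatal errors, wrap with backoff.Permanent(err) to stop retrying immediately.
+		return sendBatch()
+	}, b)
+	if err != nil {
+		// A real component would record the drop in a metric; log here for brevity.
+		log.Printf("dropping batch after retries: %v", err)
+	}
+}
+```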
+ +## End-to-end Tests + +If you implement a new component add end-to-end tests for the component using +the automated [Testbed](testbed/README.md). + +## Release + +See [release](docs/release.md) for details. + +## Common Issues + +Build fails due to dependency issues, e.g. + +```sh +go: github.com/golangci/golangci-lint@v1.31.0 requires + github.com/tommy-muehle/go-mnd@v1.3.1-0.20200224220436-e6f9a994e8fa: invalid pseudo-version: git fetch --unshallow -f origin in /root/go/pkg/mod/cache/vcs/053b1e985f53e43f78db2b3feaeb7e40a2ae482c92734ba3982ca463d5bf19ce: exit status 128: + fatal: git fetch-pack: expected shallow list + ``` + +`go env GOPROXY` should return `https://proxy.golang.org,direct`. If it does not, set it as an environment variable: + +`export GOPROXY=https://proxy.golang.org,direct` diff --git a/internal/otel_collector/LICENSE b/internal/otel_collector/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/internal/otel_collector/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/internal/otel_collector/Makefile b/internal/otel_collector/Makefile new file mode 100644 index 00000000000..ecb6d92b8fa --- /dev/null +++ b/internal/otel_collector/Makefile @@ -0,0 +1,309 @@ +include ./Makefile.Common + +# This is the code that we want to run checklicense, staticcheck, etc. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './cmd/issuegenerator/*' \ + -not -path './cmd/mdatagen/*' \ + -not -path './internal/tools/*' \ + -not -path './examples/demo/app/*' \ + -not -path './internal/data/opentelemetry-proto-gen/*' \ + -type f | sort) + +# ALL_PKGS is the list of all packages where ALL_SRC files reside. +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +# All source code and documents. Used in spell check. +ALL_DOC := $(shell find . \( -name "*.md" -o -name "*.yaml" \) \ + -type f | sort) + +# ALL_MODULES includes ./* dirs (excludes . dir) +ALL_MODULES := $(shell find . -type f -name "go.mod" -exec dirname {} \; | sort | egrep '^./' ) + +CMD?= +TOOLS_MOD_DIR := ./internal/tools + +GOOS=$(shell go env GOOS) +GOARCH=$(shell go env GOARCH) +# BUILD_TYPE should be one of (dev, release). +BUILD_TYPE?=release + +GIT_SHA=$(shell git rev-parse --short HEAD) +BUILD_INFO_IMPORT_PATH=go.opentelemetry.io/collector/internal/version +BUILD_X1=-X $(BUILD_INFO_IMPORT_PATH).GitHash=$(GIT_SHA) +ifdef VERSION +BUILD_X2=-X $(BUILD_INFO_IMPORT_PATH).Version=$(VERSION) +endif +BUILD_X3=-X $(BUILD_INFO_IMPORT_PATH).BuildType=$(BUILD_TYPE) +BUILD_INFO=-ldflags "${BUILD_X1} ${BUILD_X2} ${BUILD_X3}" + +RUN_CONFIG?=examples/local/otel-config.yaml + +CONTRIB_PATH=$(CURDIR)/../opentelemetry-collector-contrib + +# Function to execute a command. Note the empty line before endef to make sure each command +# gets executed separately instead of concatenated with previous one. +# Accepts command to execute as first parameter. 
+define exec-command +$(1) + +endef + +.DEFAULT_GOAL := all + +.PHONY: all +all: gochecklicense goimpi golint gomisspell gotest otelcol + +all-modules: + @echo $(ALL_MODULES) | tr ' ' '\n' | sort + +.PHONY: testbed-loadtest +testbed-loadtest: otelcol + cd ./testbed/tests && ./runtests.sh + +.PHONY: testbed-correctness +testbed-correctness: otelcol + cd ./testbed/correctness/traces && ./runtests.sh + +.PHONY: testbed-list-loadtest +testbed-list-loadtest: + RUN_TESTBED=1 $(GOTEST) -v ./testbed/tests --test.list '.*'| grep "^Test" + +.PHONY: testbed-list-correctness +testbed-list-correctness: + RUN_TESTBED=1 $(GOTEST) -v ./testbed/correctness --test.list '.*'| grep "^Test" + +.PHONY: gotest +gotest: + @$(MAKE) for-all CMD="make test" + +.PHONY: gobenchmark +gobenchmark: + @$(MAKE) for-all CMD="make benchmark" + +.PHONY: gotest-with-cover +gotest-with-cover: + @echo pre-compiling tests + @time $(GOTEST) -i ./... + $(GO_ACC) ./... + go tool cover -html=coverage.txt -o coverage.html + +.PHONY: goaddlicense +goaddlicense: + @$(MAKE) for-all CMD="make addlicense" + +.PHONY: gochecklicense +gochecklicense: + @$(MAKE) for-all CMD="make checklicense" + +.PHONY: gomisspell +gomisspell: + @$(MAKE) for-all CMD="make misspell" + +.PHONY: gomisspell-correction +gomisspell-correction: + @$(MAKE) for-all CMD="make misspell-correction" + +.PHONY: golint +golint: + @$(MAKE) for-all CMD="make lint" + +.PHONY: goimpi +goimpi: + @$(MAKE) for-all CMD="make impi" + +.PHONY: gofmt +gofmt: + @$(MAKE) for-all CMD="make fmt" + +.PHONY: gotidy +gotidy: + $(MAKE) for-all CMD="rm -fr go.sum" + $(MAKE) for-all CMD="go mod tidy" + +.PHONY: install-tools +install-tools: + cd $(TOOLS_MOD_DIR) && go install github.com/client9/misspell/cmd/misspell + cd $(TOOLS_MOD_DIR) && go install github.com/golangci/golangci-lint/cmd/golangci-lint + cd $(TOOLS_MOD_DIR) && go install github.com/google/addlicense + cd $(TOOLS_MOD_DIR) && go install github.com/jstemmer/go-junit-report + cd $(TOOLS_MOD_DIR) && go install github.com/mjibson/esc + cd $(TOOLS_MOD_DIR) && go install github.com/ory/go-acc + cd $(TOOLS_MOD_DIR) && go install github.com/pavius/impi/cmd/impi + cd $(TOOLS_MOD_DIR) && go install github.com/securego/gosec/v2/cmd/gosec + cd $(TOOLS_MOD_DIR) && go install github.com/tcnksm/ghr + cd $(TOOLS_MOD_DIR) && go install golang.org/x/tools/cmd/goimports + cd $(TOOLS_MOD_DIR) && go install honnef.co/go/tools/cmd/staticcheck + cd cmd/mdatagen && go install ./ + +.PHONY: otelcol +otelcol: + go generate ./... + GO111MODULE=on CGO_ENABLED=0 go build -o ./bin/otelcol_$(GOOS)_$(GOARCH)$(EXTENSION) $(BUILD_INFO) ./cmd/otelcol + +.PHONY: run +run: + GO111MODULE=on go run --race ./cmd/otelcol/... 
--config ${RUN_CONFIG} + +.PHONY: docker-component # Not intended to be used directly +docker-component: check-component + GOOS=linux $(MAKE) $(COMPONENT) + cp ./bin/$(COMPONENT)_linux_amd64 ./cmd/$(COMPONENT)/$(COMPONENT) + docker build -t $(COMPONENT) ./cmd/$(COMPONENT)/ + rm ./cmd/$(COMPONENT)/$(COMPONENT) + +.PHONY: for-all +for-all: + @echo "running $${CMD} in root" + @$${CMD} + @set -e; for dir in $(ALL_MODULES); do \ + (cd "$${dir}" && \ + echo "running $${CMD} in $${dir}" && \ + $${CMD} ); \ + done + +.PHONY: check-component +check-component: +ifndef COMPONENT + $(error COMPONENT variable was not defined) +endif + +.PHONY: add-tag +add-tag: + @[ "${TAG}" ] || ( echo ">> env var TAG is not set"; exit 1 ) + @echo "Adding tag ${TAG}" + @git tag -a ${TAG} -s -m "Version ${TAG}" + +.PHONY: delete-tag +delete-tag: + @[ "${TAG}" ] || ( echo ">> env var TAG is not set"; exit 1 ) + @echo "Deleting tag ${TAG}" + @git tag -d ${TAG} + +.PHONY: docker-otelcol +docker-otelcol: + COMPONENT=otelcol $(MAKE) docker-component + +.PHONY: binaries +binaries: otelcol + +.PHONY: binaries-all-sys +binaries-all-sys: binaries-darwin_amd64 binaries-linux_amd64 binaries-linux_arm64 binaries-windows_amd64 + +.PHONY: binaries-darwin_amd64 +binaries-darwin_amd64: + GOOS=darwin GOARCH=amd64 $(MAKE) binaries + +.PHONY: binaries-linux_amd64 +binaries-linux_amd64: + GOOS=linux GOARCH=amd64 $(MAKE) binaries + +.PHONY: binaries-linux_arm64 +binaries-linux_arm64: + GOOS=linux GOARCH=arm64 $(MAKE) binaries + +.PHONY: binaries-windows_amd64 +binaries-windows_amd64: + GOOS=windows GOARCH=amd64 EXTENSION=.exe $(MAKE) binaries + +.PHONY: deb-rpm-package +%-package: ARCH ?= amd64 +%-package: + $(MAKE) binaries-linux_$(ARCH) + docker build -t otelcol-fpm internal/buildscripts/packaging/fpm + docker run --rm -v $(CURDIR):/repo -e PACKAGE=$* -e VERSION=$(VERSION) -e ARCH=$(ARCH) otelcol-fpm + +.PHONY: genmdata +genmdata: + $(MAKE) for-all CMD="go generate ./..." + +# Definitions for ProtoBuf generation. + +# The source directory for OTLP ProtoBufs. +OPENTELEMETRY_PROTO_SRC_DIR=internal/data/opentelemetry-proto + +# Find all .proto files. +OPENTELEMETRY_PROTO_FILES := $(subst $(OPENTELEMETRY_PROTO_SRC_DIR)/,,$(wildcard $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/proto/*/v1/*.proto $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/proto/collector/*/v1/*.proto)) + +# Target directory to write generated files to. +PROTO_TARGET_GEN_DIR=internal/data/opentelemetry-proto-gen + +# Go package name to use for generated files. +PROTO_PACKAGE=go.opentelemetry.io/collector/$(PROTO_TARGET_GEN_DIR) + +# Intermediate directory used during generation. +PROTO_INTERMEDIATE_DIR=internal/data/.patched-otlp-proto + +DOCKER_PROTOBUF ?= otel/build-protobuf:0.1.0 +PROTOC := docker run --rm -u ${shell id -u} -v${PWD}:${PWD} -w${PWD}/$(PROTO_INTERMEDIATE_DIR) ${DOCKER_PROTOBUF} --proto_path=${PWD} +PROTO_INCLUDES := -I/usr/include/github.com/gogo/protobuf -I./ + +# Generate OTLP Protobuf Go files. This will place generated files in PROTO_TARGET_GEN_DIR. +genproto: + git submodule update --init + # Call a sub-make to ensure OPENTELEMETRY_PROTO_FILES is populated after the submodule + # files are present. + $(MAKE) genproto_sub + $(MAKE) fmt + +genproto_sub: + @echo Generating code for the following files: + @$(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,echo $(file))) + + @echo Delete intermediate directory. + @rm -rf $(PROTO_INTERMEDIATE_DIR) + + @echo Copy .proto file to intermediate directory. 
+ mkdir -p $(PROTO_INTERMEDIATE_DIR)/opentelemetry + cp -R $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/* $(PROTO_INTERMEDIATE_DIR)/opentelemetry + + # Patch proto files. See proto_patch.sed for patching rules. + @echo Modify them in the intermediate directory. + $(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,sed -f proto_patch.sed $(OPENTELEMETRY_PROTO_SRC_DIR)/$(file) > $(PROTO_INTERMEDIATE_DIR)/$(file))) + + @echo Generate Go code from .proto files in intermediate directory. + $(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,$(PROTOC) $(PROTO_INCLUDES) --gogofaster_out=plugins=grpc:./ $(file))) + + @echo Generate gRPC gateway code. + $(PROTOC) $(PROTO_INCLUDES) --grpc-gateway_out=logtostderr=true,grpc_api_configuration=opentelemetry/proto/collector/trace/v1/trace_service_http.yaml:./ opentelemetry/proto/collector/trace/v1/trace_service.proto + $(PROTOC) $(PROTO_INCLUDES) --grpc-gateway_out=logtostderr=true,grpc_api_configuration=opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml:./ opentelemetry/proto/collector/metrics/v1/metrics_service.proto + $(PROTOC) $(PROTO_INCLUDES) --grpc-gateway_out=logtostderr=true,grpc_api_configuration=opentelemetry/proto/collector/logs/v1/logs_service_http.yaml:./ opentelemetry/proto/collector/logs/v1/logs_service.proto + + @echo Move generated code to target directory. + mkdir -p $(PROTO_TARGET_GEN_DIR) + cp -R $(PROTO_INTERMEDIATE_DIR)/$(PROTO_PACKAGE)/* $(PROTO_TARGET_GEN_DIR)/ + rm -rf $(PROTO_INTERMEDIATE_DIR)/go.opentelemetry.io + + @rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/* + @rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/.* > /dev/null 2>&1 || true + +# Generate structs, functions and tests for pdata package. Must be used after any changes +# to proto and after running `make genproto` +genpdata: + go run cmd/pdatagen/main.go + $(MAKE) fmt + +# Checks that the HEAD of the contrib repo checked out in CONTRIB_PATH compiles +# against the current version of this repo. +.PHONY: check-contrib +check-contrib: + @echo Setting contrib at $(CONTRIB_PATH) to use this core checkout + make -C $(CONTRIB_PATH) for-all CMD="go mod edit -replace go.opentelemetry.io/collector=$(CURDIR)" + make -C $(CONTRIB_PATH) test + @echo Restoring contrib to no longer use this core checkout + make -C $(CONTRIB_PATH) for-all CMD="go mod edit -dropreplace go.opentelemetry.io/collector" + +# List of directories where certificates are stored for unit tests. +CERT_DIRS := config/configgrpc/testdata \ + config/confighttp/testdata \ + receiver/jaegerreceiver/testdata \ + exporter/jaegerexporter/testdata + +# Generate certificates for unit tests relying on certificates. +.PHONY: certs +certs: + $(foreach dir, $(CERT_DIRS), $(call exec-command, @internal/buildscripts/gen-certs.sh -o $(dir))) + +# Generate certificates for unit tests relying on certificates without copying certs to specific test directories. +.PHONY: certs-dryrun +certs-dryrun: + @internal/buildscripts/gen-certs.sh -d diff --git a/internal/otel_collector/Makefile.Common b/internal/otel_collector/Makefile.Common new file mode 100644 index 00000000000..cfa9892c93b --- /dev/null +++ b/internal/otel_collector/Makefile.Common @@ -0,0 +1,94 @@ +# SRC_ROOT is the top of the source tree. +SRC_ROOT := $(realpath $(dir $(lastword $(MAKEFILE_LIST)))) + +ALL_SRC := $(shell find . -name '*.go' \ + -not -path '*/third_party/*' \ + -type f | sort) + +# ALL_PKGS is the list of all packages where ALL_SRC files reside. +ALL_PKGS := $(shell go list ./...) + +# All source code and documents. 
Used in spell check. +ALL_DOC := $(shell find . \( -name "*.md" -o -name "*.yaml" \) \ + -type f | sort) + +GOTEST_OPT?= -v -race -timeout 180s +GO_ACC=go-acc +GOTEST=go test +ADDLICENCESE=addlicense +MISSPELL=misspell -error +MISSPELL_CORRECTION=misspell -w +STATIC_CHECK=staticcheck +LINT=golangci-lint +IMPI=impi + +all-license-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + @echo "running go unit test ./... in `pwd`" + @echo $(ALL_PKGS) | xargs -n 10 $(GOTEST) $(GOTEST_OPT) + +.PHONY: benchmark +benchmark: + $(GOTEST) -bench=. -run=notests ./... + +.PHONY: addlicense +addlicense: + @ADDLICENCESEOUT=`$(ADDLICENCESE) -y "" -c "The OpenTelemetry Authors" $(ALL_SRC) 2>&1`; \ + if [ "$$ADDLICENCESEOUT" ]; then \ + echo "$(ADDLICENCESE) FAILED => add License errors:\n"; \ + echo "$$ADDLICENCESEOUT\n"; \ + exit 1; \ + else \ + echo "Add License finished successfully"; \ + fi + +.PHONY: checklicense +checklicense: + @ADDLICENCESEOUT=`$(ADDLICENCESE) -check $(ALL_SRC) 2>&1`; \ + if [ "$$ADDLICENCESEOUT" ]; then \ + echo "$(ADDLICENCESE) FAILED => add License errors:\n"; \ + echo "$$ADDLICENCESEOUT\n"; \ + echo "Use 'make addlicense' to fix this."; \ + exit 1; \ + else \ + echo "Check License finished successfully"; \ + fi + +.PHONY: fmt +fmt: + gofmt -w -s ./ + goimports -w -local go.opentelemetry.io/collector ./ + +.PHONY: lint-static-check +lint-static-check: + @STATIC_CHECK_OUT=`$(STATIC_CHECK) $(ALL_PKGS) 2>&1`; \ + if [ "$$STATIC_CHECK_OUT" ]; then \ + echo "$(STATIC_CHECK) FAILED => static check errors:\n"; \ + echo "$$STATIC_CHECK_OUT\n"; \ + exit 1; \ + else \ + echo "Static check finished successfully"; \ + fi + +.PHONY: lint +lint: lint-static-check + $(LINT) run --allow-parallel-runners + +.PHONY: misspell +misspell: +ifdef ALL_DOC + $(MISSPELL) $(ALL_DOC) +endif + +.PHONY: misspell-correction +misspell-correction: +ifdef ALL_DOC + $(MISSPELL_CORRECTION) $(ALL_DOC) +endif + +.PHONY: impi +impi: + @$(IMPI) --local go.opentelemetry.io/collector --scheme stdThirdPartyLocal ./... diff --git a/internal/otel_collector/README.md b/internal/otel_collector/README.md new file mode 100644 index 00000000000..e8340aef62c --- /dev/null +++ b/internal/otel_collector/README.md @@ -0,0 +1,90 @@ +--- + +

+Getting Started • Getting Involved • Getting In Touch
+
+[badges: Go Report Card • Build Status • Codecov Status • GitHub release (latest by date including pre-releases) • Beta]
+
+Contributing • Vision • Design • Monitoring • Performance • Roadmap
+
+ +--- + +# OpenTelemetry Icon OpenTelemetry Collector + +The OpenTelemetry Collector offers a vendor-agnostic implementation on how to +receive, process and export telemetry data. In addition, it removes the need +to run, operate and maintain multiple agents/collectors in order to support +open-source telemetry data formats (e.g. Jaeger, Prometheus, etc.) sending to +multiple open-source or commercial back-ends. + +Objectives: + +- Usable: Reasonable default configuration, supports popular protocols, runs and collects out of the box. +- Performant: Highly stable and performant under varying loads and configurations. +- Observable: An exemplar of an observable service. +- Extensible: Customizable without touching the core code. +- Unified: Single codebase, deployable as an agent or collector with support for traces, metrics and logs. + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md). + +Triagers ([@open-telemetry/collector-triagers](https://github.com/orgs/open-telemetry/teams/collector-triager)): +- [Andrew Hsu](https://github.com/andrewhsu), Lightstep +- [Steve Flanders](https://github.com/flands), Splunk + +Approvers ([@open-telemetry/collector-approvers](https://github.com/orgs/open-telemetry/teams/collector-approvers)): + +- [Dmitrii Anoshin](https://github.com/dmitryax), Splunk +- [James Bebbington](https://github.com/james-bebbington), Google +- [Jay Camp](https://github.com/jrcamp), Splunk +- [Nail Islamov](https://github.com/nilebox), Google +- [Owais Lone](https://github.com/owais), Splunk + +Maintainers ([@open-telemetry/collector-maintainers](https://github.com/orgs/open-telemetry/teams/collector-maintainers)): + +- [Bogdan Drutu](https://github.com/BogdanDrutu), Splunk +- [Tigran Najaryan](https://github.com/tigrannajaryan), Splunk + +Learn more about roles in the [community repository](https://github.com/open-telemetry/community/blob/master/community-membership.md). + +Thanks to all the people who already contributed! + + + + diff --git a/internal/otel_collector/client/client.go b/internal/otel_collector/client/client.go new file mode 100644 index 00000000000..6b134a30abf --- /dev/null +++ b/internal/otel_collector/client/client.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client contains generic representations of clients connecting to different receivers +package client + +import ( + "context" + "net" + "net/http" + + "google.golang.org/grpc/peer" +) + +type ctxKey struct{} + +// Client represents a generic client that sends data to any receiver supported by the OT receiver +type Client struct { + IP string +} + +// NewContext takes an existing context and derives a new context with the client value stored on it +func NewContext(ctx context.Context, c *Client) context.Context { + return context.WithValue(ctx, ctxKey{}, c) +} + +// FromContext takes a context and returns a Client value from it, if present. 
+func FromContext(ctx context.Context) (*Client, bool) { + c, ok := ctx.Value(ctxKey{}).(*Client) + return c, ok +} + +// FromGRPC takes a GRPC context and tries to extract client information from it +func FromGRPC(ctx context.Context) (*Client, bool) { + if p, ok := peer.FromContext(ctx); ok { + ip := parseIP(p.Addr.String()) + if ip != "" { + return &Client{ip}, true + } + } + return nil, false +} + +// FromHTTP takes a net/http Request object and tries to extract client information from it +func FromHTTP(r *http.Request) (*Client, bool) { + ip := parseIP(r.RemoteAddr) + if ip == "" { + return nil, false + } + return &Client{ip}, true +} + +func parseIP(source string) string { + ipstr, _, err := net.SplitHostPort(source) + if err == nil { + return ipstr + } + ip := net.ParseIP(source) + if ip != nil { + return ip.String() + } + return "" +} diff --git a/internal/otel_collector/client/client_test.go b/internal/otel_collector/client/client_test.go new file mode 100644 index 00000000000..2aaa13f4544 --- /dev/null +++ b/internal/otel_collector/client/client_test.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client contains generic representations of clients connecting to different receivers +package client + +import ( + "context" + "net" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/peer" +) + +func TestClientContext(t *testing.T) { + ips := []string{ + "1.1.1.1", "127.0.0.1", "1111", "ip", + } + for _, ip := range ips { + ctx := NewContext(context.Background(), &Client{ip}) + c, ok := FromContext(ctx) + assert.True(t, ok) + assert.NotNil(t, c) + assert.Equal(t, c.IP, ip) + } +} + +func TestParsingGRPC(t *testing.T) { + grpcCtx := peer.NewContext(context.Background(), &peer.Peer{ + Addr: &net.TCPAddr{ + IP: net.ParseIP("192.168.1.1"), + Port: 80, + }, + }) + + client, ok := FromGRPC(grpcCtx) + assert.True(t, ok) + assert.NotNil(t, client) + assert.Equal(t, client.IP, "192.168.1.1") +} + +func TestParsingHTTP(t *testing.T) { + client, ok := FromHTTP(&http.Request{RemoteAddr: "192.168.1.2"}) + assert.True(t, ok) + assert.NotNil(t, client) + assert.Equal(t, client.IP, "192.168.1.2") +} diff --git a/internal/otel_collector/cmd/otelcol/Dockerfile b/internal/otel_collector/cmd/otelcol/Dockerfile new file mode 100644 index 00000000000..4e99c435a63 --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/Dockerfile @@ -0,0 +1,16 @@ +FROM alpine:3.12 as certs +RUN apk --update add ca-certificates + +FROM alpine:3.12 AS otelcol +COPY otelcol / +# Note that this shouldn't be necessary, but in some cases the file seems to be +# copied with the execute bit lost (see #1317) +RUN chmod 755 /otelcol + +FROM scratch +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=otelcol /otelcol / +COPY config.yaml /etc/otel/config.yaml +ENTRYPOINT ["/otelcol"] +CMD ["--config", "/etc/otel/config.yaml"] +EXPOSE 55678 55679 diff --git 
a/internal/otel_collector/cmd/otelcol/config.yaml b/internal/otel_collector/cmd/otelcol/config.yaml new file mode 100644 index 00000000000..ce9ba1a847a --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/config.yaml @@ -0,0 +1,55 @@ +extensions: + health_check: + pprof: + endpoint: 0.0.0.0:1777 + zpages: + endpoint: 0.0.0.0:55679 + +receivers: + otlp: + protocols: + grpc: + http: + + opencensus: + + # Collect own metrics + prometheus: + config: + scrape_configs: + - job_name: 'otel-collector' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + + jaeger: + protocols: + grpc: + thrift_binary: + thrift_compact: + thrift_http: + + zipkin: + +processors: + batch: + +exporters: + logging: + logLevel: debug + +service: + + pipelines: + + traces: + receivers: [otlp, opencensus, jaeger, zipkin] + processors: [batch] + exporters: [logging] + + metrics: + receivers: [otlp, opencensus, prometheus] + processors: [batch] + exporters: [logging] + + extensions: [health_check, pprof, zpages] diff --git a/internal/otel_collector/cmd/otelcol/main.go b/internal/otel_collector/cmd/otelcol/main.go new file mode 100644 index 00000000000..f0a00053790 --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/main.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Program otelcol is the OpenTelemetry Collector that collects stats +// and traces and exports to a configured backend. +package main + +import ( + "fmt" + "log" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/version" + "go.opentelemetry.io/collector/service" + "go.opentelemetry.io/collector/service/defaultcomponents" +) + +func main() { + factories, err := defaultcomponents.Components() + if err != nil { + log.Fatalf("failed to build default components: %v", err) + } + + info := component.ApplicationStartInfo{ + ExeName: "otelcol", + LongName: "OpenTelemetry Collector", + Version: version.Version, + GitHash: version.GitHash, + } + + if err := run(service.Parameters{ApplicationStartInfo: info, Factories: factories}); err != nil { + log.Fatal(err) + } +} + +func runInteractive(params service.Parameters) error { + app, err := service.New(params) + if err != nil { + return fmt.Errorf("failed to construct the application: %w", err) + } + + err = app.Run() + if err != nil { + return fmt.Errorf("application run finished with error: %w", err) + } + + return nil +} diff --git a/internal/otel_collector/cmd/otelcol/main_others.go b/internal/otel_collector/cmd/otelcol/main_others.go new file mode 100644 index 00000000000..e1957c2fb71 --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/main_others.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package main + +import "go.opentelemetry.io/collector/service" + +func run(params service.Parameters) error { + return runInteractive(params) +} diff --git a/internal/otel_collector/cmd/otelcol/main_windows.go b/internal/otel_collector/cmd/otelcol/main_windows.go new file mode 100644 index 00000000000..e30e866e95a --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/main_windows.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package main + +import ( + "fmt" + + "golang.org/x/sys/windows/svc" + + "go.opentelemetry.io/collector/service" +) + +func run(params service.Parameters) error { + isInteractive, err := svc.IsAnInteractiveSession() + if err != nil { + return fmt.Errorf("failed to determine if we are running in an interactive session %w", err) + } + + if isInteractive { + return runInteractive(params) + } else { + return runService(params) + } +} + +func runService(params service.Parameters) error { + // do not need to supply service name when startup is invoked through Service Control Manager directly + if err := svc.Run("", service.NewWindowsService(params)); err != nil { + return fmt.Errorf("failed to start service %w", err) + } + + return nil +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_fields.go b/internal/otel_collector/cmd/pdatagen/internal/base_fields.go new file mode 100644 index 00000000000..32b9d05bd52 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/base_fields.go @@ -0,0 +1,343 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "os" + "strings" +) + +const accessorSliceTemplate = `// ${fieldName} returns the ${originFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". 
+func (ms ${structName}) ${fieldName}() ${returnType} { + return new${returnType}(&(*ms.orig).${originFieldName}) +}` + +const accessorsSliceTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { + ms := New${structName}() + assert.EqualValues(t, New${returnType}(), ms.${fieldName}()) + fillTest${returnType}(ms.${fieldName}()) + testVal${fieldName} := generateTest${returnType}() + assert.EqualValues(t, testVal${fieldName}, ms.${fieldName}()) +}` + +const accessorsMessageValueTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ${structName}) ${fieldName}() ${returnType} { + return new${returnType}(&(*ms.orig).${originFieldName}) +}` + +const accessorsMessageValueTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { + ms := New${structName}() + fillTest${returnType}(ms.${fieldName}()) + assert.EqualValues(t, generateTest${returnType}(), ms.${fieldName}()) +}` + +const accessorsPrimitiveTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ${structName}) ${fieldName}() ${returnType} { + return (*ms.orig).${originFieldName} +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = v +}` + +const accessorsPrimitiveTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { + ms := New${structName}() + assert.EqualValues(t, ${defaultVal}, ms.${fieldName}()) + testVal${fieldName} := ${testValue} + ms.Set${fieldName}(testVal${fieldName}) + assert.EqualValues(t, testVal${fieldName}, ms.${fieldName}()) +}` + +const accessorsPrimitiveTypedTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ${structName}) ${fieldName}() ${returnType} { + return ${returnType}((*ms.orig).${originFieldName}) +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = ${rawType}(v) +}` + +const accessorsPrimitiveWithoutSetterTypedTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +// +// Important: This causes a runtime error if IsNil() returns "true". 
+func (ms ${structName}) ${fieldName}() ${returnType} { + return ${returnType}((*ms.orig).${originFieldName}) +}` + +type baseField interface { + generateAccessors(ms baseStruct, sb *strings.Builder) + + generateAccessorsTest(ms baseStruct, sb *strings.Builder) + + generateSetWithTestValue(sb *strings.Builder) + + generateCopyToValue(sb *strings.Builder) +} + +type sliceField struct { + fieldName string + originFieldName string + returnSlice baseSlice +} + +func (sf *sliceField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorSliceTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return sf.fieldName + case "returnType": + return sf.returnSlice.getName() + case "originFieldName": + return sf.originFieldName + default: + panic(name) + } + })) +} + +func (sf *sliceField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsSliceTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return sf.fieldName + case "returnType": + return sf.returnSlice.getName() + default: + panic(name) + } + })) +} + +func (sf *sliceField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\tfillTest" + sf.returnSlice.getName() + "(tv." + sf.fieldName + "())") +} + +func (sf *sliceField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tms." + sf.fieldName + "().CopyTo(dest." + sf.fieldName + "())") +} + +var _ baseField = (*sliceField)(nil) + +type messageValueField struct { + fieldName string + originFieldName string + returnMessage *messageValueStruct +} + +func (mf *messageValueField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsMessageValueTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return mf.fieldName + case "lowerFieldName": + return strings.ToLower(mf.fieldName) + case "returnType": + return mf.returnMessage.structName + case "structOriginFullName": + return mf.returnMessage.originFullName + case "originFieldName": + return mf.originFieldName + default: + panic(name) + } + })) +} + +func (mf *messageValueField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsMessageValueTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return mf.fieldName + case "returnType": + return mf.returnMessage.structName + default: + panic(name) + } + })) +} + +func (mf *messageValueField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\tfillTest" + mf.returnMessage.structName + "(tv." + mf.fieldName + "())") +} + +func (mf *messageValueField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tms." + mf.fieldName + "().CopyTo(dest." 
+ mf.fieldName + "())") +} + +var _ baseField = (*messageValueField)(nil) + +type primitiveField struct { + fieldName string + originFieldName string + returnType string + defaultVal string + testVal string +} + +func (pf *primitiveField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return pf.fieldName + case "lowerFieldName": + return strings.ToLower(pf.fieldName) + case "returnType": + return pf.returnType + case "originFieldName": + return pf.originFieldName + default: + panic(name) + } + })) +} + +func (pf *primitiveField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return pf.defaultVal + case "fieldName": + return pf.fieldName + case "testValue": + return pf.testVal + default: + panic(name) + } + })) +} + +func (pf *primitiveField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\ttv.Set" + pf.fieldName + "(" + pf.testVal + ")") +} + +func (pf *primitiveField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tdest.Set" + pf.fieldName + "(ms." + pf.fieldName + "())") +} + +var _ baseField = (*primitiveField)(nil) + +// Types that has defined a custom type (e.g. "type TimestampUnixNano uint64") +type primitiveTypedField struct { + fieldName string + originFieldName string + returnType string + defaultVal string + testVal string + rawType string + manualSetter bool +} + +func (ptf *primitiveTypedField) generateAccessors(ms baseStruct, sb *strings.Builder) { + template := accessorsPrimitiveTypedTemplate + if ptf.manualSetter { + // Generate code without setter. Setter will be manually coded. + template = accessorsPrimitiveWithoutSetterTypedTemplate + } + + sb.WriteString(os.Expand(template, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return ptf.fieldName + case "lowerFieldName": + return strings.ToLower(ptf.fieldName) + case "returnType": + return ptf.returnType + case "rawType": + return ptf.rawType + case "originFieldName": + return ptf.originFieldName + default: + panic(name) + } + })) +} + +func (ptf *primitiveTypedField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return ptf.defaultVal + case "fieldName": + return ptf.fieldName + case "testValue": + return ptf.testVal + default: + panic(name) + } + })) +} + +func (ptf *primitiveTypedField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\ttv.Set" + ptf.fieldName + "(" + ptf.testVal + ")") +} + +func (ptf *primitiveTypedField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tdest.Set" + ptf.fieldName + "(ms." + ptf.fieldName + "())") +} + +var _ baseField = (*primitiveTypedField)(nil) + +// oneofField is used in case where the proto defines an "oneof". 
+type oneofField struct { + copyFuncName string + originFieldName string + testVal string + fillTestName string +} + +func (one oneofField) generateAccessors(baseStruct, *strings.Builder) {} + +func (one oneofField) generateAccessorsTest(baseStruct, *strings.Builder) {} + +func (one oneofField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\t(*tv.orig)." + one.originFieldName + " = " + one.testVal + "\n") + sb.WriteString("\tfillTest" + one.fillTestName + "(tv." + one.fillTestName + "())") +} + +func (one oneofField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\t" + one.copyFuncName + "(ms.orig, dest.orig)") +} + +var _ baseField = (*oneofField)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_slices.go b/internal/otel_collector/cmd/pdatagen/internal/base_slices.go new file mode 100644 index 00000000000..5a0e8bdf144 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/base_slices.go @@ -0,0 +1,609 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "os" + "strings" +) + +const slicePtrTemplate = `// ${structName} logically represents a slice of ${elementName}. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use New${structName} function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ${structName} struct { + // orig points to the slice ${originName} field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*${originName} +} + +func new${structName}(orig *[]*${originName}) ${structName} { + return ${structName}{orig} +} + +// New${structName} creates a ${structName} with 0 elements. +// Can use "Resize" to initialize with a given length. +func New${structName}() ${structName} { + orig := []*${originName}(nil) + return ${structName}{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "New${structName}()". +func (es ${structName}) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ${structName}) At(ix int) ${elementName} { + return new${elementName}((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ${structName}) MoveAndAppendTo(dest ${structName}) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. 
+func (es ${structName}) CopyTo(dest ${structName}) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + new${elementName}((*es.orig)[i]).CopyTo(new${elementName}((*dest.orig)[i])) + } + return + } + origs := make([]${originName}, srcLen) + wrappers := make([]*${originName}, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + new${elementName}((*es.orig)[i]).CopyTo(new${elementName}(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new ${structName} can be initialized: +// es := New${structName}() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es ${structName}) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*${originName}, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]${originName}, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the ${structName} by one and set the +// given ${elementName} at that new position. The original ${elementName} +// could still be referenced so do not reuse it after passing it to this +// method. +func (es ${structName}) Append(e ${elementName}) { + *es.orig = append(*es.orig, e.orig) +}` + +const slicePtrTestTemplate = `func Test${structName}(t *testing.T) { + es := New${structName}() + assert.EqualValues(t, 0, es.Len()) + es = new${structName}(&[]*${originName}{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := New${elementName}() + testVal := generateTest${elementName}() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTest${elementName}(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func Test${structName}_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTest${structName}() + dest := New${structName}() + src := generateTest${structName}() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTest${structName}().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func Test${structName}_CopyTo(t *testing.T) { + dest := New${structName}() + // Test CopyTo to empty + New${structName}().CopyTo(dest) + assert.EqualValues(t, New${structName}(), dest) + + // Test CopyTo larger slice + generateTest${structName}().CopyTo(dest) + assert.EqualValues(t, 
generateTest${structName}(), dest) + + // Test CopyTo same size slice + generateTest${structName}().CopyTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) +} + +func Test${structName}_Resize(t *testing.T) { + es := generateTest${structName}() + emptyVal := New${elementName}() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*${originName}]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*${originName}]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*${originName}]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*${originName}]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func Test${structName}_Append(t *testing.T) { + es := generateTest${structName}() + + emptyVal := New${elementName}() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := New${elementName}() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +}` + +const slicePtrGenerateTest = `func generateTest${structName}() ${structName} { + tv := New${structName}() + fillTest${structName}(tv) + return tv +} + +func fillTest${structName}(tv ${structName}) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTest${elementName}(tv.At(i)) + } +}` + +const sliceValueTemplate = `// ${structName} logically represents a slice of ${elementName}. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use New${structName} function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ${structName} struct { + // orig points to the slice ${originName} field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]${originName} +} + +func new${structName}(orig *[]${originName}) ${structName} { + return ${structName}{orig} +} + +// New${structName} creates a ${structName} with 0 elements. +// Can use "Resize" to initialize with a given length. +func New${structName}() ${structName} { + orig := []${originName}(nil) + return ${structName}{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "New${structName}()". +func (es ${structName}) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... 
// Do something with the element +// } +func (es ${structName}) At(ix int) ${elementName} { + return new${elementName}(&(*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ${structName}) MoveAndAppendTo(dest ${structName}) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es ${structName}) CopyTo(dest ${structName}) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + } else { + (*dest.orig) = make([]${originName}, srcLen) + } + + for i := range *es.orig { + new${elementName}(&(*es.orig)[i]).CopyTo(new${elementName}(&(*dest.orig)[i])) + } +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new ${structName} can be initialized: +// es := New${structName}() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es ${structName}) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]${originName}, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + empty := otlpcommon.AnyValue{} + for i := oldLen; i < newLen; i++ { + *es.orig = append(*es.orig, empty) + } +} + +// Append will increase the length of the ${structName} by one and set the +// given ${elementName} at that new position. The original ${elementName} +// could still be referenced so do not reuse it after passing it to this +// method. 
+func (es ${structName}) Append(e ${elementName}) { + *es.orig = append(*es.orig, *e.orig) +}` + +const sliceValueTestTemplate = `func Test${structName}(t *testing.T) { + es := New${structName}() + assert.EqualValues(t, 0, es.Len()) + es = new${structName}(&[]${originName}{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := New${elementName}() + testVal := generateTest${elementName}() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTest${elementName}(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func Test${structName}_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTest${structName}() + dest := New${structName}() + src := generateTest${structName}() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTest${structName}().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func Test${structName}_CopyTo(t *testing.T) { + dest := New${structName}() + // Test CopyTo to empty + New${structName}().CopyTo(dest) + assert.EqualValues(t, New${structName}(), dest) + + // Test CopyTo larger slice + generateTest${structName}().CopyTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + + // Test CopyTo same size slice + generateTest${structName}().CopyTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) +} + +func Test${structName}_Resize(t *testing.T) { + es := generateTest${structName}() + emptyVal := New${elementName}() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*${originName}]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*${originName}]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*${originName}]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*${originName}]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. 
+ es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func Test${structName}_Append(t *testing.T) { + es := generateTest${structName}() + + emptyVal := New${elementName}() + es.Append(emptyVal) + assert.EqualValues(t, *(es.At(7)).orig, *emptyVal.orig) + + emptyVal2 := New${elementName}() + es.Append(emptyVal2) + assert.EqualValues(t, *(es.At(8)).orig, *emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +}` + +const sliceValueGenerateTest = `func generateTest${structName}() ${structName} { + tv := New${structName}() + fillTest${structName}(tv) + return tv +} + +func fillTest${structName}(tv ${structName}) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTest${elementName}(tv.At(i)) + } +}` + +type baseSlice interface { + getName() string +} + +// Will generate code only for a slice of pointer fields. +type sliceOfPtrs struct { + structName string + element *messageValueStruct +} + +func (ss *sliceOfPtrs) getName() string { + return ss.structName +} + +func (ss *sliceOfPtrs) generateStruct(sb *strings.Builder) { + sb.WriteString(os.Expand(slicePtrTemplate, func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + case "originName": + return ss.element.originFullName + default: + panic(name) + } + })) +} + +func (ss *sliceOfPtrs) generateTests(sb *strings.Builder) { + sb.WriteString(os.Expand(slicePtrTestTemplate, func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + case "originName": + return ss.element.originFullName + default: + panic(name) + } + })) +} + +func (ss *sliceOfPtrs) generateTestValueHelpers(sb *strings.Builder) { + sb.WriteString(os.Expand(slicePtrGenerateTest, func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + default: + panic(name) + } + })) +} + +var _ baseStruct = (*sliceOfPtrs)(nil) + +// Will generate code only for a slice of value fields. 
+type sliceOfValues struct { + structName string + element *messageValueStruct +} + +func (ss *sliceOfValues) getName() string { + return ss.structName +} + +func (ss *sliceOfValues) generateStruct(sb *strings.Builder) { + sb.WriteString(os.Expand(sliceValueTemplate, func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + case "originName": + return ss.element.originFullName + default: + panic(name) + } + })) +} + +func (ss *sliceOfValues) generateTests(sb *strings.Builder) { + sb.WriteString(os.Expand(sliceValueTestTemplate, func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + case "originName": + return ss.element.originFullName + default: + panic(name) + } + })) +} + +func (ss *sliceOfValues) generateTestValueHelpers(sb *strings.Builder) { + sb.WriteString(os.Expand(sliceValueGenerateTest, func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + default: + panic(name) + } + })) +} + +var _ baseStruct = (*sliceOfValues)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_structs.go b/internal/otel_collector/cmd/pdatagen/internal/base_structs.go new file mode 100644 index 00000000000..c2686679577 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/base_structs.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "os" + "strings" +) + +const messageValueTemplate = `${description} +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use New${structName} function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ${structName} struct { + orig *${originName} +} + +func new${structName}(orig *${originName}) ${structName} { + return ${structName}{orig: orig} +} + +// New${structName} creates a new empty ${structName}. +// +// This must be used only in testing code since no "Set" method available. +func New${structName}() ${structName} { + return new${structName}(&${originName}{}) +}` + +const messageValueCopyToHeaderTemplate = `// CopyTo copies all properties from the current struct to the dest. 
+func (ms ${structName}) CopyTo(dest ${structName}) {` + +const messageValueCopyToFooterTemplate = `}` + +const messageValueTestTemplate = ` +func Test${structName}_CopyTo(t *testing.T) { + ms := New${structName}() + generateTest${structName}().CopyTo(ms) + assert.EqualValues(t, generateTest${structName}(), ms) +}` + +const messageValueGenerateTestTemplate = `func generateTest${structName}() ${structName} { + tv := New${structName}() + fillTest${structName}(tv) + return tv +}` + +const messageValueFillTestHeaderTemplate = `func fillTest${structName}(tv ${structName}) {` +const messageValueFillTestFooterTemplate = `}` + +const newLine = "\n" + +type baseStruct interface { + getName() string + + generateStruct(sb *strings.Builder) + + generateTests(sb *strings.Builder) + + generateTestValueHelpers(sb *strings.Builder) +} + +type messageValueStruct struct { + structName string + description string + originFullName string + fields []baseField +} + +func (ms *messageValueStruct) getName() string { + return ms.structName +} + +func (ms *messageValueStruct) generateStruct(sb *strings.Builder) { + sb.WriteString(os.Expand(messageValueTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + case "originName": + return ms.originFullName + case "description": + return ms.description + default: + panic(name) + } + })) + // Write accessors for the struct + for _, f := range ms.fields { + sb.WriteString(newLine + newLine) + f.generateAccessors(ms, sb) + } + sb.WriteString(newLine + newLine) + sb.WriteString(os.Expand(messageValueCopyToHeaderTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + default: + panic(name) + } + })) + // Write accessors CopyTo for the struct + for _, f := range ms.fields { + sb.WriteString(newLine) + f.generateCopyToValue(sb) + } + sb.WriteString(newLine) + sb.WriteString(os.Expand(messageValueCopyToFooterTemplate, func(name string) string { + panic(name) + })) +} + +func (ms *messageValueStruct) generateTests(sb *strings.Builder) { + sb.WriteString(os.Expand(messageValueTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + default: + panic(name) + } + })) + // Write accessors tests for the struct + for _, f := range ms.fields { + sb.WriteString(newLine + newLine) + f.generateAccessorsTest(ms, sb) + } +} + +func (ms *messageValueStruct) generateTestValueHelpers(sb *strings.Builder) { + sb.WriteString(os.Expand(messageValueGenerateTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + case "originName": + return ms.originFullName + default: + panic(name) + } + })) + sb.WriteString(newLine + newLine) + sb.WriteString(os.Expand(messageValueFillTestHeaderTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + default: + panic(name) + } + })) + // Write accessors test value for the struct + for _, f := range ms.fields { + sb.WriteString(newLine) + f.generateSetWithTestValue(sb) + } + sb.WriteString(newLine) + sb.WriteString(os.Expand(messageValueFillTestFooterTemplate, func(name string) string { + panic(name) + })) +} + +var _ baseStruct = (*messageValueStruct)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/common_structs.go b/internal/otel_collector/cmd/pdatagen/internal/common_structs.go new file mode 100644 index 00000000000..1c3868acd35 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/common_structs.go @@ -0,0 +1,124 @@ 
+// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +var commonFile = &File{ + Name: "common", + imports: []string{ + `otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + ``, + `otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"`, + }, + structs: []baseStruct{ + instrumentationLibrary, + anyValueArray, + }, +} + +var instrumentationLibrary = &messageValueStruct{ + structName: "InstrumentationLibrary", + description: "// InstrumentationLibrary is a message representing the instrumentation library information.", + originFullName: "otlpcommon.InstrumentationLibrary", + fields: []baseField{ + nameField, + &primitiveField{ + fieldName: "Version", + originFieldName: "Version", + returnType: "string", + defaultVal: `""`, + testVal: `"test_version"`, + }, + }, +} + +// This will not be generated by this class. +// Defined here just to be available as returned message for the fields. +var stringMap = &sliceOfPtrs{ + structName: "StringMap", + element: stringKeyValue, +} + +var stringKeyValue = &messageValueStruct{} + +// This will not be generated by this class. +// Defined here just to be available as returned message for the fields. 
+var attributeMap = &sliceOfPtrs{ + structName: "AttributeMap", + element: attributeKeyValue, +} + +var attributeKeyValue = &messageValueStruct{} + +var instrumentationLibraryField = &messageValueField{ + fieldName: "InstrumentationLibrary", + originFieldName: "InstrumentationLibrary", + returnMessage: instrumentationLibrary, +} + +var startTimeField = &primitiveTypedField{ + fieldName: "StartTime", + originFieldName: "StartTimeUnixNano", + returnType: "TimestampUnixNano", + rawType: "uint64", + defaultVal: "TimestampUnixNano(0)", + testVal: "TimestampUnixNano(1234567890)", +} + +var timeField = &primitiveTypedField{ + fieldName: "Timestamp", + originFieldName: "TimeUnixNano", + returnType: "TimestampUnixNano", + rawType: "uint64", + defaultVal: "TimestampUnixNano(0)", + testVal: "TimestampUnixNano(1234567890)", +} + +var endTimeField = &primitiveTypedField{ + fieldName: "EndTime", + originFieldName: "EndTimeUnixNano", + returnType: "TimestampUnixNano", + rawType: "uint64", + defaultVal: "TimestampUnixNano(0)", + testVal: "TimestampUnixNano(1234567890)", +} + +var attributes = &sliceField{ + fieldName: "Attributes", + originFieldName: "Attributes", + returnSlice: attributeMap, +} + +var nameField = &primitiveField{ + fieldName: "Name", + originFieldName: "Name", + returnType: "string", + defaultVal: `""`, + testVal: `"test_name"`, +} + +var anyValue = &messageValueStruct{ + structName: "AttributeValue", + originFullName: "otlpcommon.AnyValue", +} + +var anyValueArray = &sliceOfValues{ + structName: "AnyValueArray", + element: anyValue, +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/files.go b/internal/otel_collector/cmd/pdatagen/internal/files.go new file mode 100644 index 00000000000..4971e1187ef --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/files.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "strings" + +const header = `// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run cmd/pdatagen/main.go". + +package pdata` + +// AllFiles is a list of all files that needs to be generated. +var AllFiles = []*File{ + commonFile, + metricsFile, + resourceFile, + traceFile, + logFile, +} + +// File represents the struct for one generated file. 
+type File struct { + Name string + imports []string + testImports []string + // Can be any of sliceOfPtrs, sliceOfValues, messageValueStruct, or messagePtrStruct + structs []baseStruct +} + +// GenerateFile generates the file string. +func (f *File) GenerateFile() string { + var sb strings.Builder + + // Write headers + sb.WriteString(header) + sb.WriteString(newLine + newLine) + // Add imports + sb.WriteString("import (" + newLine) + for _, i := range f.imports { + sb.WriteString("\t" + i + newLine) + } + sb.WriteString(")") + // Write all structs + for _, s := range f.structs { + sb.WriteString(newLine + newLine) + s.generateStruct(&sb) + } + sb.WriteString(newLine) + return sb.String() +} + +func (f *File) GenerateTestFile() string { + var sb strings.Builder + + // Write headers + sb.WriteString(header) + sb.WriteString(newLine + newLine) + // Add imports + sb.WriteString("import (" + newLine) + for _, imp := range f.testImports { + if imp != "" { + sb.WriteString("\t" + imp + newLine) + } else { + sb.WriteString(newLine) + } + } + sb.WriteString(")") + // Write all tests + for _, s := range f.structs { + sb.WriteString(newLine + newLine) + s.generateTests(&sb) + } + // Write all tests generate value + for _, s := range f.structs { + sb.WriteString(newLine + newLine) + s.generateTestValueHelpers(&sb) + } + sb.WriteString(newLine) + return sb.String() +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/log_structs.go b/internal/otel_collector/cmd/pdatagen/internal/log_structs.go new file mode 100644 index 00000000000..35b6a7522cb --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/log_structs.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal
+
+var logFile = &File{
+	Name: "log",
+	imports: []string{
+		`otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"`,
+		`otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1"`,
+	},
+	testImports: []string{
+		`"testing"`,
+		``,
+		`"github.com/stretchr/testify/assert"`,
+		``,
+		`otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1"`,
+	},
+	structs: []baseStruct{
+		resourceLogsSlice,
+		resourceLogs,
+		instrumentationLibraryLogsSlice,
+		instrumentationLibraryLogs,
+		logSlice,
+		logRecord,
+	},
+}
+
+var resourceLogsSlice = &sliceOfPtrs{
+	structName: "ResourceLogsSlice",
+	element:    resourceLogs,
+}
+
+var resourceLogs = &messageValueStruct{
+	structName:     "ResourceLogs",
+	description:    "// ResourceLogs is a collection of logs from a Resource.",
+	originFullName: "otlplogs.ResourceLogs",
+	fields: []baseField{
+		resourceField,
+		&sliceField{
+			fieldName:       "InstrumentationLibraryLogs",
+			originFieldName: "InstrumentationLibraryLogs",
+			returnSlice:     instrumentationLibraryLogsSlice,
+		},
+	},
+}
+
+var instrumentationLibraryLogsSlice = &sliceOfPtrs{
+	structName: "InstrumentationLibraryLogsSlice",
+	element:    instrumentationLibraryLogs,
+}
+
+var instrumentationLibraryLogs = &messageValueStruct{
+	structName:     "InstrumentationLibraryLogs",
+	description:    "// InstrumentationLibraryLogs is a collection of logs from a LibraryInstrumentation.",
+	originFullName: "otlplogs.InstrumentationLibraryLogs",
+	fields: []baseField{
+		instrumentationLibraryField,
+		&sliceField{
+			fieldName:       "Logs",
+			originFieldName: "Logs",
+			returnSlice:     logSlice,
+		},
+	},
+}
+
+var logSlice = &sliceOfPtrs{
+	structName: "LogSlice",
+	element:    logRecord,
+}
+
+var logRecord = &messageValueStruct{
+	structName:     "LogRecord",
+	description:    "// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.\n",
+	originFullName: "otlplogs.LogRecord",
+	fields: []baseField{
+		&primitiveTypedField{
+			fieldName:       "Timestamp",
+			originFieldName: "TimeUnixNano",
+			returnType:      "TimestampUnixNano",
+			rawType:         "uint64",
+			defaultVal:      "TimestampUnixNano(0)",
+			testVal:         "TimestampUnixNano(1234567890)",
+		},
+		traceIDField,
+		spanIDField,
+		&primitiveTypedField{
+			fieldName:       "Flags",
+			originFieldName: "Flags",
+			returnType:      "uint32",
+			rawType:         "uint32",
+			defaultVal:      `uint32(0)`,
+			testVal:         `uint32(0x01)`,
+		},
+		&primitiveField{
+			fieldName:       "SeverityText",
+			originFieldName: "SeverityText",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"INFO"`,
+		},
+		&primitiveTypedField{
+			fieldName:       "SeverityNumber",
+			originFieldName: "SeverityNumber",
+			returnType:      "SeverityNumber",
+			rawType:         "otlplogs.SeverityNumber",
+			defaultVal:      `SeverityNumberUNDEFINED`,
+			testVal:         `SeverityNumberINFO`,
+		},
+		&primitiveField{
+			fieldName:       "Name",
+			originFieldName: "Name",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"test_name"`,
+		},
+		bodyField,
+		attributes,
+		droppedAttributesCount,
+	},
+}
+
+var bodyField = &messageValueField{
+	fieldName:       "Body",
+	originFieldName: "Body",
+	returnMessage:   anyValue,
+}
diff --git a/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go b/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go
new file mode 100644
index 00000000000..d38a3182add
--- /dev/null
+++ b/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go
@@ -0,0 +1,493 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+var metricsFile = &File{
+	Name: "metrics",
+	imports: []string{
+		`otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"`,
+	},
+	testImports: []string{
+		`"testing"`,
+		``,
+		`"github.com/stretchr/testify/assert"`,
+		``,
+		`otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"`,
+	},
+	structs: []baseStruct{
+		resourceMetricsSlice,
+		resourceMetrics,
+		instrumentationLibraryMetricsSlice,
+		instrumentationLibraryMetrics,
+		metricSlice,
+		metric,
+		intGauge,
+		doubleGauge,
+		intSum,
+		doubleSum,
+		intHistogram,
+		doubleHistogram,
+		doubleSummary,
+		intDataPointSlice,
+		intDataPoint,
+		doubleDataPointSlice,
+		doubleDataPoint,
+		intHistogramDataPointSlice,
+		intHistogramDataPoint,
+		doubleHistogramDataPointSlice,
+		doubleHistogramDataPoint,
+		doubleSummaryDataPointSlice,
+		doubleSummaryDataPoint,
+		quantileValuesSlice,
+		quantileValues,
+		intExemplarSlice,
+		intExemplar,
+		doubleExemplarSlice,
+		doubleExemplar,
+	},
+}
+
+var resourceMetricsSlice = &sliceOfPtrs{
+	structName: "ResourceMetricsSlice",
+	element:    resourceMetrics,
+}
+
+var resourceMetrics = &messageValueStruct{
+	structName:     "ResourceMetrics",
+	description:    "// ResourceMetrics is a collection of metrics from a Resource.",
+	originFullName: "otlpmetrics.ResourceMetrics",
+	fields: []baseField{
+		resourceField,
+		&sliceField{
+			fieldName:       "InstrumentationLibraryMetrics",
+			originFieldName: "InstrumentationLibraryMetrics",
+			returnSlice:     instrumentationLibraryMetricsSlice,
+		},
+	},
+}
+
+var instrumentationLibraryMetricsSlice = &sliceOfPtrs{
+	structName: "InstrumentationLibraryMetricsSlice",
+	element:    instrumentationLibraryMetrics,
+}
+
+var instrumentationLibraryMetrics = &messageValueStruct{
+	structName:     "InstrumentationLibraryMetrics",
+	description:    "// InstrumentationLibraryMetrics is a collection of metrics from a LibraryInstrumentation.",
+	originFullName: "otlpmetrics.InstrumentationLibraryMetrics",
+	fields: []baseField{
+		instrumentationLibraryField,
+		&sliceField{
+			fieldName:       "Metrics",
+			originFieldName: "Metrics",
+			returnSlice:     metricSlice,
+		},
+	},
+}
+
+var metricSlice = &sliceOfPtrs{
+	structName: "MetricSlice",
+	element:    metric,
+}
+
+var metric = &messageValueStruct{
+	structName: "Metric",
+	description: "// Metric represents one metric as a collection of datapoints.\n" +
+		"// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/master/opentelemetry/proto/metrics/v1/metrics.proto",
+	originFullName: "otlpmetrics.Metric",
+	fields: []baseField{
+		nameField,
+		&primitiveField{
+			fieldName:       "Description",
+			originFieldName: "Description",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"test_description"`,
+		},
+		&primitiveField{
+			fieldName:       "Unit",
+			originFieldName: "Unit",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"1"`,
+		},
+		oneofDataField,
+	},
+}
+
+var intGauge = &messageValueStruct{
+	structName:     "IntGauge",
+	description:    "// IntGauge represents the type of an int scalar metric that always exports the \"current value\" for every data point.",
+	originFullName: "otlpmetrics.IntGauge",
+	fields: []baseField{
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     intDataPointSlice,
+		},
+	},
+}
+
+var doubleGauge = &messageValueStruct{
+	structName:     "DoubleGauge",
+	description:    "// DoubleGauge represents the type of a double scalar metric that always exports the \"current value\" for every data point.",
+	originFullName: "otlpmetrics.DoubleGauge",
+	fields: []baseField{
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     doubleDataPointSlice,
+		},
+	},
+}
+
+var intSum = &messageValueStruct{
+	structName:     "IntSum",
+	description:    "// IntSum represents the type of a numeric int scalar metric that is calculated as a sum of all reported measurements over a time interval.",
+	originFullName: "otlpmetrics.IntSum",
+	fields: []baseField{
+		aggregationTemporalityField,
+		isMonotonicField,
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     intDataPointSlice,
+		},
+	},
+}
+
+var doubleSum = &messageValueStruct{
+	structName:     "DoubleSum",
+	description:    "// DoubleSum represents the type of a numeric double scalar metric that is calculated as a sum of all reported measurements over a time interval.",
+	originFullName: "otlpmetrics.DoubleSum",
+	fields: []baseField{
+		aggregationTemporalityField,
+		isMonotonicField,
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     doubleDataPointSlice,
+		},
+	},
+}
+
+var intHistogram = &messageValueStruct{
+	structName:     "IntHistogram",
+	description:    "// IntHistogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported int measurements over a time interval.",
+	originFullName: "otlpmetrics.IntHistogram",
+	fields: []baseField{
+		aggregationTemporalityField,
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     intHistogramDataPointSlice,
+		},
+	},
+}
+
+var doubleHistogram = &messageValueStruct{
+	structName:     "DoubleHistogram",
+	description:    "// DoubleHistogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported double measurements over a time interval.",
+	originFullName: "otlpmetrics.DoubleHistogram",
+	fields: []baseField{
+		aggregationTemporalityField,
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     doubleHistogramDataPointSlice,
+		},
+	},
+}
+
+var doubleSummary = &messageValueStruct{
+	structName:     "DoubleSummary",
+	description:    "// DoubleSummary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval.",
+	originFullName: "otlpmetrics.DoubleSummary",
+	fields: []baseField{
+		&sliceField{
+			fieldName:       "DataPoints",
+			originFieldName: "DataPoints",
+			returnSlice:     doubleSummaryDataPointSlice,
+		},
+	},
+}
+
+var intDataPointSlice = &sliceOfPtrs{
+	structName: "IntDataPointSlice",
+	element:    intDataPoint,
+}
+
+var intDataPoint = &messageValueStruct{
+	structName:     "IntDataPoint",
+	description:    "// IntDataPoint is a single data point in a timeseries that describes the time-varying values of a scalar int metric.",
+	originFullName: "otlpmetrics.IntDataPoint",
+	fields: []baseField{
+		labelsField,
+		startTimeField,
+		timeField,
+		valueInt64Field,
+		intExemplarsField,
+	},
+}
+
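// Editor's note: the following runnable sketch is not part of the upstream
// patch. It illustrates the os.Expand mechanism the generators in this patch
// use to substitute ${structName}-style placeholders into the template
// constants; the template and substituted names here are simplified
// stand-ins, not the real templates.
package main

import (
	"fmt"
	"os"
)

const demoTemplate = `type ${structName} struct {
	orig *${originName}
}`

func main() {
	// Mirrors the switch-based mapping functions used by generateStruct et al.
	fmt.Println(os.Expand(demoTemplate, func(name string) string {
		switch name {
		case "structName":
			return "IntDataPoint"
		case "originName":
			return "otlpmetrics.IntDataPoint"
		default:
			panic(name)
		}
	}))
}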
+var doubleDataPointSlice = &sliceOfPtrs{ + structName: "DoubleDataPointSlice", + element: doubleDataPoint, +} + +var doubleDataPoint = &messageValueStruct{ + structName: "DoubleDataPoint", + description: "// DoubleDataPoint is a single data point in a timeseries that describes the time-varying value of a double metric.", + originFullName: "otlpmetrics.DoubleDataPoint", + fields: []baseField{ + labelsField, + startTimeField, + timeField, + valueFloat64Field, + doubleExemplarsField, + }, +} + +var intHistogramDataPointSlice = &sliceOfPtrs{ + structName: "IntHistogramDataPointSlice", + element: intHistogramDataPoint, +} + +var intHistogramDataPoint = &messageValueStruct{ + structName: "IntHistogramDataPoint", + description: "// IntHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of int values.", + originFullName: "otlpmetrics.IntHistogramDataPoint", + fields: []baseField{ + labelsField, + startTimeField, + timeField, + countField, + intSumField, + bucketCountsField, + explicitBoundsField, + intExemplarsField, + }, +} + +var doubleHistogramDataPointSlice = &sliceOfPtrs{ + structName: "DoubleHistogramDataPointSlice", + element: doubleHistogramDataPoint, +} + +var doubleHistogramDataPoint = &messageValueStruct{ + structName: "DoubleHistogramDataPoint", + description: "// DoubleHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of double values.", + originFullName: "otlpmetrics.DoubleHistogramDataPoint", + fields: []baseField{ + labelsField, + startTimeField, + timeField, + countField, + doubleSumField, + bucketCountsField, + explicitBoundsField, + doubleExemplarsField, + }, +} + +var doubleSummaryDataPointSlice = &sliceOfPtrs{ + structName: "DoubleSummaryDataPointSlice", + element: doubleSummaryDataPoint, +} + +var doubleSummaryDataPoint = &messageValueStruct{ + structName: "DoubleSummaryDataPoint", + description: "// DoubleSummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.", + originFullName: "otlpmetrics.DoubleSummaryDataPoint", + fields: []baseField{ + labelsField, + startTimeField, + timeField, + countField, + doubleSumField, + &sliceField{ + fieldName: "QuantileValues", + originFieldName: "QuantileValues", + returnSlice: quantileValuesSlice, + }, + }, +} + +var quantileValuesSlice = &sliceOfPtrs{ + structName: "ValueAtQuantileSlice", + element: quantileValues, +} + +var quantileValues = &messageValueStruct{ + structName: "ValueAtQuantile", + description: "// ValueAtQuantile is a quantile value within a Summary data point", + originFullName: "otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile", + fields: []baseField{ + quantileField, + valueFloat64Field, + }, +} + +var intExemplarSlice = &sliceOfPtrs{ + structName: "IntExemplarSlice", + element: intExemplar, +} + +var intExemplar = &messageValueStruct{ + structName: "IntExemplar", + description: "// IntExemplar is a sample input int measurement.\n//\n" + + "// Exemplars also hold information about the environment when the measurement was recorded,\n" + + "// for example the span and trace ID of the active span when the exemplar was recorded.", + + originFullName: "otlpmetrics.IntExemplar", + fields: []baseField{ + timeField, + valueInt64Field, + &sliceField{ + fieldName: "FilteredLabels", + originFieldName: "FilteredLabels", + returnSlice: stringMap, + }, + }, +} + +var doubleExemplarSlice = &sliceOfPtrs{ + structName: "DoubleExemplarSlice", + 
element: doubleExemplar, +} + +var doubleExemplar = &messageValueStruct{ + structName: "DoubleExemplar", + description: "// DoubleExemplar is a sample input double measurement.\n//\n" + + "// Exemplars also hold information about the environment when the measurement was recorded,\n" + + "// for example the span and trace ID of the active span when the exemplar was recorded.", + + originFullName: "otlpmetrics.DoubleExemplar", + fields: []baseField{ + timeField, + valueFloat64Field, + &sliceField{ + fieldName: "FilteredLabels", + originFieldName: "FilteredLabels", + returnSlice: stringMap, + }, + }, +} + +var labelsField = &sliceField{ + fieldName: "LabelsMap", + originFieldName: "Labels", + returnSlice: stringMap, +} + +var intExemplarsField = &sliceField{ + fieldName: "Exemplars", + originFieldName: "Exemplars", + returnSlice: intExemplarSlice, +} + +var doubleExemplarsField = &sliceField{ + fieldName: "Exemplars", + originFieldName: "Exemplars", + returnSlice: doubleExemplarSlice, +} + +var countField = &primitiveField{ + fieldName: "Count", + originFieldName: "Count", + returnType: "uint64", + defaultVal: "uint64(0)", + testVal: "uint64(17)", +} + +var intSumField = &primitiveField{ + fieldName: "Sum", + originFieldName: "Sum", + returnType: "int64", + defaultVal: "int64(0.0)", + testVal: "int64(1713)", +} + +var doubleSumField = &primitiveField{ + fieldName: "Sum", + originFieldName: "Sum", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", +} + +var valueInt64Field = &primitiveField{ + fieldName: "Value", + originFieldName: "Value", + returnType: "int64", + defaultVal: "int64(0)", + testVal: "int64(-17)", +} + +var valueFloat64Field = &primitiveField{ + fieldName: "Value", + originFieldName: "Value", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", +} + +var bucketCountsField = &primitiveField{ + fieldName: "BucketCounts", + originFieldName: "BucketCounts", + returnType: "[]uint64", + defaultVal: "[]uint64(nil)", + testVal: "[]uint64{1, 2, 3}", +} + +var explicitBoundsField = &primitiveField{ + fieldName: "ExplicitBounds", + originFieldName: "ExplicitBounds", + returnType: "[]float64", + defaultVal: "[]float64(nil)", + testVal: "[]float64{1, 2, 3}", +} + +var quantileField = &primitiveField{ + fieldName: "Quantile", + originFieldName: "Quantile", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", +} + +var isMonotonicField = &primitiveField{ + fieldName: "IsMonotonic", + originFieldName: "IsMonotonic", + returnType: "bool", + defaultVal: "false", + testVal: "true", +} + +var aggregationTemporalityField = &primitiveTypedField{ + fieldName: "AggregationTemporality", + originFieldName: "AggregationTemporality", + returnType: "AggregationTemporality", + rawType: "otlpmetrics.AggregationTemporality", + defaultVal: "AggregationTemporalityUnspecified", + testVal: "AggregationTemporalityCumulative", +} + +var oneofDataField = &oneofField{ + copyFuncName: "copyData", + originFieldName: "Data", + testVal: "&otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}}", + fillTestName: "IntGauge", +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go b/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go new file mode 100644 index 00000000000..9e48aa170db --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +var resourceFile = &File{ + Name: "resource", + imports: []string{ + `otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + }, + structs: []baseStruct{ + resource, + }, +} + +var resource = &messageValueStruct{ + structName: "Resource", + description: "// Resource information.", + originFullName: "otlpresource.Resource", + fields: []baseField{ + attributes, + }, +} + +var resourceField = &messageValueField{ + fieldName: "Resource", + originFieldName: "Resource", + returnMessage: resource, +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go b/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go new file mode 100644 index 00000000000..f5c3b739784 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go @@ -0,0 +1,259 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal
+
+var traceFile = &File{
+	Name: "trace",
+	imports: []string{
+		`"go.opentelemetry.io/collector/internal/data"`,
+		`otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"`,
+		`otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1"`,
+	},
+	testImports: []string{
+		`"testing"`,
+		``,
+		`"github.com/stretchr/testify/assert"`,
+		``,
+		`otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1"`,
+	},
+	structs: []baseStruct{
+		resourceSpansSlice,
+		resourceSpans,
+		instrumentationLibrarySpansSlice,
+		instrumentationLibrarySpans,
+		spanSlice,
+		span,
+		spanEventSlice,
+		spanEvent,
+		spanLinkSlice,
+		spanLink,
+		spanStatus,
+	},
+}
+
+var resourceSpansSlice = &sliceOfPtrs{
+	structName: "ResourceSpansSlice",
+	element:    resourceSpans,
+}
+
+var resourceSpans = &messageValueStruct{
+	structName:     "ResourceSpans",
+	description:    "// ResourceSpans is a collection of spans from a Resource.",
+	originFullName: "otlptrace.ResourceSpans",
+	fields: []baseField{
+		resourceField,
+		&sliceField{
+			fieldName:       "InstrumentationLibrarySpans",
+			originFieldName: "InstrumentationLibrarySpans",
+			returnSlice:     instrumentationLibrarySpansSlice,
+		},
+	},
+}
+
+var instrumentationLibrarySpansSlice = &sliceOfPtrs{
+	structName: "InstrumentationLibrarySpansSlice",
+	element:    instrumentationLibrarySpans,
+}
+
+var instrumentationLibrarySpans = &messageValueStruct{
+	structName:     "InstrumentationLibrarySpans",
+	description:    "// InstrumentationLibrarySpans is a collection of spans from a LibraryInstrumentation.",
+	originFullName: "otlptrace.InstrumentationLibrarySpans",
+	fields: []baseField{
+		instrumentationLibraryField,
+		&sliceField{
+			fieldName:       "Spans",
+			originFieldName: "Spans",
+			returnSlice:     spanSlice,
+		},
+	},
+}
+
+var spanSlice = &sliceOfPtrs{
+	structName: "SpanSlice",
+	element:    span,
+}
+
+var span = &messageValueStruct{
+	structName: "Span",
+	description: "// Span represents a single operation within a trace.\n" +
+		"// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/master/opentelemetry/proto/trace/v1/trace.proto#L37",
+	originFullName: "otlptrace.Span",
+	fields: []baseField{
+		traceIDField,
+		spanIDField,
+		traceStateField,
+		parentSpanIDField,
+		nameField,
+		&primitiveTypedField{
+			fieldName:       "Kind",
+			originFieldName: "Kind",
+			returnType:      "SpanKind",
+			rawType:         "otlptrace.Span_SpanKind",
+			defaultVal:      "SpanKindUNSPECIFIED",
+			testVal:         "SpanKindSERVER",
+		},
+		startTimeField,
+		endTimeField,
+		attributes,
+		droppedAttributesCount,
+		&sliceField{
+			fieldName:       "Events",
+			originFieldName: "Events",
+			returnSlice:     spanEventSlice,
+		},
+		&primitiveField{
+			fieldName:       "DroppedEventsCount",
+			originFieldName: "DroppedEventsCount",
+			returnType:      "uint32",
+			defaultVal:      "uint32(0)",
+			testVal:         "uint32(17)",
+		},
+		&sliceField{
+			fieldName:       "Links",
+			originFieldName: "Links",
+			returnSlice:     spanLinkSlice,
+		},
+		&primitiveField{
+			fieldName:       "DroppedLinksCount",
+			originFieldName: "DroppedLinksCount",
+			returnType:      "uint32",
+			defaultVal:      "uint32(0)",
+			testVal:         "uint32(17)",
+		},
+		&messageValueField{
+			fieldName:       "Status",
+			originFieldName: "Status",
+			returnMessage:   spanStatus,
+		},
+	},
+}
+
+var spanEventSlice = &sliceOfPtrs{
+	structName: "SpanEventSlice",
+	element:    spanEvent,
+}
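// Editor's note: a hedged usage sketch, not part of the upstream patch. The
// definitions above generate pdata types with a Resize/Len/At slice API (as
// exercised by the test templates earlier in this diff), plus per-struct
// setters and a deep-copy CopyTo method. Assuming the generated SpanSlice and
// Span types in consumer/pdata:
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	// Build a two-element slice of spans via the generated Resize/Len/At API.
	spans := pdata.NewSpanSlice()
	spans.Resize(2)
	for i := 0; i < spans.Len(); i++ {
		spans.At(i).SetName(fmt.Sprintf("span-%d", i)) // setter generated from nameField
	}

	// Every messageValueStruct also receives a generated CopyTo method.
	clone := pdata.NewSpan()
	spans.At(0).CopyTo(clone)
	fmt.Println(clone.Name()) // "span-0"
}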
+
+var spanEvent = &messageValueStruct{
+	structName: "SpanEvent",
+	description: "// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied\n" +
+		"// text description and key-value pairs. See OTLP for event definition.",
+	originFullName: "otlptrace.Span_Event",
+	fields: []baseField{
+		timeField,
+		nameField,
+		attributes,
+		droppedAttributesCount,
+	},
+}
+
+var spanLinkSlice = &sliceOfPtrs{
+	structName: "SpanLinkSlice",
+	element:    spanLink,
+}
+
+var spanLink = &messageValueStruct{
+	structName: "SpanLink",
+	description: "// SpanLink is a pointer from the current span to another span in the same trace or in a\n" +
+		"// different trace. See OTLP for link definition.",
+	originFullName: "otlptrace.Span_Link",
+	fields: []baseField{
+		traceIDField,
+		spanIDField,
+		traceStateField,
+		attributes,
+		droppedAttributesCount,
+	},
+}
+
+var spanStatus = &messageValueStruct{
+	structName: "SpanStatus",
+	description: "// SpanStatus is an optional final status for this span. Semantically, when Status wasn't set\n" +
+		"// it means the span ended without errors and Status.Ok (code = 0) is assumed.",
+	originFullName: "otlptrace.Status",
+	fields: []baseField{
+		&primitiveTypedField{
+			fieldName:       "Code",
+			originFieldName: "Code",
+			returnType:      "StatusCode",
+			rawType:         "otlptrace.Status_StatusCode",
+			defaultVal:      "StatusCode(0)",
+			testVal:         "StatusCode(1)",
+			// Generate code without setter. Setter will be manually coded since we
+			// need to also change DeprecatedCode when Code is changed according
+			// to OTLP spec https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L231
+			manualSetter: true,
+		},
+		&primitiveTypedField{
+			fieldName:       "DeprecatedCode",
+			originFieldName: "DeprecatedCode",
+			returnType:      "DeprecatedStatusCode",
+			rawType:         "otlptrace.Status_DeprecatedStatusCode",
+			defaultVal:      "DeprecatedStatusCode(0)",
+			testVal:         "DeprecatedStatusCode(1)",
+		},
+		&primitiveField{
+			fieldName:       "Message",
+			originFieldName: "Message",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"cancelled"`,
+		},
+	},
+}
+
+var traceIDField = &primitiveTypedField{
+	fieldName:       "TraceID",
+	originFieldName: "TraceId",
+	returnType:      "TraceID",
+	rawType:         "data.TraceID",
+	defaultVal:      "NewTraceID([16]byte{})",
+	testVal:         "NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})",
+}
+
+var spanIDField = &primitiveTypedField{
+	fieldName:       "SpanID",
+	originFieldName: "SpanId",
+	returnType:      "SpanID",
+	rawType:         "data.SpanID",
+	defaultVal:      "NewSpanID([8]byte{})",
+	testVal:         "NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})",
+}
+
+var parentSpanIDField = &primitiveTypedField{
+	fieldName:       "ParentSpanID",
+	originFieldName: "ParentSpanId",
+	returnType:      "SpanID",
+	rawType:         "data.SpanID",
+	defaultVal:      "NewSpanID([8]byte{})",
+	testVal:         "NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})",
+}
+
+var traceStateField = &primitiveTypedField{
+	fieldName:       "TraceState",
+	originFieldName: "TraceState",
+	returnType:      "TraceState",
+	rawType:         "string",
+	defaultVal:      `TraceState("")`,
+	testVal:         `TraceState("congo=congos")`,
+}
+
+var droppedAttributesCount = &primitiveField{
+	fieldName:       "DroppedAttributesCount",
+	originFieldName: "DroppedAttributesCount",
+	returnType:      "uint32",
+	defaultVal:      "uint32(0)",
+	testVal:         "uint32(17)",
+}
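// Editor's note: main.go below iterates internal.AllFiles and writes a
// consumer/pdata/generated_<name>.go file plus a matching _test.go file for
// each entry. Per File.GenerateFile earlier in this diff, each generated file
// is laid out roughly as follows (a sketch, not verbatim generator output):
//
//	// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT.
//	// To regenerate this file run "go run cmd/pdatagen/main.go".
//
//	package pdata
//
//	import (
//		otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1"
//	)
//
//	// ...one generated block per entry in File.structs...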
diff --git a/internal/otel_collector/cmd/pdatagen/main.go b/internal/otel_collector/cmd/pdatagen/main.go
new file mode 100644
index 00000000000..02a4fcabad4
--- /dev/null
+++ b/internal/otel_collector/cmd/pdatagen/main.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"os"
+
+	"go.opentelemetry.io/collector/cmd/pdatagen/internal"
+)
+
+func check(e error) {
+	if e != nil {
+		panic(e)
+	}
+}
+
+func main() {
+	for _, fp := range internal.AllFiles {
+		f, err := os.Create("./consumer/pdata/generated_" + fp.Name + ".go")
+		check(err)
+		_, err = f.WriteString(fp.GenerateFile())
+		check(err)
+		check(f.Close())
+		f, err = os.Create("./consumer/pdata/generated_" + fp.Name + "_test.go")
+		check(err)
+		_, err = f.WriteString(fp.GenerateTestFile())
+		check(err)
+		check(f.Close())
+	}
+}
diff --git a/internal/otel_collector/component/component.go b/internal/otel_collector/component/component.go
new file mode 100644
index 00000000000..c591879b51f
--- /dev/null
+++ b/internal/otel_collector/component/component.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"context"
+
+	"github.com/spf13/viper"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Component is either a receiver, exporter, processor or extension.
+type Component interface {
+	// Start tells the component to start. The Host parameter can be used for communicating
+	// with the host after Start() has already returned. If an error is returned by
+	// Start(), the collector startup will be aborted.
+	// If this is an exporter component it may prepare for exporting
+	// by connecting to the endpoint.
+	//
+	// If the component needs to perform a long-running starting operation, it is recommended
+	// that Start() return quickly and the long-running operation be performed in the background.
+	// In that case make sure that the long-running operation does not use the context passed
+	// to the Start() function, since that context will be cancelled soon and can abort the
+	// long-running operation. Create a new context from context.Background() for long-running
+	// operations.
+	Start(ctx context.Context, host Host) error
+
+	// Shutdown is invoked during service shutdown.
+	//
+	// If the component is running any background operations, they must be aborted as soon as
+	// possible. Remember that any long-running background operation started from the Start()
+	// method must also be cancelled.
+	Shutdown(ctx context.Context) error
+}
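// Editor's note: a hedged, illustrative implementation of the Component
// contract above; not part of the upstream patch. It follows the documented
// guidance that long-running work should use a fresh background context,
// because the context passed to Start is cancelled soon after Start returns.
// The myComponent name is hypothetical.
package main

import (
	"context"

	"go.opentelemetry.io/collector/component"
)

type myComponent struct {
	cancel context.CancelFunc
}

func (c *myComponent) Start(_ context.Context, _ component.Host) error {
	// Derive the background work's context from context.Background(), not
	// from the Start context, so the work survives Start returning.
	ctx, cancel := context.WithCancel(context.Background())
	c.cancel = cancel
	go func() {
		<-ctx.Done() // placeholder for the component's background work
	}()
	return nil
}

func (c *myComponent) Shutdown(context.Context) error {
	c.cancel() // abort the background operation started in Start
	return nil
}

var _ component.Component = (*myComponent)(nil)

func main() {
	c := &myComponent{}
	_ = c.Start(context.Background(), nil)
	_ = c.Shutdown(context.Background())
}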
+
+// Kind specifies one of the four component kinds; see the consts below.
+type Kind int
+
+const (
+	_ Kind = iota // skip 0, start types from 1.
+	KindReceiver
+	KindProcessor
+	KindExporter
+	KindExtension
+)
+
+// Host represents the entity that is hosting a Component. It is used to allow communication
+// between the Component and its host (normally service.Application is the host).
+type Host interface {
+	// ReportFatalError is used to report to the host that the extension
+	// encountered a fatal error (i.e. an error that the instance can't recover
+	// from) after its start function had already returned.
+	ReportFatalError(err error)
+
+	// GetFactory returns the factory of the specified kind for a component type.
+	// This allows components to create other components. For example:
+	//   func (r MyReceiver) Start(host component.Host) error {
+	//     apacheFactory := host.GetFactory(KindReceiver,"apache").(component.ReceiverFactory)
+	//     receiver, err := apacheFactory.CreateMetricsReceiver(...)
+	//     ...
+	//   }
+	// GetFactory can be called by the component anytime after Start() begins and
+	// until Shutdown() is called. Note that the component is responsible for destroying
+	// other components that it creates.
+	GetFactory(kind Kind, componentType configmodels.Type) Factory
+
+	// GetExtensions returns the map of extensions. Only enabled and created extensions
+	// will be returned. Typically used to find an extension by type or by full config
+	// name. Both cases can be done by iterating the returned map. There are typically
+	// very few extensions, so there are no performance implications due to iteration.
+	GetExtensions() map[configmodels.Extension]ServiceExtension
+
+	// GetExporters returns the map of exporters. Only enabled and created exporters
+	// will be returned. Typically used to find exporters by type or by full config
+	// name. Both cases can be done by iterating the returned map. There are typically
+	// very few exporters, so there are no performance implications due to iteration.
+	// This returns a map by DataType of maps by exporter configs to the exporter instance.
+	// Note that an exporter with the same name may be attached to multiple pipelines and
+	// thus we may have an instance of the exporter for multiple data types.
+	// This is an experimental function that may change or even be removed completely.
+	GetExporters() map[configmodels.DataType]map[configmodels.Exporter]Exporter
+}
+
+// Factory interface must be implemented by all component factories.
+type Factory interface {
+	// Type gets the type of the component created by this factory.
+	Type() configmodels.Type
+}
+
+// ConfigUnmarshaler is an optional interface that, if implemented by a Factory,
+// is used by the configuration loading system to unmarshal the config.
+type ConfigUnmarshaler interface {
+	// Unmarshal is a function that un-marshals viper data into a config struct in a custom way.
+	// componentViperSection *viper.Viper
+	//   The config for this specific component. May be nil or empty if no config available.
+	// intoCfg interface{}
+	//   An empty interface wrapping a pointer to the config struct to unmarshal into.
+	Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error
+}
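// Editor's note: a hedged sketch of a factory opting into custom config
// loading via the ConfigUnmarshaler interface above; not part of the upstream
// patch. The exampleFactory type and the "endpoint" key are hypothetical;
// viper's Unmarshal decodes the component's config section into the supplied
// struct pointer using mapstructure tags.
package main

import (
	"github.com/spf13/viper"
)

type exampleFactory struct{}

func (f *exampleFactory) Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error {
	if componentViperSection == nil {
		return nil // no config provided for this component; keep defaults
	}
	return componentViperSection.Unmarshal(intoCfg)
}

func main() {
	v := viper.New()
	v.Set("endpoint", "localhost:4317")
	cfg := struct {
		Endpoint string `mapstructure:"endpoint"`
	}{}
	_ = (&exampleFactory{}).Unmarshal(v, &cfg)
}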
+
+// CustomUnmarshaler is a function that un-marshals viper data into a config struct
+// in a custom way.
+// componentViperSection *viper.Viper
+//   The config for this specific component. May be nil or empty if no config available.
+// intoCfg interface{}
+//   An empty interface wrapping a pointer to the config struct to unmarshal into.
+type CustomUnmarshaler func(componentViperSection *viper.Viper, intoCfg interface{}) error
+
+// ApplicationStartInfo is the information that is logged at the application start and
+// passed into each component. This information can be overridden in custom builds.
+type ApplicationStartInfo struct {
+	// Executable file name, e.g. "otelcol".
+	ExeName string
+
+	// Long name, used e.g. in the logs.
+	LongName string
+
+	// Version string.
+	Version string
+
+	// Git hash of the source code.
+	GitHash string
+}
diff --git a/internal/otel_collector/component/component_test.go b/internal/otel_collector/component/component_test.go
new file mode 100644
index 00000000000..3c3e46002d9
--- /dev/null
+++ b/internal/otel_collector/component/component_test.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
diff --git a/internal/otel_collector/component/componenterror/errors.go b/internal/otel_collector/component/componenterror/errors.go
new file mode 100644
index 00000000000..0a89c29b3f2
--- /dev/null
+++ b/internal/otel_collector/component/componenterror/errors.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package componenterror provides helper functions to create and process
+// OpenTelemetry-specific errors.
+package componenterror
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+var (
+	// ErrAlreadyStarted indicates an error on starting an already-started component.
+	ErrAlreadyStarted = errors.New("already started")
+
+	// ErrAlreadyStopped indicates an error on stopping an already-stopped component.
+	ErrAlreadyStopped = errors.New("already stopped")
+
+	// ErrNilNextConsumer indicates an error on nil next consumer.
+	ErrNilNextConsumer = errors.New("nil nextConsumer")
+)
+
+// CombineErrors converts a list of errors into one error.
+func CombineErrors(errs []error) error { + numErrors := len(errs) + if numErrors == 0 { + // No errors + return nil + } + + if numErrors == 1 { + return errs[0] + } + + errMsgs := make([]string, 0, numErrors) + permanent := false + for _, err := range errs { + if !permanent && consumererror.IsPermanent(err) { + permanent = true + } + errMsgs = append(errMsgs, err.Error()) + } + err := fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) + if permanent { + err = consumererror.Permanent(err) + } + return err +} diff --git a/internal/otel_collector/component/componenterror/errors_test.go b/internal/otel_collector/component/componenterror/errors_test.go new file mode 100644 index 00000000000..c0c7b8e7bd8 --- /dev/null +++ b/internal/otel_collector/component/componenterror/errors_test.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenterror_test + +import ( + "fmt" + "testing" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +func TestCombineErrors(t *testing.T) { + testCases := []struct { + errors []error + expected string + expectNil bool + expectedPermanent bool + }{ + { + errors: []error{}, + expectNil: true, + }, + { + errors: []error{ + fmt.Errorf("foo"), + }, + expected: "foo", + }, + { + errors: []error{ + fmt.Errorf("foo"), + fmt.Errorf("bar"), + }, + expected: "[foo; bar]", + }, + { + errors: []error{ + fmt.Errorf("foo"), + fmt.Errorf("bar"), + consumererror.Permanent(fmt.Errorf("permanent"))}, + expected: "Permanent error: [foo; bar; Permanent error: permanent]", + }, + } + + for _, tc := range testCases { + got := componenterror.CombineErrors(tc.errors) + if (got == nil) != tc.expectNil { + t.Errorf("CombineErrors(%v) == nil? Got: %t. Want: %t", tc.errors, got == nil, tc.expectNil) + } + if got != nil && tc.expected != got.Error() { + t.Errorf("CombineErrors(%v) = %q. Want: %q", tc.errors, got, tc.expected) + } + if tc.expectedPermanent && !consumererror.IsPermanent(got) { + t.Errorf("CombineErrors(%v) = %q. Want: consumererror.permanent", tc.errors, got) + } + } +} diff --git a/internal/otel_collector/component/componenthelper/component.go b/internal/otel_collector/component/componenthelper/component.go new file mode 100644 index 00000000000..0ecc6e5a610 --- /dev/null +++ b/internal/otel_collector/component/componenthelper/component.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package componenthelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+)
+
+// Start specifies the function invoked when the component is being started.
+type Start func(context.Context, component.Host) error
+
+// Shutdown specifies the function invoked when the component is being shut down.
+type Shutdown func(context.Context) error
+
+// ComponentSettings represents a settings struct to create components.
+type ComponentSettings struct {
+	Start
+	Shutdown
+}
+
+// DefaultComponentSettings returns the default settings for a component. The Start and Shutdown are no-op.
+func DefaultComponentSettings() *ComponentSettings {
+	return &ComponentSettings{
+		Start:    func(ctx context.Context, host component.Host) error { return nil },
+		Shutdown: func(ctx context.Context) error { return nil },
+	}
+}
+
+type baseComponent struct {
+	start    Start
+	shutdown Shutdown
+}
+
+// Start is invoked during service startup.
+func (be *baseComponent) Start(ctx context.Context, host component.Host) error {
+	return be.start(ctx, host)
+}
+
+// Shutdown is invoked during service shutdown.
+func (be *baseComponent) Shutdown(ctx context.Context) error {
+	return be.shutdown(ctx)
+}
+
+// NewComponent returns a component.Component that calls the given Start and Shutdown.
+func NewComponent(s *ComponentSettings) component.Component {
+	return &baseComponent{
+		start:    s.Start,
+		shutdown: s.Shutdown,
+	}
+}
diff --git a/internal/otel_collector/component/componenthelper/component_test.go b/internal/otel_collector/component/componenthelper/component_test.go
new file mode 100644
index 00000000000..3d4ee6cfb49
--- /dev/null
+++ b/internal/otel_collector/component/componenthelper/component_test.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package componenthelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" +) + +func TestDefaultSettings(t *testing.T) { + st := DefaultComponentSettings() + require.NotNil(t, st) + cp := NewComponent(st) + require.NoError(t, cp.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, cp.Shutdown(context.Background())) +} + +func TestWithStart(t *testing.T) { + startCalled := false + st := DefaultComponentSettings() + st.Start = func(context.Context, component.Host) error { startCalled = true; return nil } + cp := NewComponent(st) + assert.NoError(t, cp.Start(context.Background(), componenttest.NewNopHost())) + assert.True(t, startCalled) +} + +func TestWithStart_ReturnError(t *testing.T) { + want := errors.New("my_error") + st := DefaultComponentSettings() + st.Start = func(context.Context, component.Host) error { return want } + cp := NewComponent(st) + assert.Equal(t, want, cp.Start(context.Background(), componenttest.NewNopHost())) +} + +func TestWithShutdown(t *testing.T) { + shutdownCalled := false + st := DefaultComponentSettings() + st.Shutdown = func(context.Context) error { shutdownCalled = true; return nil } + cp := NewComponent(st) + assert.NoError(t, cp.Shutdown(context.Background())) + assert.True(t, shutdownCalled) +} + +func TestWithShutdown_ReturnError(t *testing.T) { + want := errors.New("my_error") + st := DefaultComponentSettings() + st.Shutdown = func(context.Context) error { return want } + cp := NewComponent(st) + assert.Equal(t, want, cp.Shutdown(context.Background())) +} diff --git a/internal/otel_collector/component/componenttest/application_start_info.go b/internal/otel_collector/component/componenttest/application_start_info.go new file mode 100644 index 00000000000..27f48f2d46c --- /dev/null +++ b/internal/otel_collector/component/componenttest/application_start_info.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/version" +) + +func TestApplicationStartInfo() component.ApplicationStartInfo { + return component.ApplicationStartInfo{ + ExeName: "otelcol", + LongName: "InProcess Collector", + Version: version.Version, + GitHash: version.GitHash, + } +} diff --git a/internal/otel_collector/component/componenttest/doc.go b/internal/otel_collector/component/componenttest/doc.go new file mode 100644 index 00000000000..1762446116c --- /dev/null +++ b/internal/otel_collector/component/componenttest/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package componenttest defines types and functions used to help test packages
+// implementing the component package interfaces.
+package componenttest
diff --git a/internal/otel_collector/component/componenttest/docs.go b/internal/otel_collector/component/componenttest/docs.go
new file mode 100644
index 00000000000..1bb45bc6566
--- /dev/null
+++ b/internal/otel_collector/component/componenttest/docs.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package componenttest
+
+import (
+	"fmt"
+	"go/parser"
+	"go/token"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+const (
+	readMeFileName = "README.md"
+)
+
+// CheckDocs returns an error if README.md is missing for at least one
+// enabled component. "projectPath" is the absolute path to the root
+// of the project to which the components belong. "relativeComponentsPath" is
+// the path, relative to the project root, of the file that contains imports to
+// all required components, and "projectGoModule" is the Go module to which the
+// imports belong. This method is intended to be used only to verify
+// documentation in OpenTelemetry core and contrib repositories. Examples:
+// 1) Usage in the core repo:
+//
+// componenttest.CheckDocs(
+//	"path/to/project",
+//	"service/defaultcomponents/defaults.go",
+//	"go.opentelemetry.io/collector",
+// )
+//
+// 2) Usage in the contrib repo:
+// componenttest.CheckDocs(
+//	"path/to/project",
+//	"cmd/otelcontrib/components.go",
+//	"github.com/open-telemetry/opentelemetry-collector-contrib",
+// )
+func CheckDocs(projectPath string, relativeComponentsPath string, projectGoModule string) error { + defaultComponentsFilePath := filepath.Join(projectPath, relativeComponentsPath) + _, err := os.Stat(defaultComponentsFilePath) + if err != nil { + return fmt.Errorf("failed to load file %s: %v", defaultComponentsFilePath, err) + } + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, defaultComponentsFilePath, nil, parser.ImportsOnly) + if err != nil { + return fmt.Errorf("failed to load imports: %v", err) + } + + importPrefixesToCheck := getImportPrefixesToCheck(projectGoModule) + + for _, i := range f.Imports { + importPath := strings.Trim(i.Path.Value, `"`) + + if isComponentImport(importPath, importPrefixesToCheck) { + relativeComponentPath := strings.Replace(importPath, projectGoModule, "", 1) + readmePath := filepath.Join(projectPath, relativeComponentPath, readMeFileName) + _, err := os.Stat(readmePath) + if err != nil { + return fmt.Errorf("README does not exist at %s, add one", readmePath) + } + } + } + return nil +} + +var componentTypes = []string{"extension", "receiver", "processor", "exporter"} + +// getImportPrefixesToCheck returns a slice of strings that are relevant import +// prefixes for components in the given module. +func getImportPrefixesToCheck(module string) []string { + out := make([]string, len(componentTypes)) + for i, typ := range componentTypes { + out[i] = strings.Join([]string{strings.TrimRight(module, "/"), typ}, "/") + } + return out +} + +// isComponentImport returns true if the import corresponds to a Otel component, +// i.e. an extension, exporter, processor or a receiver. +func isComponentImport(importStr string, importPrefixesToCheck []string) bool { + for _, prefix := range importPrefixesToCheck { + if strings.HasPrefix(importStr, prefix) { + return true + } + } + return false +} diff --git a/internal/otel_collector/component/componenttest/docs_test.go b/internal/otel_collector/component/componenttest/docs_test.go new file mode 100644 index 00000000000..bc5a8ef85cf --- /dev/null +++ b/internal/otel_collector/component/componenttest/docs_test.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
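The tests that follow exercise CheckDocs against the collector's own tree. As a sketch, a downstream repository could wire the same check into CI; the package name and relative paths here are assumptions about that repository's layout, not part of the vendored API.

```go
package docs_test // hypothetical package in a downstream repository

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/collector/component/componenttest"
)

func TestComponentsHaveDocs(t *testing.T) {
	// Resolve the repo root relative to this test file (assumed layout).
	projectPath, err := filepath.Abs("../..")
	require.NoError(t, err)

	require.NoError(t, componenttest.CheckDocs(
		projectPath,
		"service/defaultcomponents/defaults.go", // file importing all bundled components
		"go.opentelemetry.io/collector",         // module the imports belong to
	))
}
```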
+ +package componenttest + +import ( + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsComponentImport(t *testing.T) { + type args struct { + importStr string + importPrefixesToCheck []string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "Match", + args: args{ + importStr: "matching/prefix", + importPrefixesToCheck: []string{ + "some/prefix", + "matching/prefix", + }, + }, + want: true, + }, + { + name: "No match", + args: args{ + importStr: "some/prefix", + importPrefixesToCheck: []string{ + "expecting/prefix", + }, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isComponentImport(tt.args.importStr, tt.args.importPrefixesToCheck); got != tt.want { + t.Errorf("isComponentImport() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetImportPrefixesToCheck(t *testing.T) { + tests := []struct { + name string + module string + want []string + }{ + { + name: "Get import prefixes - 1", + module: "test", + want: []string{ + "test/extension", + "test/receiver", + "test/processor", + "test/exporter", + }, + }, + { + name: "Get import prefixes - 2", + module: "test/", + want: []string{ + "test/extension", + "test/receiver", + "test/processor", + "test/exporter", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getImportPrefixesToCheck(tt.module); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getImportPrefixesToCheck() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCheckDocs(t *testing.T) { + type args struct { + projectPath string + relativeDefaultComponentsPath string + projectGoModule string + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "Invalid project path", + args: args{ + projectPath: "invalid/project", + relativeDefaultComponentsPath: "invalid/file", + projectGoModule: "go.opentelemetry.io/collector", + }, + wantErr: true, + }, + { + name: "Valid files", + args: args{ + projectPath: getProjectPath(t), + relativeDefaultComponentsPath: "service/defaultcomponents/defaults.go", + projectGoModule: "go.opentelemetry.io/collector", + }, + wantErr: false, + }, + { + name: "Invalid files", + args: args{ + projectPath: getProjectPath(t), + relativeDefaultComponentsPath: "service/defaultcomponents/invalid.go", + projectGoModule: "go.opentelemetry.io/collector", + }, + wantErr: true, + }, + { + name: "Invalid imports", + args: args{ + projectPath: getProjectPath(t), + relativeDefaultComponentsPath: "component/componenttest/testdata/invalid_go.txt", + projectGoModule: "go.opentelemetry.io/collector", + }, + wantErr: true, + }, + { + name: "README does not exist", + args: args{ + projectPath: getProjectPath(t), + relativeDefaultComponentsPath: "component/componenttest/testdata/valid_go.txt", + projectGoModule: "go.opentelemetry.io/collector", + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := CheckDocs(tt.args.projectPath, tt.args.relativeDefaultComponentsPath, tt.args.projectGoModule); (err != nil) != tt.wantErr { + t.Errorf("CheckDocs() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func getProjectPath(t *testing.T) string { + wd, err := os.Getwd() + require.NoError(t, err, "failed to get working directory: %v") + + // Absolute path to the project root directory + projectPath := filepath.Join(wd, "../../") + + return projectPath +} diff --git 
a/internal/otel_collector/component/componenttest/error_waiting_host.go b/internal/otel_collector/component/componenttest/error_waiting_host.go
new file mode 100644
index 00000000000..ee3c16fbeec
--- /dev/null
+++ b/internal/otel_collector/component/componenttest/error_waiting_host.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package componenttest
+
+import (
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// ErrorWaitingHost mocks a component.Host for test purposes.
+type ErrorWaitingHost struct {
+	errorChan chan error
+}
+
+var _ component.Host = (*ErrorWaitingHost)(nil)
+
+// NewErrorWaitingHost returns a new instance of ErrorWaitingHost with proper defaults for most
+// tests.
+func NewErrorWaitingHost() *ErrorWaitingHost {
+	return &ErrorWaitingHost{
+		errorChan: make(chan error, 1),
+	}
+}
+
+// ReportFatalError is used to report to the host that the extension encountered
+// a fatal error (i.e.: an error that the instance can't recover from) after
+// its start function has already returned.
+func (ews *ErrorWaitingHost) ReportFatalError(err error) {
+	ews.errorChan <- err
+}
+
+// WaitForFatalError waits the given amount of time until an error is reported via
+// ReportFatalError. It returns the error, if any, and a bool indicating whether
+// an error was received before the timeout.
+func (ews *ErrorWaitingHost) WaitForFatalError(timeout time.Duration) (receivedError bool, err error) {
+	select {
+	case err = <-ews.errorChan:
+		receivedError = true
+	case <-time.After(timeout):
+	}
+
+	return
+}
+
+// GetFactory of the specified kind. Returns the factory for a component type.
+func (ews *ErrorWaitingHost) GetFactory(_ component.Kind, _ configmodels.Type) component.Factory {
+	return nil
+}
+
+func (ews *ErrorWaitingHost) GetExtensions() map[configmodels.Extension]component.ServiceExtension {
+	return nil
+}
+
+func (ews *ErrorWaitingHost) GetExporters() map[configmodels.DataType]map[configmodels.Exporter]component.Exporter {
+	return nil
+}
diff --git a/internal/otel_collector/component/componenttest/error_waiting_host_test.go b/internal/otel_collector/component/componenttest/error_waiting_host_test.go
new file mode 100644
index 00000000000..513c608cba4
--- /dev/null
+++ b/internal/otel_collector/component/componenttest/error_waiting_host_test.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" +) + +func TestNewErrorWaitingHost(t *testing.T) { + mh := NewErrorWaitingHost() + require.NotNil(t, mh) + + reportedErr := errors.New("TestError") + go mh.ReportFatalError(reportedErr) + + receivedError, receivedErr := mh.WaitForFatalError(100 * time.Millisecond) + require.True(t, receivedError) + require.Equal(t, reportedErr, receivedErr) + + receivedError, _ = mh.WaitForFatalError(100 * time.Millisecond) + require.False(t, receivedError) +} + +func TestNewErrorWaitingHost_Noop(t *testing.T) { + mh := NewErrorWaitingHost() + require.NotNil(t, mh) + + assert.Nil(t, mh.GetExporters()) + assert.Nil(t, mh.GetExtensions()) + assert.Nil(t, mh.GetFactory(component.KindReceiver, "test")) +} diff --git a/internal/otel_collector/component/componenttest/example_factories.go b/internal/otel_collector/component/componenttest/example_factories.go new file mode 100644 index 00000000000..1eda999ddd1 --- /dev/null +++ b/internal/otel_collector/component/componenttest/example_factories.go @@ -0,0 +1,508 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "context" + "fmt" + + "github.com/spf13/viper" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// ExampleReceiver is for testing purposes. We are defining an example config and factory +// for "examplereceiver" receiver type. +type ExampleReceiver struct { + configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + // Configures the receiver server protocol. + confignet.TCPAddr `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` + + // FailTraceCreation causes CreateTracesReceiver to fail. Useful for testing. + FailTraceCreation bool `mapstructure:"-"` + + // FailMetricsCreation causes CreateMetricsReceiver to fail. Useful for testing. + FailMetricsCreation bool `mapstructure:"-"` +} + +// ExampleReceiverFactory is factory for ExampleReceiver. +type ExampleReceiverFactory struct { +} + +var _ component.ReceiverFactory = (*ExampleReceiverFactory)(nil) + +// Type gets the type of the Receiver config created by this factory. 
+func (f *ExampleReceiverFactory) Type() configmodels.Type { + return "examplereceiver" +} + +// CreateDefaultConfig creates the default configuration for the Receiver. +func (f *ExampleReceiverFactory) CreateDefaultConfig() configmodels.Receiver { + return &ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: f.Type(), + NameVal: string(f.Type()), + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:1000", + }, + ExtraSetting: "some string", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +// CustomUnmarshaler implements the deprecated way to provide custom unmarshalers. +func (f *ExampleReceiverFactory) CustomUnmarshaler() component.CustomUnmarshaler { + return nil +} + +// CreateTraceReceiver creates a trace receiver based on this config. +func (f *ExampleReceiverFactory) CreateTracesReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.TracesConsumer, +) (component.TracesReceiver, error) { + if cfg.(*ExampleReceiver).FailTraceCreation { + return nil, configerror.ErrDataTypeIsNotSupported + } + + receiver := f.createReceiver(cfg) + receiver.TraceConsumer = nextConsumer + + return receiver, nil +} + +func (f *ExampleReceiverFactory) createReceiver(cfg configmodels.Receiver) *ExampleReceiverProducer { + // There must be one receiver for all data types. We maintain a map of + // receivers per config. + + // Check to see if there is already a receiver for this config. + receiver, ok := exampleReceivers[cfg] + if !ok { + receiver = &ExampleReceiverProducer{} + // Remember the receiver in the map + exampleReceivers[cfg] = receiver + } + + return receiver +} + +// CreateMetricsReceiver creates a metrics receiver based on this config. +func (f *ExampleReceiverFactory) CreateMetricsReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.MetricsConsumer, +) (component.MetricsReceiver, error) { + if cfg.(*ExampleReceiver).FailMetricsCreation { + return nil, configerror.ErrDataTypeIsNotSupported + } + + receiver := f.createReceiver(cfg) + receiver.MetricsConsumer = nextConsumer + + return receiver, nil +} + +func (f *ExampleReceiverFactory) CreateLogsReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.LogsConsumer, +) (component.LogsReceiver, error) { + receiver := f.createReceiver(cfg) + receiver.LogConsumer = nextConsumer + + return receiver, nil +} + +// ExampleReceiverProducer allows producing traces and metrics for testing purposes. +type ExampleReceiverProducer struct { + Started bool + Stopped bool + TraceConsumer consumer.TracesConsumer + MetricsConsumer consumer.MetricsConsumer + LogConsumer consumer.LogsConsumer +} + +// Start tells the receiver to start its processing. +func (erp *ExampleReceiverProducer) Start(_ context.Context, _ component.Host) error { + erp.Started = true + return nil +} + +// Shutdown tells the receiver that should stop reception, +func (erp *ExampleReceiverProducer) Shutdown(context.Context) error { + erp.Stopped = true + return nil +} + +// This is the map of already created example receivers for particular configurations. +// We maintain this map because the ReceiverFactory is asked trace and metric receivers separately +// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not +// create separate objects, they must use one Receiver object per configuration. 
+var exampleReceivers = map[configmodels.Receiver]*ExampleReceiverProducer{} + +// MultiProtoReceiver is for testing purposes. We are defining an example multi protocol +// config and factory for "multireceiver" receiver type. +type MultiProtoReceiver struct { + configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + Protocols map[string]MultiProtoReceiverOneCfg `mapstructure:"protocols"` +} + +// MultiProtoReceiverOneCfg is multi proto receiver config. +type MultiProtoReceiverOneCfg struct { + Endpoint string `mapstructure:"endpoint"` + ExtraSetting string `mapstructure:"extra"` +} + +// MultiProtoReceiverFactory is factory for MultiProtoReceiver. +type MultiProtoReceiverFactory struct { +} + +var _ component.ReceiverFactory = (*MultiProtoReceiverFactory)(nil) + +// Type gets the type of the Receiver config created by this factory. +func (f *MultiProtoReceiverFactory) Type() configmodels.Type { + return "multireceiver" +} + +// Unmarshal implements the ConfigUnmarshaler interface. +func (f *MultiProtoReceiverFactory) Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error { + return componentViperSection.UnmarshalExact(intoCfg) +} + +// CreateDefaultConfig creates the default configuration for the Receiver. +func (f *MultiProtoReceiverFactory) CreateDefaultConfig() configmodels.Receiver { + return &MultiProtoReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: f.Type(), + NameVal: string(f.Type()), + }, + Protocols: map[string]MultiProtoReceiverOneCfg{ + "http": { + Endpoint: "example.com:8888", + ExtraSetting: "extra string 1", + }, + "tcp": { + Endpoint: "omnition.com:9999", + ExtraSetting: "extra string 2", + }, + }, + } +} + +// CreateTraceReceiver creates a trace receiver based on this config. +func (f *MultiProtoReceiverFactory) CreateTracesReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + _ configmodels.Receiver, + _ consumer.TracesConsumer, +) (component.TracesReceiver, error) { + // Not used for this test, just return nil + return nil, nil +} + +// CreateMetricsReceiver creates a metrics receiver based on this config. +func (f *MultiProtoReceiverFactory) CreateMetricsReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + _ configmodels.Receiver, + _ consumer.MetricsConsumer, +) (component.MetricsReceiver, error) { + // Not used for this test, just return nil + return nil, nil +} + +// CreateMetricsReceiver creates a metrics receiver based on this config. +func (f *MultiProtoReceiverFactory) CreateLogsReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + _ configmodels.Receiver, + _ consumer.LogsConsumer, +) (component.LogsReceiver, error) { + // Not used for this test, just return nil + return nil, nil +} + +// ExampleExporter is for testing purposes. We are defining an example config and factory +// for "exampleexporter" exporter type. +type ExampleExporter struct { + configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + ExtraInt int32 `mapstructure:"extra_int"` + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +// ExampleExporterFactory is factory for ExampleExporter. +type ExampleExporterFactory struct { +} + +// Type gets the type of the Exporter config created by this factory. 
+func (f *ExampleExporterFactory) Type() configmodels.Type { + return "exampleexporter" +} + +// CreateDefaultConfig creates the default configuration for the Exporter. +func (f *ExampleExporterFactory) CreateDefaultConfig() configmodels.Exporter { + return &ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: f.Type(), + NameVal: string(f.Type()), + }, + ExtraSetting: "some export string", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +// CustomUnmarshaler implements the deprecated way to provide custom unmarshalers. +func (f *ExampleExporterFactory) CustomUnmarshaler() component.CustomUnmarshaler { + return func(componentViperSection *viper.Viper, intoCfg interface{}) error { + return componentViperSection.UnmarshalExact(intoCfg) + } +} + +// CreateTraceExporter creates a trace exporter based on this config. +func (f *ExampleExporterFactory) CreateTracesExporter( + _ context.Context, + _ component.ExporterCreateParams, + _ configmodels.Exporter, +) (component.TracesExporter, error) { + return &ExampleExporterConsumer{}, nil +} + +// CreateMetricsExporter creates a metrics exporter based on this config. +func (f *ExampleExporterFactory) CreateMetricsExporter( + _ context.Context, + _ component.ExporterCreateParams, + _ configmodels.Exporter, +) (component.MetricsExporter, error) { + return &ExampleExporterConsumer{}, nil +} + +func (f *ExampleExporterFactory) CreateLogsExporter( + _ context.Context, + _ component.ExporterCreateParams, + _ configmodels.Exporter, +) (component.LogsExporter, error) { + return &ExampleExporterConsumer{}, nil +} + +// ExampleExporterConsumer stores consumed traces and metrics for testing purposes. +type ExampleExporterConsumer struct { + Traces []pdata.Traces + Metrics []pdata.Metrics + Logs []pdata.Logs + ExporterStarted bool + ExporterShutdown bool +} + +// Start tells the exporter to start. The exporter may prepare for exporting +// by connecting to the endpoint. Host parameter can be used for communicating +// with the host after Start() has already returned. +func (exp *ExampleExporterConsumer) Start(_ context.Context, _ component.Host) error { + exp.ExporterStarted = true + return nil +} + +// ConsumeTraceData receives consumerdata.TraceData for processing by the TracesConsumer. +func (exp *ExampleExporterConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error { + exp.Traces = append(exp.Traces, td) + return nil +} + +// ConsumeMetricsData receives consumerdata.MetricsData for processing by the MetricsConsumer. +func (exp *ExampleExporterConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { + exp.Metrics = append(exp.Metrics, md) + return nil +} + +func (exp *ExampleExporterConsumer) ConsumeLogs(_ context.Context, ld pdata.Logs) error { + exp.Logs = append(exp.Logs, ld) + return nil +} + +// Shutdown is invoked during shutdown. +func (exp *ExampleExporterConsumer) Shutdown(context.Context) error { + exp.ExporterShutdown = true + return nil +} + +// ExampleProcessorCfg is for testing purposes. We are defining an example config and factory +// for "exampleprocessor" processor type. +type ExampleProcessorCfg struct { + configmodels.ProcessorSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +// ExampleProcessorFactory is factory for ExampleProcessor. 
+type ExampleProcessorFactory struct { +} + +// Type gets the type of the Processor config created by this factory. +func (f *ExampleProcessorFactory) Type() configmodels.Type { + return "exampleprocessor" +} + +// CreateDefaultConfig creates the default configuration for the Processor. +func (f *ExampleProcessorFactory) CreateDefaultConfig() configmodels.Processor { + return &ExampleProcessorCfg{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: f.Type(), + NameVal: string(f.Type()), + }, + ExtraSetting: "some export string", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +// CreateTraceProcessor creates a trace processor based on this config. +func (f *ExampleProcessorFactory) CreateTracesProcessor(ctx context.Context, params component.ProcessorCreateParams, cfg configmodels.Processor, nextConsumer consumer.TracesConsumer) (component.TracesProcessor, error) { + return &ExampleProcessor{nextTraces: nextConsumer}, nil +} + +// CreateMetricsProcessor creates a metrics processor based on this config. +func (f *ExampleProcessorFactory) CreateMetricsProcessor(ctx context.Context, params component.ProcessorCreateParams, cfg configmodels.Processor, nextConsumer consumer.MetricsConsumer) (component.MetricsProcessor, error) { + return &ExampleProcessor{nextMetrics: nextConsumer}, nil +} + +func (f *ExampleProcessorFactory) CreateLogsProcessor( + _ context.Context, + _ component.ProcessorCreateParams, + _ configmodels.Processor, + nextConsumer consumer.LogsConsumer, +) (component.LogsProcessor, error) { + return &ExampleProcessor{nextLogs: nextConsumer}, nil +} + +type ExampleProcessor struct { + nextTraces consumer.TracesConsumer + nextMetrics consumer.MetricsConsumer + nextLogs consumer.LogsConsumer +} + +func (ep *ExampleProcessor) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (ep *ExampleProcessor) Shutdown(_ context.Context) error { + return nil +} + +func (ep *ExampleProcessor) GetCapabilities() component.ProcessorCapabilities { + return component.ProcessorCapabilities{MutatesConsumedData: false} +} + +func (ep *ExampleProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error { + return ep.nextTraces.ConsumeTraces(ctx, td) +} + +func (ep *ExampleProcessor) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { + return ep.nextMetrics.ConsumeMetrics(ctx, md) +} + +func (ep *ExampleProcessor) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { + return ep.nextLogs.ConsumeLogs(ctx, ld) +} + +// ExampleExtensionCfg is for testing purposes. We are defining an example config and factory +// for "exampleextension" extension type. +type ExampleExtensionCfg struct { + configmodels.ExtensionSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +type ExampleExtension struct { +} + +func (e *ExampleExtension) Start(_ context.Context, _ component.Host) error { return nil } + +func (e *ExampleExtension) Shutdown(_ context.Context) error { return nil } + +// ExampleExtensionFactory is factory for ExampleExtensionCfg. +type ExampleExtensionFactory struct { + FailCreation bool +} + +// Type gets the type of the Extension config created by this factory. +func (f *ExampleExtensionFactory) Type() configmodels.Type { + return "exampleextension" +} + +// CreateDefaultConfig creates the default configuration for the Extension. 
+func (f *ExampleExtensionFactory) CreateDefaultConfig() configmodels.Extension { + return &ExampleExtensionCfg{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: f.Type(), + NameVal: string(f.Type()), + }, + ExtraSetting: "extra string setting", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +// CreateExtension creates an Extension based on this config. +func (f *ExampleExtensionFactory) CreateExtension(_ context.Context, _ component.ExtensionCreateParams, _ configmodels.Extension) (component.ServiceExtension, error) { + if f.FailCreation { + return nil, fmt.Errorf("cannot create %q extension type", f.Type()) + } + return &ExampleExtension{}, nil +} + +// ExampleComponents registers example factories. This is only used by tests. +func ExampleComponents() ( + factories component.Factories, + err error, +) { + if factories.Extensions, err = component.MakeExtensionFactoryMap(&ExampleExtensionFactory{}); err != nil { + return + } + + factories.Receivers, err = component.MakeReceiverFactoryMap( + &ExampleReceiverFactory{}, + &MultiProtoReceiverFactory{}, + ) + if err != nil { + return + } + + factories.Exporters, err = component.MakeExporterFactoryMap(&ExampleExporterFactory{}) + if err != nil { + return + } + + factories.Processors, err = component.MakeProcessorFactoryMap(&ExampleProcessorFactory{}) + + return +} diff --git a/internal/otel_collector/component/componenttest/example_factories_test.go b/internal/otel_collector/component/componenttest/example_factories_test.go new file mode 100644 index 00000000000..77b2c467d6a --- /dev/null +++ b/internal/otel_collector/component/componenttest/example_factories_test.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
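Before the vendored test file below, a brief sketch of how these example factories are consumed from outside the package. The factory maps returned by ExampleComponents are keyed by each factory's Type(), so lookups use the type names registered above; the test name is hypothetical.

```go
package componenttest_test // hypothetical external test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/collector/component/componenttest"
)

func TestExampleComponentsLookup(t *testing.T) {
	factories, err := componenttest.ExampleComponents()
	require.NoError(t, err)

	// Factories are registered under their Type() names.
	recv := factories.Receivers["examplereceiver"]
	require.NotNil(t, recv)
	require.Equal(t, "examplereceiver", string(recv.Type()))

	exp := factories.Exporters["exampleexporter"]
	require.NotNil(t, exp)
}
```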
+
+package componenttest
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+func TestExampleExporterConsumer(t *testing.T) {
+	exp := &ExampleExporterConsumer{}
+	host := NewNopHost()
+	assert.False(t, exp.ExporterStarted)
+	err := exp.Start(context.Background(), host)
+	assert.NoError(t, err)
+	assert.True(t, exp.ExporterStarted)
+
+	assert.Equal(t, 0, len(exp.Traces))
+	err = exp.ConsumeTraces(context.Background(), pdata.Traces{})
+	assert.NoError(t, err)
+	assert.Equal(t, 1, len(exp.Traces))
+
+	assert.Equal(t, 0, len(exp.Metrics))
+	err = exp.ConsumeMetrics(context.Background(), pdata.Metrics{})
+	assert.NoError(t, err)
+	assert.Equal(t, 1, len(exp.Metrics))
+
+	assert.False(t, exp.ExporterShutdown)
+	err = exp.Shutdown(context.Background())
+	assert.NoError(t, err)
+	assert.True(t, exp.ExporterShutdown)
+}
+
+func TestExampleReceiverProducer(t *testing.T) {
+	rcv := &ExampleReceiverProducer{}
+	host := NewNopHost()
+	assert.False(t, rcv.Started)
+	err := rcv.Start(context.Background(), host)
+	assert.NoError(t, err)
+	assert.True(t, rcv.Started)
+
+	err = rcv.Shutdown(context.Background())
+	assert.NoError(t, err)
+	assert.True(t, rcv.Stopped)
+}
diff --git a/internal/otel_collector/component/componenttest/nop_host.go b/internal/otel_collector/component/componenttest/nop_host.go
new file mode 100644
index 00000000000..3120c06ea9f
--- /dev/null
+++ b/internal/otel_collector/component/componenttest/nop_host.go
@@ -0,0 +1,52 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package componenttest
+
+import (
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// NopHost mocks a component.Host for test purposes.
+type NopHost struct {
+}
+
+var _ component.Host = (*NopHost)(nil)
+
+// NewNopHost returns a new instance of NopHost with proper defaults for most
+// tests.
+func NewNopHost() component.Host {
+	return &NopHost{}
+}
+
+// ReportFatalError is used to report to the host that the receiver encountered
+// a fatal error (i.e.: an error that the instance can't recover from) after
+// its start function has already returned.
+func (nh *NopHost) ReportFatalError(_ error) {
+	// Do nothing for now.
+}
+
+// GetFactory of the specified kind. Returns the factory for a component type.
+func (nh *NopHost) GetFactory(_ component.Kind, _ configmodels.Type) component.Factory { + return nil +} + +func (nh *NopHost) GetExtensions() map[configmodels.Extension]component.ServiceExtension { + return nil +} + +func (nh *NopHost) GetExporters() map[configmodels.DataType]map[configmodels.Exporter]component.Exporter { + return nil +} diff --git a/internal/otel_collector/component/componenttest/nop_host_test.go b/internal/otel_collector/component/componenttest/nop_host_test.go new file mode 100644 index 00000000000..6e9eb922af7 --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_host_test.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" +) + +func TestNewNopHost(t *testing.T) { + nh := NewNopHost() + require.NotNil(t, nh) + require.IsType(t, &NopHost{}, nh) + + nh.ReportFatalError(errors.New("TestError")) + assert.Nil(t, nh.GetExporters()) + assert.Nil(t, nh.GetExtensions()) + assert.Nil(t, nh.GetFactory(component.KindReceiver, "test")) +} diff --git a/internal/otel_collector/component/componenttest/testdata/invalid_go.txt b/internal/otel_collector/component/componenttest/testdata/invalid_go.txt new file mode 100644 index 00000000000..50ecec8f181 --- /dev/null +++ b/internal/otel_collector/component/componenttest/testdata/invalid_go.txt @@ -0,0 +1,6 @@ +package testdata + + +import ( + "import +) \ No newline at end of file diff --git a/internal/otel_collector/component/componenttest/testdata/valid_go.txt b/internal/otel_collector/component/componenttest/testdata/valid_go.txt new file mode 100644 index 00000000000..2c88748717d --- /dev/null +++ b/internal/otel_collector/component/componenttest/testdata/valid_go.txt @@ -0,0 +1,9 @@ +package testdata + + +import ( + "go.opentelemetry.io/collector/exporter/exporter1" +) + +func main() { +} \ No newline at end of file diff --git a/internal/otel_collector/component/exporter.go b/internal/otel_collector/component/exporter.go new file mode 100644 index 00000000000..d95c520ee22 --- /dev/null +++ b/internal/otel_collector/component/exporter.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package component + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" +) + +// Exporter defines functions that all exporters must implement. +type Exporter interface { + Component +} + +// TracesExporter is a Exporter that can consume traces. +type TracesExporter interface { + Exporter + consumer.TracesConsumer +} + +// MetricsExporter is an Exporter that can consume metrics. +type MetricsExporter interface { + Exporter + consumer.MetricsConsumer +} + +// LogsExporter is an Exporter that can consume logs. +type LogsExporter interface { + Exporter + consumer.LogsConsumer +} + +// ExporterCreateParams is passed to Create*Exporter functions. +type ExporterCreateParams struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // ApplicationStartInfo can be used by components for informational purposes + ApplicationStartInfo ApplicationStartInfo +} + +// ExporterFactory can create TracesExporter and MetricsExporter. This is the +// new factory type that can create new style exporters. +type ExporterFactory interface { + Factory + + // CreateDefaultConfig creates the default configuration for the Exporter. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Exporter. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have such check in the + // tests of any implementation of the Factory interface. + CreateDefaultConfig() configmodels.Exporter + + // CreateTracesExporter creates a trace exporter based on this config. + // If the exporter type does not support tracing or if the config is not valid + // error will be returned instead. + CreateTracesExporter( + ctx context.Context, + params ExporterCreateParams, + cfg configmodels.Exporter, + ) (TracesExporter, error) + + // CreateMetricsExporter creates a metrics exporter based on this config. + // If the exporter type does not support metrics or if the config is not valid + // error will be returned instead. + CreateMetricsExporter( + ctx context.Context, + params ExporterCreateParams, + cfg configmodels.Exporter, + ) (MetricsExporter, error) + + // CreateLogsExporter creates an exporter based on the config. + // If the exporter type does not support logs or if the config is not valid + // error will be returned instead. + CreateLogsExporter( + ctx context.Context, + params ExporterCreateParams, + cfg configmodels.Exporter, + ) (LogsExporter, error) +} diff --git a/internal/otel_collector/component/exporter_test.go b/internal/otel_collector/component/exporter_test.go new file mode 100644 index 00000000000..8c0efe81d85 --- /dev/null +++ b/internal/otel_collector/component/exporter_test.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+type TestExporterFactory struct {
+	name string
+}
+
+// Type gets the type of the Exporter config created by this factory.
+func (f *TestExporterFactory) Type() configmodels.Type {
+	return configmodels.Type(f.name)
+}
+
+// CreateDefaultConfig creates the default configuration for the Exporter.
+func (f *TestExporterFactory) CreateDefaultConfig() configmodels.Exporter {
+	return nil
+}
+
+// CreateTracesExporter creates a trace exporter based on this config.
+func (f *TestExporterFactory) CreateTracesExporter(context.Context, ExporterCreateParams, configmodels.Exporter) (TracesExporter, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsExporter creates a metrics exporter based on this config.
+func (f *TestExporterFactory) CreateMetricsExporter(context.Context, ExporterCreateParams, configmodels.Exporter) (MetricsExporter, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsExporter creates a logs exporter based on this config.
+func (f *TestExporterFactory) CreateLogsExporter(context.Context, ExporterCreateParams, configmodels.Exporter) (LogsExporter, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+func TestBuildExporters(t *testing.T) {
+	type testCase struct {
+		in  []ExporterFactory
+		out map[configmodels.Type]ExporterFactory
+	}
+
+	testCases := []testCase{
+		{
+			in: []ExporterFactory{
+				&TestExporterFactory{"exp1"},
+				&TestExporterFactory{"exp2"},
+			},
+			out: map[configmodels.Type]ExporterFactory{
+				"exp1": &TestExporterFactory{"exp1"},
+				"exp2": &TestExporterFactory{"exp2"},
+			},
+		},
+		{
+			in: []ExporterFactory{
+				&TestExporterFactory{"exp1"},
+				&TestExporterFactory{"exp1"},
+			},
+		},
+	}
+
+	for _, c := range testCases {
+		out, err := MakeExporterFactoryMap(c.in...)
+		if c.out == nil {
+			assert.Error(t, err)
+			continue
+		}
+		assert.NoError(t, err)
+		assert.Equal(t, c.out, out)
+	}
+}
diff --git a/internal/otel_collector/component/extension.go b/internal/otel_collector/component/extension.go
new file mode 100644
index 00000000000..9fb811388b8
--- /dev/null
+++ b/internal/otel_collector/component/extension.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// ServiceExtension is the interface for objects hosted by the OpenTelemetry Collector that
+// don't participate directly in data pipelines but provide some functionality
+// to the service; examples: health check endpoint, z-pages, etc.
+type ServiceExtension interface { + Component +} + +// PipelineWatcher is an extra interface for ServiceExtension hosted by the OpenTelemetry +// Collector that is to be implemented by extensions interested in changes to pipeline +// states. Typically this will be used by extensions that change their behavior if data is +// being ingested or not, e.g.: a k8s readiness probe. +type PipelineWatcher interface { + // Ready notifies the ServiceExtension that all pipelines were built and the + // receivers were started, i.e.: the service is ready to receive data + // (notice that it may already have received data when this method is called). + Ready() error + + // NotReady notifies the ServiceExtension that all receivers are about to be stopped, + // i.e.: pipeline receivers will not accept new data. + // This is sent before receivers are stopped, so the ServiceExtension can take any + // appropriate action before that happens. + NotReady() error +} + +// ExtensionCreateParams is passed to ExtensionFactory.Create* functions. +type ExtensionCreateParams struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // ApplicationStartInfo can be used by components for informational purposes + ApplicationStartInfo ApplicationStartInfo +} + +// ExtensionFactory is a factory interface for extensions to the service. +type ExtensionFactory interface { + Factory + + // CreateDefaultConfig creates the default configuration for the Extension. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Extension. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have such check in the + // tests of any implementation of the Factory interface. + CreateDefaultConfig() configmodels.Extension + + // CreateExtension creates a service extension based on the given config. + CreateExtension(ctx context.Context, params ExtensionCreateParams, cfg configmodels.Extension) (ServiceExtension, error) +} diff --git a/internal/otel_collector/component/factories.go b/internal/otel_collector/component/factories.go new file mode 100644 index 00000000000..860b5c57063 --- /dev/null +++ b/internal/otel_collector/component/factories.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package component + +import ( + "fmt" + + "go.opentelemetry.io/collector/config/configmodels" +) + +// Factories struct holds in a single type all component factories that +// can be handled by the Config. +type Factories struct { + // Receivers maps receiver type names in the config to the respective factory. + Receivers map[configmodels.Type]ReceiverFactory + + // Processors maps processor type names in the config to the respective factory. 
+	Processors map[configmodels.Type]ProcessorFactory
+
+	// Exporters maps exporter type names in the config to the respective factory.
+	Exporters map[configmodels.Type]ExporterFactory
+
+	// Extensions maps extension type names in the config to the respective factory.
+	Extensions map[configmodels.Type]ExtensionFactory
+}
+
+// MakeReceiverFactoryMap takes a list of receiver factories and returns a map
+// with factory type as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeReceiverFactoryMap(factories ...ReceiverFactory) (map[configmodels.Type]ReceiverFactory, error) {
+	fMap := map[configmodels.Type]ReceiverFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate receiver factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
+
+// MakeProcessorFactoryMap takes a list of processor factories and returns a map
+// with factory type as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeProcessorFactoryMap(factories ...ProcessorFactory) (map[configmodels.Type]ProcessorFactory, error) {
+	fMap := map[configmodels.Type]ProcessorFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate processor factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
+
+// MakeExporterFactoryMap takes a list of exporter factories and returns a map
+// with factory type as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeExporterFactoryMap(factories ...ExporterFactory) (map[configmodels.Type]ExporterFactory, error) {
+	fMap := map[configmodels.Type]ExporterFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate exporter factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
+
+// MakeExtensionFactoryMap takes a list of extension factories and returns a map
+// with factory type as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeExtensionFactoryMap(factories ...ExtensionFactory) (map[configmodels.Type]ExtensionFactory, error) {
+	fMap := map[configmodels.Type]ExtensionFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate extension factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
diff --git a/internal/otel_collector/component/processor.go b/internal/otel_collector/component/processor.go
new file mode 100644
index 00000000000..8d7779c6a74
--- /dev/null
+++ b/internal/otel_collector/component/processor.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
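A short sketch of how the Make*FactoryMap helpers above compose into a Factories value, reusing the example factories vendored earlier in this diff; the main wrapper and printed output are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componenttest"
)

func main() {
	receivers, err := component.MakeReceiverFactoryMap(
		&componenttest.ExampleReceiverFactory{},
		&componenttest.MultiProtoReceiverFactory{},
	)
	if err != nil {
		log.Fatal(err)
	}

	// Registering two factories with the same Type() fails fast.
	_, err = component.MakeReceiverFactoryMap(
		&componenttest.ExampleReceiverFactory{},
		&componenttest.ExampleReceiverFactory{},
	)
	fmt.Println(err) // duplicate receiver factory "examplereceiver"

	factories := component.Factories{Receivers: receivers}
	fmt.Println(len(factories.Receivers)) // 2
}
```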
+ +package component + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" +) + +// Processor defines the common functions that must be implemented by TracesProcessor +// and MetricsProcessor. +type Processor interface { + Component + + // GetCapabilities must return the capabilities of the processor. + GetCapabilities() ProcessorCapabilities +} + +// TracesProcessor is a processor that can consume traces. +type TracesProcessor interface { + Processor + consumer.TracesConsumer +} + +// MetricsProcessor is a processor that can consume metrics. +type MetricsProcessor interface { + Processor + consumer.MetricsConsumer +} + +// LogsProcessor is a processor that can consume logs. +type LogsProcessor interface { + Processor + consumer.LogsConsumer +} + +// ProcessorCapabilities describes the capabilities of a Processor. +type ProcessorCapabilities struct { + // MutatesConsumedData is set to true if Consume* function of the + // processor modifies the input TraceData or MetricsData argument. + // Processors which modify the input data MUST set this flag to true. If the processor + // does not modify the data it MUST set this flag to false. If the processor creates + // a copy of the data before modifying then this flag can be safely set to false. + MutatesConsumedData bool +} + +// ProcessorCreateParams is passed to Create* functions in ProcessorFactory. +type ProcessorCreateParams struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // ApplicationStartInfo can be used by components for informational purposes + ApplicationStartInfo ApplicationStartInfo +} + +// ProcessorFactory is factory interface for processors. This is the +// new factory type that can create new style processors. +type ProcessorFactory interface { + Factory + + // CreateDefaultConfig creates the default configuration for the Processor. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Processor. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have such check in the + // tests of any implementation of the Factory interface. + CreateDefaultConfig() configmodels.Processor + + // CreateTraceProcessor creates a trace processor based on this config. + // If the processor type does not support tracing or if the config is not valid + // error will be returned instead. + CreateTracesProcessor( + ctx context.Context, + params ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, + ) (TracesProcessor, error) + + // CreateMetricsProcessor creates a metrics processor based on this config. + // If the processor type does not support metrics or if the config is not valid + // error will be returned instead. + CreateMetricsProcessor( + ctx context.Context, + params ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.MetricsConsumer, + ) (MetricsProcessor, error) + + // CreateLogsProcessor creates a processor based on the config. + // If the processor type does not support logs or if the config is not valid + // error will be returned instead. 
+	CreateLogsProcessor(
+		ctx context.Context,
+		params ProcessorCreateParams,
+		cfg configmodels.Processor,
+		nextConsumer consumer.LogsConsumer,
+	) (LogsProcessor, error)
+}
diff --git a/internal/otel_collector/component/processor_test.go b/internal/otel_collector/component/processor_test.go
new file mode 100644
index 00000000000..7849f1006e9
--- /dev/null
+++ b/internal/otel_collector/component/processor_test.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+type TestProcessorFactory struct {
+	name string
+}
+
+// Type gets the type of the Processor config created by this factory.
+func (f *TestProcessorFactory) Type() configmodels.Type {
+	return configmodels.Type(f.name)
+}
+
+// CreateDefaultConfig creates the default configuration for the Processor.
+func (f *TestProcessorFactory) CreateDefaultConfig() configmodels.Processor {
+	return nil
+}
+
+// CreateTracesProcessor creates a trace processor based on this config.
+func (f *TestProcessorFactory) CreateTracesProcessor(context.Context, ProcessorCreateParams, configmodels.Processor, consumer.TracesConsumer) (TracesProcessor, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsProcessor creates a metrics processor based on this config.
+func (f *TestProcessorFactory) CreateMetricsProcessor(context.Context, ProcessorCreateParams, configmodels.Processor, consumer.MetricsConsumer) (MetricsProcessor, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsProcessor creates a logs processor based on this config.
+func (f *TestProcessorFactory) CreateLogsProcessor(context.Context, ProcessorCreateParams, configmodels.Processor, consumer.LogsConsumer) (LogsProcessor, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+func TestFactoriesBuilder(t *testing.T) {
+	type testCase struct {
+		in  []ProcessorFactory
+		out map[configmodels.Type]ProcessorFactory
+	}
+
+	testCases := []testCase{
+		{
+			in: []ProcessorFactory{
+				&TestProcessorFactory{"p1"},
+				&TestProcessorFactory{"p2"},
+			},
+			out: map[configmodels.Type]ProcessorFactory{
+				"p1": &TestProcessorFactory{"p1"},
+				"p2": &TestProcessorFactory{"p2"},
+			},
+		},
+		{
+			in: []ProcessorFactory{
+				&TestProcessorFactory{"p1"},
+				&TestProcessorFactory{"p1"},
+			},
+		},
+	}
+
+	for _, c := range testCases {
+		out, err := MakeProcessorFactoryMap(c.in...)
+		if c.out == nil {
+			assert.Error(t, err)
+			continue
+		}
+		assert.NoError(t, err)
+		assert.Equal(t, c.out, out)
+	}
+}
diff --git a/internal/otel_collector/component/receiver.go b/internal/otel_collector/component/receiver.go
new file mode 100644
index 00000000000..a0cd40dc889
--- /dev/null
+++ b/internal/otel_collector/component/receiver.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+// Receiver defines functions that trace, metrics and logs receivers must implement.
+type Receiver interface {
+	Component
+}
+
+// A TracesReceiver is an "arbitrary data"-to-"internal format" converter.
+// Its purpose is to translate data from the wild into the internal trace format.
+// TracesReceiver feeds a consumer.TracesConsumer with data.
+//
+// For example, it could be a Zipkin data source which translates
+// Zipkin spans into consumerdata.TraceData.
+type TracesReceiver interface {
+	Receiver
+}
+
+// A MetricsReceiver is an "arbitrary data"-to-"internal format" converter.
+// Its purpose is to translate data from the wild into the internal metrics format.
+// MetricsReceiver feeds a consumer.MetricsConsumer with data.
+//
+// For example, it could be a Prometheus data source which translates
+// Prometheus metrics into consumerdata.MetricsData.
+type MetricsReceiver interface {
+	Receiver
+}
+
+// A LogsReceiver is a "log data"-to-"internal format" converter.
+// Its purpose is to translate data from the wild into the internal data format.
+// LogsReceiver feeds a consumer.LogsConsumer with data.
+type LogsReceiver interface {
+	Receiver
+}
+
+// ReceiverCreateParams is passed to ReceiverFactory.Create* functions.
+type ReceiverCreateParams struct {
+	// Logger that the factory can use during creation and can pass to the created
+	// component to be used later as well.
+	Logger *zap.Logger
+
+	// ApplicationStartInfo can be used by components for informational purposes.
+	ApplicationStartInfo ApplicationStartInfo
+}
+
+// ReceiverFactory can create TracesReceiver, MetricsReceiver and LogsReceiver
+// instances. This is the new factory type that can create new-style receivers.
+type ReceiverFactory interface {
+	Factory
+
+	// CreateDefaultConfig creates the default configuration for the Receiver.
+	// This method can be called multiple times depending on the pipeline
+	// configuration and should not cause side-effects that prevent the creation
+	// of multiple instances of the Receiver.
+	// The object returned by this method needs to pass the checks implemented by
+	// 'configcheck.ValidateConfig'. It is recommended to have such a check in the
+	// tests of any implementation of the Factory interface.
+	CreateDefaultConfig() configmodels.Receiver
+
+	// CreateTracesReceiver creates a trace receiver based on this config.
+	// If the receiver type does not support tracing or if the config is not valid,
+	// an error will be returned instead.
+	CreateTracesReceiver(ctx context.Context, params ReceiverCreateParams,
+		cfg configmodels.Receiver, nextConsumer consumer.TracesConsumer) (TracesReceiver, error)
+
+	// CreateMetricsReceiver creates a metrics receiver based on this config.
+	// If the receiver type does not support metrics or if the config is not valid,
+	// an error will be returned instead.
+	CreateMetricsReceiver(ctx context.Context, params ReceiverCreateParams,
+		cfg configmodels.Receiver, nextConsumer consumer.MetricsConsumer) (MetricsReceiver, error)
+
+	// CreateLogsReceiver creates a log receiver based on this config.
+	// If the receiver type does not support the data type or if the config is not valid,
+	// an error will be returned instead.
+	CreateLogsReceiver(ctx context.Context, params ReceiverCreateParams,
+		cfg configmodels.Receiver, nextConsumer consumer.LogsConsumer) (LogsReceiver, error)
+}
diff --git a/internal/otel_collector/component/receiver_test.go b/internal/otel_collector/component/receiver_test.go
new file mode 100644
index 00000000000..ee844afd4c2
--- /dev/null
+++ b/internal/otel_collector/component/receiver_test.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+type TestReceiverFactory struct {
+	name configmodels.Type
+}
+
+// Type gets the type of the Receiver config created by this factory.
+func (f *TestReceiverFactory) Type() configmodels.Type {
+	return f.name
+}
+
+// CreateDefaultConfig creates the default configuration for the Receiver.
+func (f *TestReceiverFactory) CreateDefaultConfig() configmodels.Receiver {
+	return nil
+}
+
+// CreateTracesReceiver creates a trace receiver based on this config.
+func (f *TestReceiverFactory) CreateTracesReceiver(context.Context, ReceiverCreateParams, configmodels.Receiver, consumer.TracesConsumer) (TracesReceiver, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsReceiver creates a metrics receiver based on this config.
+func (f *TestReceiverFactory) CreateMetricsReceiver(context.Context, ReceiverCreateParams, configmodels.Receiver, consumer.MetricsConsumer) (MetricsReceiver, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsReceiver creates a logs receiver based on this config.
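+// Like the trace and metrics stubs above, it only reports ErrDataTypeIsNotSupported.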
+func (f *TestReceiverFactory) CreateLogsReceiver(context.Context, ReceiverCreateParams, configmodels.Receiver, consumer.LogsConsumer) (LogsReceiver, error) {
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+func TestBuildReceivers(t *testing.T) {
+	type testCase struct {
+		in  []ReceiverFactory
+		out map[configmodels.Type]ReceiverFactory
+	}
+
+	testCases := []testCase{
+		{
+			in: []ReceiverFactory{
+				&TestReceiverFactory{"e1"},
+				&TestReceiverFactory{"e2"},
+			},
+			out: map[configmodels.Type]ReceiverFactory{
+				"e1": &TestReceiverFactory{"e1"},
+				"e2": &TestReceiverFactory{"e2"},
+			},
+		},
+		{
+			in: []ReceiverFactory{
+				&TestReceiverFactory{"e1"},
+				&TestReceiverFactory{"e1"},
+			},
+		},
+	}
+
+	for _, c := range testCases {
+		out, err := MakeReceiverFactoryMap(c.in...)
+		if c.out == nil {
+			assert.Error(t, err)
+			continue
+		}
+		assert.NoError(t, err)
+		assert.Equal(t, c.out, out)
+	}
+}
diff --git a/internal/otel_collector/config/config.go b/internal/otel_collector/config/config.go
new file mode 100644
index 00000000000..5bbfcd0669c
--- /dev/null
+++ b/internal/otel_collector/config/config.go
@@ -0,0 +1,780 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package config implements loading of configuration from a Viper instance.
+// The implementation relies on registered factories that allow creating
+// default configuration for each type of receiver/exporter/processor.
+package config
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+
+	"github.com/spf13/cast"
+	"github.com/spf13/viper"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// These are errors that can be returned by Load(). Note that error codes are not part
+// of Load()'s public API; they are for internal unit testing only.
+type configErrorCode int
+
+const (
+	_ configErrorCode = iota // skip 0, start error codes from 1.
+	errInvalidSubConfig
+	errInvalidTypeAndNameKey
+	errUnknownType
+	errDuplicateName
+	errMissingPipelines
+	errPipelineMustHaveReceiver
+	errPipelineMustHaveExporter
+	errExtensionNotExists
+	errPipelineReceiverNotExists
+	errPipelineProcessorNotExists
+	errPipelineExporterNotExists
+	errMissingReceivers
+	errMissingExporters
+	errUnmarshalTopLevelStructureError
+)
+
+const (
+	// ViperDelimiter is used as the default key delimiter in the default viper instance.
+	ViperDelimiter = "::"
+)
+
+type configError struct {
+	msg  string          // human-readable error message.
+	code configErrorCode // internal error code.
+}
+
+func (e *configError) Error() string {
+	return e.msg
+}
+
+// YAML top-level configuration keys.
+const (
+	// extensionsKeyName is the configuration key name for the extensions section.
+	extensionsKeyName = "extensions"
+
+	// receiversKeyName is the configuration key name for the receivers section.
+	receiversKeyName = "receivers"
+
+	// exportersKeyName is the configuration key name for the exporters section.
+	exportersKeyName = "exporters"
+
+	// processorsKeyName is the configuration key name for the processors section.
+	processorsKeyName = "processors"
+
+	// pipelinesKeyName is the configuration key name for the pipelines section.
+	pipelinesKeyName = "pipelines"
+)
+
+type configSettings struct {
+	Receivers  map[string]map[string]interface{} `mapstructure:"receivers"`
+	Processors map[string]map[string]interface{} `mapstructure:"processors"`
+	Exporters  map[string]map[string]interface{} `mapstructure:"exporters"`
+	Extensions map[string]map[string]interface{} `mapstructure:"extensions"`
+	Service    serviceSettings                   `mapstructure:"service"`
+}
+
+type serviceSettings struct {
+	Extensions []string                    `mapstructure:"extensions"`
+	Pipelines  map[string]pipelineSettings `mapstructure:"pipelines"`
+}
+
+type pipelineSettings struct {
+	Receivers  []string `mapstructure:"receivers"`
+	Processors []string `mapstructure:"processors"`
+	Exporters  []string `mapstructure:"exporters"`
+}
+
+// deprecatedUnmarshaler is the old/deprecated way to provide a custom unmarshaler.
+type deprecatedUnmarshaler interface {
+	// CustomUnmarshaler returns a custom unmarshaler for the configuration or nil if
+	// there is no need for custom unmarshaling. This is typically used if viper.UnmarshalExact()
+	// is not sufficient to unmarshal correctly.
+	CustomUnmarshaler() component.CustomUnmarshaler
+}
+
+// typeAndNameSeparator is the separator that is used between type and name in type/name composite keys.
+const typeAndNameSeparator = "/"
+
+// NewViper creates a new Viper instance with a different key delimiter ("::" instead of
+// the default "."). This way configs can have keys that contain ".".
+func NewViper() *viper.Viper {
+	return viper.NewWithOptions(viper.KeyDelimiter(ViperDelimiter))
+}
+
+// Load loads a Config from Viper.
+// After loading the config, callers still need to check that it is valid by calling `ValidateConfig`.
+func Load(
+	v *viper.Viper,
+	factories component.Factories,
+) (*configmodels.Config, error) {
+
+	var config configmodels.Config
+
+	// Load the config.
+
+	// Struct to validate top level sections.
+	var rawCfg configSettings
+	if err := v.UnmarshalExact(&rawCfg); err != nil {
+		return nil, &configError{
+			code: errUnmarshalTopLevelStructureError,
+			msg:  fmt.Sprintf("error reading top level configuration sections: %s", err.Error()),
+		}
+	}
+
+	// In the following section use v.GetStringMap(xyzKeyName) instead of rawCfg.Xyz, because
+	// UnmarshalExact will not unmarshal entries in the map[string]interface{} with nil values.
+	// GetStringMap does the correct thing.
+
+	// Start with the service extensions.
+
+	extensions, err := loadExtensions(v.GetStringMap(extensionsKeyName), factories.Extensions)
+	if err != nil {
+		return nil, err
+	}
+	config.Extensions = extensions
+
+	// Load data components (receivers, exporters, and processors).
+
+	receivers, err := loadReceivers(v.GetStringMap(receiversKeyName), factories.Receivers)
+	if err != nil {
+		return nil, err
+	}
+	config.Receivers = receivers
+
+	exporters, err := loadExporters(v.GetStringMap(exportersKeyName), factories.Exporters)
+	if err != nil {
+		return nil, err
+	}
+	config.Exporters = exporters
+
+	processors, err := loadProcessors(v.GetStringMap(processorsKeyName), factories.Processors)
+	if err != nil {
+		return nil, err
+	}
+	config.Processors = processors
+
+	// Load the service and its data pipelines.
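+	// Note: whether the component names referenced by the pipelines actually
+	// exist is only checked later, by ValidateConfig.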
+ service, err := loadService(rawCfg.Service) + if err != nil { + return nil, err + } + config.Service = service + + return &config, nil +} + +// DecodeTypeAndName decodes a key in type[/name] format into type and fullName. +// fullName is the key normalized such that type and name components have spaces trimmed. +// The "type" part must be present, the forward slash and "name" are optional. typeStr +// will be non-empty if err is nil. +func DecodeTypeAndName(key string) (typeStr configmodels.Type, fullName string, err error) { + items := strings.SplitN(key, typeAndNameSeparator, 2) + + if len(items) >= 1 { + typeStr = configmodels.Type(strings.TrimSpace(items[0])) + } + + if len(items) == 0 || typeStr == "" { + err = errors.New("type/name key must have the type part") + return + } + + var nameSuffix string + if len(items) > 1 { + // "name" part is present. + nameSuffix = strings.TrimSpace(items[1]) + if nameSuffix == "" { + err = errors.New("name part must be specified after " + typeAndNameSeparator + " in type/name key") + return + } + } else { + nameSuffix = "" + } + + // Create normalized fullName. + if nameSuffix == "" { + fullName = string(typeStr) + } else { + fullName = string(typeStr) + typeAndNameSeparator + nameSuffix + } + + err = nil + return +} + +func errorInvalidTypeAndNameKey(component, key string, err error) error { + return &configError{ + code: errInvalidTypeAndNameKey, + msg: fmt.Sprintf("invalid %s type and name key %q: %v", component, key, err), + } +} + +func errorUnknownType(component string, typeStr configmodels.Type, fullName string) error { + return &configError{ + code: errUnknownType, + msg: fmt.Sprintf("unknown %s type %q for %s", component, typeStr, fullName), + } +} + +func errorUnmarshalError(component string, fullName string, err error) error { + return &configError{ + code: errUnmarshalTopLevelStructureError, + msg: fmt.Sprintf("error reading %s configuration for %s: %v", component, fullName, err), + } +} + +func errorDuplicateName(component string, fullName string) error { + return &configError{ + code: errDuplicateName, + msg: fmt.Sprintf("duplicate %s name %s", component, fullName), + } +} + +func loadExtensions(exts map[string]interface{}, factories map[configmodels.Type]component.ExtensionFactory) (configmodels.Extensions, error) { + // Prepare resulting map. + extensions := make(configmodels.Extensions) + + // Iterate over extensions and create a config for each. + for key, value := range exts { + componentConfig := viperFromStringMap(cast.ToStringMap(value)) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. + typeStr, fullName, err := DecodeTypeAndName(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(extensionsKeyName, key, err) + } + + // Find extension factory based on "type" that we read from config source. + factory := factories[typeStr] + if factory == nil { + return nil, errorUnknownType(extensionsKeyName, typeStr, fullName) + } + + // Create the default config for this extension + extensionCfg := factory.CreateDefaultConfig() + extensionCfg.SetName(fullName) + expandEnvLoadedConfig(extensionCfg) + + // Now that the default config struct is created we can Unmarshal into it + // and it will apply user-defined config on top of the default. 
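+		// unmarshaler() prefers a custom unmarshaler when the factory provides
+		// one, and falls back to viper.UnmarshalExact otherwise.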
+ unm := unmarshaler(factory) + if err := unm(componentConfig, extensionCfg); err != nil { + return nil, errorUnmarshalError(extensionsKeyName, fullName, err) + } + + if extensions[fullName] != nil { + return nil, errorDuplicateName(extensionsKeyName, fullName) + } + + extensions[fullName] = extensionCfg + } + + return extensions, nil +} + +func loadService(rawService serviceSettings) (configmodels.Service, error) { + var ret configmodels.Service + ret.Extensions = rawService.Extensions + + // Process the pipelines first so in case of error on them it can be properly + // reported. + pipelines, err := loadPipelines(rawService.Pipelines) + ret.Pipelines = pipelines + + return ret, err +} + +// LoadReceiver loads a receiver config from componentConfig using the provided factories. +func LoadReceiver(componentConfig *viper.Viper, typeStr configmodels.Type, fullName string, factory component.ReceiverFactory) (configmodels.Receiver, error) { + // Create the default config for this receiver. + receiverCfg := factory.CreateDefaultConfig() + receiverCfg.SetName(fullName) + expandEnvLoadedConfig(receiverCfg) + + // Now that the default config struct is created we can Unmarshal into it + // and it will apply user-defined config on top of the default. + unm := unmarshaler(factory) + if err := unm(componentConfig, receiverCfg); err != nil { + return nil, errorUnmarshalError(receiversKeyName, fullName, err) + } + + return receiverCfg, nil +} + +func loadReceivers(recvs map[string]interface{}, factories map[configmodels.Type]component.ReceiverFactory) (configmodels.Receivers, error) { + // Prepare resulting map + receivers := make(configmodels.Receivers) + + // Iterate over input map and create a config for each. + for key, value := range recvs { + componentConfig := viperFromStringMap(cast.ToStringMap(value)) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. + typeStr, fullName, err := DecodeTypeAndName(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(receiversKeyName, key, err) + } + + // Find receiver factory based on "type" that we read from config source + factory := factories[typeStr] + if factory == nil { + return nil, errorUnknownType(receiversKeyName, typeStr, fullName) + } + + receiverCfg, err := LoadReceiver(componentConfig, typeStr, fullName, factory) + + if err != nil { + // LoadReceiver already wraps the error. + return nil, err + } + + if receivers[receiverCfg.Name()] != nil { + return nil, errorDuplicateName(receiversKeyName, fullName) + } + receivers[receiverCfg.Name()] = receiverCfg + } + + return receivers, nil +} + +func loadExporters(exps map[string]interface{}, factories map[configmodels.Type]component.ExporterFactory) (configmodels.Exporters, error) { + // Prepare resulting map + exporters := make(configmodels.Exporters) + + // Iterate over Exporters and create a config for each. + for key, value := range exps { + componentConfig := viperFromStringMap(cast.ToStringMap(value)) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. 
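+		// e.g. the key "exampleexporter/myexporter" yields the type
+		// "exampleexporter" and the fullName "exampleexporter/myexporter".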
+ typeStr, fullName, err := DecodeTypeAndName(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(exportersKeyName, key, err) + } + + // Find exporter factory based on "type" that we read from config source + factory := factories[typeStr] + if factory == nil { + return nil, errorUnknownType(exportersKeyName, typeStr, fullName) + } + + // Create the default config for this exporter + exporterCfg := factory.CreateDefaultConfig() + exporterCfg.SetName(fullName) + expandEnvLoadedConfig(exporterCfg) + + // Now that the default config struct is created we can Unmarshal into it + // and it will apply user-defined config on top of the default. + unm := unmarshaler(factory) + if err := unm(componentConfig, exporterCfg); err != nil { + return nil, errorUnmarshalError(exportersKeyName, fullName, err) + } + + if exporters[fullName] != nil { + return nil, errorDuplicateName(exportersKeyName, fullName) + } + + exporters[fullName] = exporterCfg + } + + return exporters, nil +} + +func loadProcessors(procs map[string]interface{}, factories map[configmodels.Type]component.ProcessorFactory) (configmodels.Processors, error) { + // Prepare resulting map. + processors := make(configmodels.Processors) + + // Iterate over processors and create a config for each. + for key, value := range procs { + componentConfig := viperFromStringMap(cast.ToStringMap(value)) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. + typeStr, fullName, err := DecodeTypeAndName(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(processorsKeyName, key, err) + } + + // Find processor factory based on "type" that we read from config source. + factory := factories[typeStr] + if factory == nil { + return nil, errorUnknownType(processorsKeyName, typeStr, fullName) + } + + // Create the default config for this processor. + processorCfg := factory.CreateDefaultConfig() + processorCfg.SetName(fullName) + expandEnvLoadedConfig(processorCfg) + + // Now that the default config struct is created we can Unmarshal into it + // and it will apply user-defined config on top of the default. + unm := unmarshaler(factory) + if err := unm(componentConfig, processorCfg); err != nil { + return nil, errorUnmarshalError(processorsKeyName, fullName, err) + } + + if processors[fullName] != nil { + return nil, errorDuplicateName(processorsKeyName, fullName) + } + + processors[fullName] = processorCfg + } + + return processors, nil +} + +func loadPipelines(pipelinesConfig map[string]pipelineSettings) (configmodels.Pipelines, error) { + // Prepare resulting map. + pipelines := make(configmodels.Pipelines) + + // Iterate over input map and create a config for each. + for key, rawPipeline := range pipelinesConfig { + // Decode the key into type and name components. + typeStr, fullName, err := DecodeTypeAndName(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(pipelinesKeyName, key, err) + } + + // Create the config for this pipeline. + var pipelineCfg configmodels.Pipeline + + // Set the type. 
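+		// The type part of the pipeline key doubles as its data type:
+		// traces, metrics or logs.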
+ pipelineCfg.InputType = configmodels.DataType(typeStr) + switch pipelineCfg.InputType { + case configmodels.TracesDataType: + case configmodels.MetricsDataType: + case configmodels.LogsDataType: + default: + return nil, errorUnknownType(pipelinesKeyName, typeStr, fullName) + } + + pipelineCfg.Name = fullName + pipelineCfg.Receivers = rawPipeline.Receivers + pipelineCfg.Processors = rawPipeline.Processors + pipelineCfg.Exporters = rawPipeline.Exporters + + if pipelines[fullName] != nil { + return nil, errorDuplicateName(pipelinesKeyName, fullName) + } + + pipelines[fullName] = &pipelineCfg + } + + return pipelines, nil +} + +// ValidateConfig validates config. +func ValidateConfig(cfg *configmodels.Config, _ *zap.Logger) error { + // This function performs basic validation of configuration. There may be more subtle + // invalid cases that we currently don't check for but which we may want to add in + // the future (e.g. disallowing receiving and exporting on the same endpoint). + + if err := validateReceivers(cfg); err != nil { + return err + } + + if err := validateExporters(cfg); err != nil { + return err + } + + if err := validateService(cfg); err != nil { + return err + } + + return nil +} + +func validateService(cfg *configmodels.Config) error { + if err := validatePipelines(cfg); err != nil { + return err + } + + return validateServiceExtensions(cfg) +} + +func validateServiceExtensions(cfg *configmodels.Config) error { + if len(cfg.Service.Extensions) == 0 { + return nil + } + + // Validate extensions. + for _, ref := range cfg.Service.Extensions { + // Check that the name referenced in the Service extensions exists in the top-level extensions + if cfg.Extensions[ref] == nil { + return &configError{ + code: errExtensionNotExists, + msg: fmt.Sprintf("Service references extension %q which does not exist", ref), + } + } + } + + return nil +} + +func validatePipelines(cfg *configmodels.Config) error { + // Must have at least one pipeline. + if len(cfg.Service.Pipelines) == 0 { + return &configError{code: errMissingPipelines, msg: "must have at least one pipeline"} + } + + // Validate pipelines. + for _, pipeline := range cfg.Service.Pipelines { + if err := validatePipeline(cfg, pipeline); err != nil { + return err + } + } + return nil +} + +func validatePipeline(cfg *configmodels.Config, pipeline *configmodels.Pipeline) error { + if err := validatePipelineReceivers(cfg, pipeline); err != nil { + return err + } + + if err := validatePipelineExporters(cfg, pipeline); err != nil { + return err + } + + if err := validatePipelineProcessors(cfg, pipeline); err != nil { + return err + } + + return nil +} + +func validatePipelineReceivers(cfg *configmodels.Config, pipeline *configmodels.Pipeline) error { + if len(pipeline.Receivers) == 0 { + return &configError{ + code: errPipelineMustHaveReceiver, + msg: fmt.Sprintf("pipeline %q must have at least one receiver", pipeline.Name), + } + } + + // Validate pipeline receiver name references. 
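+	// Each listed name must match a key of the top-level receivers section.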
+ for _, ref := range pipeline.Receivers { + // Check that the name referenced in the pipeline's receivers exists in the top-level receivers + if cfg.Receivers[ref] == nil { + return &configError{ + code: errPipelineReceiverNotExists, + msg: fmt.Sprintf("pipeline %q references receiver %q which does not exist", pipeline.Name, ref), + } + } + } + + return nil +} + +func validatePipelineExporters(cfg *configmodels.Config, pipeline *configmodels.Pipeline) error { + if len(pipeline.Exporters) == 0 { + return &configError{ + code: errPipelineMustHaveExporter, + msg: fmt.Sprintf("pipeline %q must have at least one exporter", pipeline.Name), + } + } + + // Validate pipeline exporter name references. + for _, ref := range pipeline.Exporters { + // Check that the name referenced in the pipeline's Exporters exists in the top-level Exporters + if cfg.Exporters[ref] == nil { + return &configError{ + code: errPipelineExporterNotExists, + msg: fmt.Sprintf("pipeline %q references exporter %q which does not exist", pipeline.Name, ref), + } + } + } + + return nil +} + +func validatePipelineProcessors(cfg *configmodels.Config, pipeline *configmodels.Pipeline) error { + if len(pipeline.Processors) == 0 { + return nil + } + + // Validate pipeline processor name references + for _, ref := range pipeline.Processors { + // Check that the name referenced in the pipeline's processors exists in the top-level processors. + if cfg.Processors[ref] == nil { + return &configError{ + code: errPipelineProcessorNotExists, + msg: fmt.Sprintf("pipeline %q references processor %s which does not exist", pipeline.Name, ref), + } + } + } + + return nil +} + +func validateReceivers(cfg *configmodels.Config) error { + // Currently there is no default receiver enabled. The configuration must specify at least one enabled receiver to + // be valid. + if len(cfg.Receivers) == 0 { + return &configError{ + code: errMissingReceivers, + msg: "no enabled receivers specified in config", + } + } + return nil +} + +func validateExporters(cfg *configmodels.Config) error { + // There must be at least one enabled exporter to be considered a valid configuration. + if len(cfg.Exporters) == 0 { + return &configError{ + code: errMissingExporters, + msg: "no enabled exporters specified in config", + } + } + return nil +} + +// expandEnvConfig creates a new viper config with expanded values for all the values (simple, list or map value). +// It does not expand the keys. +func expandEnvConfig(v *viper.Viper) { + for _, k := range v.AllKeys() { + v.Set(k, expandStringValues(v.Get(k))) + } +} + +func expandStringValues(value interface{}) interface{} { + switch v := value.(type) { + default: + return v + case string: + return expandEnv(v) + case []interface{}: + nslice := make([]interface{}, 0, len(v)) + for _, vint := range v { + nslice = append(nslice, expandStringValues(vint)) + } + return nslice + case map[interface{}]interface{}: + nmap := make(map[interface{}]interface{}, len(v)) + for k, vint := range v { + nmap[k] = expandStringValues(vint) + } + return nmap + } +} + +// expandEnvLoadedConfig is a utility function that goes recursively through a config object +// and tries to expand environment variables in its string fields. 
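+// Only settable (exported) fields are modified; unexported fields are left untouched.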
+func expandEnvLoadedConfig(s interface{}) {
+	expandEnvLoadedConfigPointer(s)
+}
+
+func expandEnvLoadedConfigPointer(s interface{}) {
+	// Check that the value given is indeed a pointer, otherwise safely stop the search here
+	value := reflect.ValueOf(s)
+	if value.Kind() != reflect.Ptr {
+		return
+	}
+	// Run expandEnvLoadedConfigValue on the value behind the pointer
+	expandEnvLoadedConfigValue(value.Elem())
+}
+
+func expandEnvLoadedConfigValue(value reflect.Value) {
+	// The value given is a string, we expand it (if allowed)
+	if value.Kind() == reflect.String && value.CanSet() {
+		value.SetString(expandEnv(value.String()))
+	}
+	// The value given is a struct, we go through its fields
+	if value.Kind() == reflect.Struct {
+		for i := 0; i < value.NumField(); i++ {
+			field := value.Field(i) // Returns the content of the field
+			if field.CanSet() { // Only try to modify a field if it can be modified (e.g. skip unexported fields)
+				switch field.Kind() {
+				case reflect.String: // The current field is a string, we want to expand it
+					field.SetString(expandEnv(field.String())) // Expand env variables in the string
+				case reflect.Ptr: // The current field is a pointer
+					expandEnvLoadedConfigPointer(field.Interface()) // Run the expansion function on the pointer
+				case reflect.Struct: // The current field is a nested struct
+					expandEnvLoadedConfigValue(field) // Go through the nested struct
+				}
+			}
+		}
+	}
+}
+
+func expandEnv(s string) string {
+	return os.Expand(s, func(str string) string {
+		// This allows escaping environment variable substitution via $$, e.g.
+		// - $FOO will be substituted with env var FOO
+		// - $$FOO will be replaced with $FOO
+		// - $$$FOO will be replaced with $ + substituted env var FOO
+		if str == "$" {
+			return "$"
+		}
+		return os.Getenv(str)
+	})
+}
+
+func unmarshaler(factory component.Factory) component.CustomUnmarshaler {
+	if fu, ok := factory.(component.ConfigUnmarshaler); ok {
+		return fu.Unmarshal
+	}
+
+	if du, ok := factory.(deprecatedUnmarshaler); ok {
+		cu := du.CustomUnmarshaler()
+		if cu != nil {
+			return cu
+		}
+	}
+
+	return defaultUnmarshaler
+}
+
+func defaultUnmarshaler(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	return componentViperSection.UnmarshalExact(intoCfg)
+}
+
+// ViperSubExact is copied from Viper but changed to use the same delimiter
+// and to return an error if the sub-config is not a map.
+// See https://github.com/spf13/viper/issues/871
+func ViperSubExact(v *viper.Viper, key string) (*viper.Viper, error) {
+	data := v.Get(key)
+	if data == nil {
+		return NewViper(), nil
+	}
+
+	if reflect.TypeOf(data).Kind() == reflect.Map {
+		subv := NewViper()
+		// Cannot return error because the subv is empty.
+		_ = subv.MergeConfigMap(cast.ToStringMap(data))
+		return subv, nil
+	}
+	return nil, &configError{
+		code: errInvalidSubConfig,
+		msg:  fmt.Sprintf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind()),
+	}
+}
+
+func viperFromStringMap(data map[string]interface{}) *viper.Viper {
+	v := NewViper()
+	// Cannot return error because v is empty.
+ _ = v.MergeConfigMap(cast.ToStringMap(data)) + return v +} diff --git a/internal/otel_collector/config/config_test.go b/internal/otel_collector/config/config_test.go new file mode 100644 index 00000000000..e0ed1589c90 --- /dev/null +++ b/internal/otel_collector/config/config_test.go @@ -0,0 +1,797 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "os" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" +) + +func TestDecodeConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + // Load the config + config, err := loadConfigFile(t, path.Join(".", "testdata", "valid-config.yaml"), factories) + require.NoError(t, err, "Unable to load config") + + // Verify extensions. + assert.Equal(t, 3, len(config.Extensions)) + assert.Equal(t, "some string", config.Extensions["exampleextension/1"].(*componenttest.ExampleExtensionCfg).ExtraSetting) + + // Verify service. 
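+	// The service section enables only two of the three configured extensions.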
+ assert.Equal(t, 2, len(config.Service.Extensions)) + assert.Equal(t, "exampleextension/0", config.Service.Extensions[0]) + assert.Equal(t, "exampleextension/1", config.Service.Extensions[1]) + + // Verify receivers + assert.Equal(t, 2, len(config.Receivers), "Incorrect receivers count") + + assert.Equal(t, + &componenttest.ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "examplereceiver", + NameVal: "examplereceiver", + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:1000", + }, + ExtraSetting: "some string", + }, + config.Receivers["examplereceiver"], + "Did not load receiver config correctly") + + assert.Equal(t, + &componenttest.ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "examplereceiver", + NameVal: "examplereceiver/myreceiver", + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:12345", + }, + ExtraSetting: "some string", + }, + config.Receivers["examplereceiver/myreceiver"], + "Did not load receiver config correctly") + + // Verify exporters + assert.Equal(t, 2, len(config.Exporters), "Incorrect exporters count") + + assert.Equal(t, + &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter", + TypeVal: "exampleexporter", + }, + ExtraSetting: "some export string", + }, + config.Exporters["exampleexporter"], + "Did not load exporter config correctly") + + assert.Equal(t, + &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter/myexporter", + TypeVal: "exampleexporter", + }, + ExtraSetting: "some export string 2", + }, + config.Exporters["exampleexporter/myexporter"], + "Did not load exporter config correctly") + + // Verify Processors + assert.Equal(t, 1, len(config.Processors), "Incorrect processors count") + + assert.Equal(t, + &componenttest.ExampleProcessorCfg{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "exampleprocessor", + NameVal: "exampleprocessor", + }, + ExtraSetting: "some export string", + }, + config.Processors["exampleprocessor"], + "Did not load processor config correctly") + + // Verify Pipelines + assert.Equal(t, 1, len(config.Service.Pipelines), "Incorrect pipelines count") + + assert.Equal(t, + &configmodels.Pipeline{ + Name: "traces", + InputType: configmodels.TracesDataType, + Receivers: []string{"examplereceiver"}, + Processors: []string{"exampleprocessor"}, + Exporters: []string{"exampleexporter"}, + }, + config.Service.Pipelines["traces"], + "Did not load pipeline config correctly") +} + +func TestSimpleConfig(t *testing.T) { + var testCases = []struct { + name string // test case name (also file name containing config yaml) + }{ + {name: "simple-config-with-no-env"}, + {name: "simple-config-with-partial-env"}, + {name: "simple-config-with-all-env"}, + } + + const extensionExtra = "some extension string" + const extensionExtraMapValue = "some extension map value" + const extensionExtraListElement = "some extension list value" + assert.NoError(t, os.Setenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA", extensionExtra)) + assert.NoError(t, os.Setenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1", extensionExtraMapValue+"_1")) + assert.NoError(t, os.Setenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2", extensionExtraMapValue+"_2")) + assert.NoError(t, os.Setenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1", extensionExtraListElement+"_1")) + assert.NoError(t, os.Setenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_2", extensionExtraListElement+"_2")) 
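+	// The same environment-variable pattern repeats below for the receiver,
+	// processor and exporter settings.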
+ + const receiverExtra = "some receiver string" + const receiverExtraMapValue = "some receiver map value" + const receiverExtraListElement = "some receiver list value" + assert.NoError(t, os.Setenv("RECEIVERS_EXAMPLERECEIVER_EXTRA", receiverExtra)) + assert.NoError(t, os.Setenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_1", receiverExtraMapValue+"_1")) + assert.NoError(t, os.Setenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2", receiverExtraMapValue+"_2")) + assert.NoError(t, os.Setenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1", receiverExtraListElement+"_1")) + assert.NoError(t, os.Setenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_2", receiverExtraListElement+"_2")) + + const processorExtra = "some processor string" + const processorExtraMapValue = "some processor map value" + const processorExtraListElement = "some processor list value" + assert.NoError(t, os.Setenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA", processorExtra)) + assert.NoError(t, os.Setenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1", processorExtraMapValue+"_1")) + assert.NoError(t, os.Setenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_2", processorExtraMapValue+"_2")) + assert.NoError(t, os.Setenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1", processorExtraListElement+"_1")) + assert.NoError(t, os.Setenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_2", processorExtraListElement+"_2")) + + const exporterExtra = "some exporter string" + const exporterExtraMapValue = "some exporter map value" + const exporterExtraListElement = "some exporter list value" + assert.NoError(t, os.Setenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_INT", "65")) + assert.NoError(t, os.Setenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA", exporterExtra)) + assert.NoError(t, os.Setenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_1", exporterExtraMapValue+"_1")) + assert.NoError(t, os.Setenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_2", exporterExtraMapValue+"_2")) + assert.NoError(t, os.Setenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1", exporterExtraListElement+"_1")) + assert.NoError(t, os.Setenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2", exporterExtraListElement+"_2")) + + defer func() { + assert.NoError(t, os.Unsetenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA")) + assert.NoError(t, os.Unsetenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE")) + assert.NoError(t, os.Unsetenv("EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1")) + + assert.NoError(t, os.Unsetenv("RECEIVERS_EXAMPLERECEIVER_EXTRA")) + assert.NoError(t, os.Unsetenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE")) + assert.NoError(t, os.Unsetenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1")) + + assert.NoError(t, os.Unsetenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA")) + assert.NoError(t, os.Unsetenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE")) + assert.NoError(t, os.Unsetenv("PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1")) + + assert.NoError(t, os.Unsetenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_INT")) + assert.NoError(t, os.Unsetenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA")) + assert.NoError(t, os.Unsetenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE")) + assert.NoError(t, os.Unsetenv("EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1")) + }() + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + // Load the config + config, err := loadConfigFile(t, path.Join(".", "testdata", test.name+".yaml"), factories) + require.NoError(t, err, "Unable to load config") + + // Verify 
extensions. + assert.Equalf(t, 1, len(config.Extensions), "TEST[%s]", test.name) + assert.Equalf(t, + &componenttest.ExampleExtensionCfg{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: "exampleextension", + NameVal: "exampleextension", + }, + ExtraSetting: extensionExtra, + ExtraMapSetting: map[string]string{"ext-1": extensionExtraMapValue + "_1", "ext-2": extensionExtraMapValue + "_2"}, + ExtraListSetting: []string{extensionExtraListElement + "_1", extensionExtraListElement + "_2"}, + }, + config.Extensions["exampleextension"], + "TEST[%s] Did not load extension config correctly", test.name) + + // Verify service. + assert.Equalf(t, 1, len(config.Service.Extensions), "TEST[%s]", test.name) + assert.Equalf(t, "exampleextension", config.Service.Extensions[0], "TEST[%s]", test.name) + + // Verify receivers + assert.Equalf(t, 1, len(config.Receivers), "TEST[%s]", test.name) + + assert.Equalf(t, + &componenttest.ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "examplereceiver", + NameVal: "examplereceiver", + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:1234", + }, + ExtraSetting: receiverExtra, + ExtraMapSetting: map[string]string{"recv.1": receiverExtraMapValue + "_1", "recv.2": receiverExtraMapValue + "_2"}, + ExtraListSetting: []string{receiverExtraListElement + "_1", receiverExtraListElement + "_2"}, + }, + config.Receivers["examplereceiver"], + "TEST[%s] Did not load receiver config correctly", test.name) + + // Verify exporters + assert.Equalf(t, 1, len(config.Exporters), "TEST[%s]", test.name) + + assert.Equalf(t, + &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter", + TypeVal: "exampleexporter", + }, + ExtraInt: 65, + ExtraSetting: exporterExtra, + ExtraMapSetting: map[string]string{"exp_1": exporterExtraMapValue + "_1", "exp_2": exporterExtraMapValue + "_2"}, + ExtraListSetting: []string{exporterExtraListElement + "_1", exporterExtraListElement + "_2"}, + }, + config.Exporters["exampleexporter"], + "TEST[%s] Did not load exporter config correctly", test.name) + + // Verify Processors + assert.Equalf(t, 1, len(config.Processors), "TEST[%s]", test.name) + + assert.Equalf(t, + &componenttest.ExampleProcessorCfg{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "exampleprocessor", + NameVal: "exampleprocessor", + }, + ExtraSetting: processorExtra, + ExtraMapSetting: map[string]string{"proc_1": processorExtraMapValue + "_1", "proc_2": processorExtraMapValue + "_2"}, + ExtraListSetting: []string{processorExtraListElement + "_1", processorExtraListElement + "_2"}, + }, + config.Processors["exampleprocessor"], + "TEST[%s] Did not load processor config correctly", test.name) + + // Verify Pipelines + assert.Equalf(t, 1, len(config.Service.Pipelines), "TEST[%s]", test.name) + + assert.Equalf(t, + &configmodels.Pipeline{ + Name: "traces", + InputType: configmodels.TracesDataType, + Receivers: []string{"examplereceiver"}, + Processors: []string{"exampleprocessor"}, + Exporters: []string{"exampleexporter"}, + }, + config.Service.Pipelines["traces"], + "TEST[%s] Did not load pipeline config correctly", test.name) + }) + } +} + +func TestEscapedEnvVars(t *testing.T) { + const receiverExtraMapValue = "some receiver map value" + assert.NoError(t, os.Setenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2", receiverExtraMapValue)) + defer func() { + assert.NoError(t, os.Unsetenv("RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2")) + }() + + factories, err := 
componenttest.ExampleComponents() + assert.NoError(t, err) + + // Load the config + config, err := loadConfigFile(t, path.Join(".", "testdata", "simple-config-with-escaped-env.yaml"), factories) + require.NoError(t, err, "Unable to load config") + + // Verify extensions. + assert.Equal(t, 1, len(config.Extensions)) + assert.Equal(t, + &componenttest.ExampleExtensionCfg{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: "exampleextension", + NameVal: "exampleextension", + }, + ExtraSetting: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}", + ExtraMapSetting: map[string]string{"ext-1": "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}", "ext-2": "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}"}, + ExtraListSetting: []string{"${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1}", "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_2}"}, + }, + config.Extensions["exampleextension"], + "Did not load extension config correctly") + + // Verify service. + assert.Equal(t, 1, len(config.Service.Extensions)) + assert.Equal(t, "exampleextension", config.Service.Extensions[0]) + + // Verify receivers + assert.Equal(t, 1, len(config.Receivers)) + + assert.Equal(t, + &componenttest.ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "examplereceiver", + NameVal: "examplereceiver", + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:1234", + }, + ExtraSetting: "$RECEIVERS_EXAMPLERECEIVER_EXTRA", + ExtraMapSetting: map[string]string{ + // $$ -> escaped $ + "recv.1": "$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_1", + // $$$ -> escaped $ + substituted env var + "recv.2": "$" + receiverExtraMapValue, + // $$$$ -> two escaped $ + "recv.3": "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_3", + // escaped $ in the middle + "recv.4": "some${RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_4}text", + // $$$$ -> two escaped $ + "recv.5": "${ONE}${TWO}", + // trailing escaped $ + "recv.6": "text$", + // escaped $ alone + "recv.7": "$", + }, + ExtraListSetting: []string{"$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1", "$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_2"}, + }, + config.Receivers["examplereceiver"], + "Did not load receiver config correctly") + + // Verify exporters + assert.Equal(t, 1, len(config.Exporters)) + + assert.Equal(t, + &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter", + TypeVal: "exampleexporter", + }, + ExtraSetting: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA}", + ExtraMapSetting: map[string]string{"exp_1": "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_1}", "exp_2": "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_2}"}, + ExtraListSetting: []string{"${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}", "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}"}, + }, + config.Exporters["exampleexporter"], + "Did not load exporter config correctly") + + // Verify Processors + assert.Equal(t, 1, len(config.Processors)) + + assert.Equal(t, + &componenttest.ExampleProcessorCfg{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "exampleprocessor", + NameVal: "exampleprocessor", + }, + ExtraSetting: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA", + ExtraMapSetting: map[string]string{"proc_1": "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1", "proc_2": "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_2"}, + ExtraListSetting: []string{"$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1", "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_2"}, + }, + config.Processors["exampleprocessor"], + "Did not load 
processor config correctly") + + // Verify Pipelines + assert.Equal(t, 1, len(config.Service.Pipelines)) + + assert.Equal(t, + &configmodels.Pipeline{ + Name: "traces", + InputType: configmodels.TracesDataType, + Receivers: []string{"examplereceiver"}, + Processors: []string{"exampleprocessor"}, + Exporters: []string{"exampleexporter"}, + }, + config.Service.Pipelines["traces"], + "Did not load pipeline config correctly") +} + +func TestDecodeConfig_MultiProto(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + // Load the config + config, err := loadConfigFile(t, path.Join(".", "testdata", "multiproto-config.yaml"), factories) + require.NoError(t, err, "Unable to load config") + + // Verify receivers + assert.Equal(t, 2, len(config.Receivers), "Incorrect receivers count") + + assert.Equal(t, + &componenttest.MultiProtoReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "multireceiver", + NameVal: "multireceiver", + }, + Protocols: map[string]componenttest.MultiProtoReceiverOneCfg{ + "http": { + Endpoint: "example.com:8888", + ExtraSetting: "extra string 1", + }, + "tcp": { + Endpoint: "omnition.com:9999", + ExtraSetting: "extra string 2", + }, + }, + }, + config.Receivers["multireceiver"], + "Did not load receiver config correctly") + + assert.Equal(t, + &componenttest.MultiProtoReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "multireceiver", + NameVal: "multireceiver/myreceiver", + }, + Protocols: map[string]componenttest.MultiProtoReceiverOneCfg{ + "http": { + Endpoint: "localhost:12345", + ExtraSetting: "some string 1", + }, + "tcp": { + Endpoint: "0.0.0.0:4567", + ExtraSetting: "some string 2", + }, + }, + }, + config.Receivers["multireceiver/myreceiver"], + "Did not load receiver config correctly") +} + +func TestDecodeConfig_Invalid(t *testing.T) { + + var testCases = []struct { + name string // test case name (also file name containing config yaml) + expected configErrorCode // expected error (if nil any error is acceptable) + expectedMessage string // string that the error must contain + }{ + {name: "empty-config"}, + {name: "missing-all-sections"}, + {name: "missing-exporters", expected: errMissingExporters}, + {name: "missing-receivers", expected: errMissingReceivers}, + {name: "missing-processors"}, + {name: "invalid-extension-name", expected: errExtensionNotExists}, + {name: "invalid-receiver-name"}, + {name: "invalid-receiver-reference", expected: errPipelineReceiverNotExists}, + {name: "missing-extension-type", expected: errInvalidTypeAndNameKey}, + {name: "missing-receiver-type", expected: errInvalidTypeAndNameKey}, + {name: "missing-exporter-name-after-slash", expected: errInvalidTypeAndNameKey}, + {name: "missing-processor-type", expected: errInvalidTypeAndNameKey}, + {name: "missing-pipelines", expected: errMissingPipelines}, + {name: "pipeline-must-have-exporter", expected: errPipelineMustHaveExporter}, + {name: "pipeline-must-have-exporter2", expected: errPipelineMustHaveExporter}, + {name: "pipeline-must-have-receiver", expected: errPipelineMustHaveReceiver}, + {name: "pipeline-must-have-receiver2", expected: errPipelineMustHaveReceiver}, + {name: "pipeline-exporter-not-exists", expected: errPipelineExporterNotExists}, + {name: "pipeline-processor-not-exists", expected: errPipelineProcessorNotExists}, + {name: "unknown-extension-type", expected: errUnknownType, expectedMessage: "extensions"}, + {name: "unknown-receiver-type", expected: errUnknownType, expectedMessage: 
"receivers"}, + {name: "unknown-exporter-type", expected: errUnknownType, expectedMessage: "exporters"}, + {name: "unknown-processor-type", expected: errUnknownType, expectedMessage: "processors"}, + {name: "invalid-service-extensions-value", expected: errUnmarshalTopLevelStructureError, expectedMessage: "service"}, + {name: "invalid-sequence-value", expected: errUnmarshalTopLevelStructureError, expectedMessage: "pipelines"}, + {name: "invalid-pipeline-type", expected: errUnknownType, expectedMessage: "pipelines"}, + {name: "invalid-pipeline-type-and-name", expected: errInvalidTypeAndNameKey}, + {name: "duplicate-extension", expected: errDuplicateName, expectedMessage: "extensions"}, + {name: "duplicate-receiver", expected: errDuplicateName, expectedMessage: "receivers"}, + {name: "duplicate-exporter", expected: errDuplicateName, expectedMessage: "exporters"}, + {name: "duplicate-processor", expected: errDuplicateName, expectedMessage: "processors"}, + {name: "duplicate-pipeline", expected: errDuplicateName, expectedMessage: "pipelines"}, + {name: "invalid-top-level-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "top level"}, + {name: "invalid-extension-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "extensions"}, + {name: "invalid-service-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "service"}, + {name: "invalid-receiver-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "receivers"}, + {name: "invalid-processor-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "processors"}, + {name: "invalid-exporter-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "exporters"}, + {name: "invalid-pipeline-section", expected: errUnmarshalTopLevelStructureError, expectedMessage: "pipelines"}, + {name: "invalid-extension-sub-config", expected: errUnmarshalTopLevelStructureError}, + {name: "invalid-exporter-sub-config", expected: errUnmarshalTopLevelStructureError}, + {name: "invalid-processor-sub-config", expected: errUnmarshalTopLevelStructureError}, + {name: "invalid-receiver-sub-config", expected: errUnmarshalTopLevelStructureError}, + {name: "invalid-pipeline-sub-config", expected: errUnmarshalTopLevelStructureError}, + } + + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + _, err := loadConfigFile(t, path.Join(".", "testdata", test.name+".yaml"), factories) + if err == nil { + t.Error("expected error but succeeded") + } else if test.expected != 0 { + cfgErr, ok := err.(*configError) + if !ok { + t.Errorf("expected config error code %v but got a different error '%v'", test.expected, err) + } else { + assert.Equal(t, test.expected, cfgErr.code, err) + if test.expectedMessage != "" { + assert.Contains(t, cfgErr.Error(), test.expectedMessage) + } + assert.NotEmpty(t, cfgErr.Error(), "returned config error %v with empty error message", cfgErr.code) + } + } + }) + } +} + +func TestLoadEmptyConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + // Open the file for reading. + file, err := os.Open(path.Join(".", "testdata", "empty-config.yaml")) + require.NoError(t, err) + + defer func() { + require.NoError(t, file.Close()) + }() + + // Read yaml config from file + v := NewViper() + v.SetConfigType("yaml") + require.NoError(t, v.ReadConfig(file)) + + // Load the config from viper using the given factories. 
+ _, err = Load(v, factories) + assert.NoError(t, err) +} + +func loadConfigFile(t *testing.T, fileName string, factories component.Factories) (*configmodels.Config, error) { + // Read yaml config from file + v := NewViper() + v.SetConfigFile(fileName) + require.NoErrorf(t, v.ReadInConfig(), "unable to read the file %v", fileName) + + // Load the config from viper using the given factories. + cfg, err := Load(v, factories) + if err != nil { + return nil, err + } + return cfg, ValidateConfig(cfg, zap.NewNop()) +} + +type nestedConfig struct { + NestedStringValue string + NestedIntValue int +} + +type testConfig struct { + configmodels.ExporterSettings + + NestedConfigPtr *nestedConfig + NestedConfigValue nestedConfig + StringValue string + StringPtrValue *string + IntValue int +} + +func TestExpandEnvLoadedConfig(t *testing.T) { + assert.NoError(t, os.Setenv("NESTED_VALUE", "replaced_nested_value")) + assert.NoError(t, os.Setenv("VALUE", "replaced_value")) + assert.NoError(t, os.Setenv("PTR_VALUE", "replaced_ptr_value")) + + defer func() { + assert.NoError(t, os.Unsetenv("NESTED_VALUE")) + assert.NoError(t, os.Unsetenv("VALUE")) + assert.NoError(t, os.Unsetenv("PTR_VALUE")) + }() + + testString := "$PTR_VALUE" + + config := &testConfig{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: configmodels.Type("test"), + NameVal: "test", + }, + NestedConfigPtr: &nestedConfig{ + NestedStringValue: "$NESTED_VALUE", + NestedIntValue: 1, + }, + NestedConfigValue: nestedConfig{ + NestedStringValue: "$NESTED_VALUE", + NestedIntValue: 2, + }, + StringValue: "$VALUE", + StringPtrValue: &testString, + IntValue: 3, + } + + expandEnvLoadedConfig(config) + + replacedTestString := "replaced_ptr_value" + + assert.Equal(t, &testConfig{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: configmodels.Type("test"), + NameVal: "test", + }, + NestedConfigPtr: &nestedConfig{ + NestedStringValue: "replaced_nested_value", + NestedIntValue: 1, + }, + NestedConfigValue: nestedConfig{ + NestedStringValue: "replaced_nested_value", + NestedIntValue: 2, + }, + StringValue: "replaced_value", + StringPtrValue: &replacedTestString, + IntValue: 3, + }, config) +} + +func TestExpandEnvLoadedConfigEscapedEnv(t *testing.T) { + assert.NoError(t, os.Setenv("NESTED_VALUE", "replaced_nested_value")) + assert.NoError(t, os.Setenv("ESCAPED_VALUE", "replaced_escaped_value")) + assert.NoError(t, os.Setenv("ESCAPED_PTR_VALUE", "replaced_escaped_pointer_value")) + + defer func() { + assert.NoError(t, os.Unsetenv("NESTED_VALUE")) + assert.NoError(t, os.Unsetenv("ESCAPED_VALUE")) + assert.NoError(t, os.Unsetenv("ESCAPED_PTR_VALUE")) + }() + + testString := "$$ESCAPED_PTR_VALUE" + + config := &testConfig{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: configmodels.Type("test"), + NameVal: "test", + }, + NestedConfigPtr: &nestedConfig{ + NestedStringValue: "$NESTED_VALUE", + NestedIntValue: 1, + }, + NestedConfigValue: nestedConfig{ + NestedStringValue: "$NESTED_VALUE", + NestedIntValue: 2, + }, + StringValue: "$$ESCAPED_VALUE", + StringPtrValue: &testString, + IntValue: 3, + } + + expandEnvLoadedConfig(config) + + replacedTestString := "$ESCAPED_PTR_VALUE" + + assert.Equal(t, &testConfig{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: configmodels.Type("test"), + NameVal: "test", + }, + NestedConfigPtr: &nestedConfig{ + NestedStringValue: "replaced_nested_value", + NestedIntValue: 1, + }, + NestedConfigValue: nestedConfig{ + NestedStringValue: "replaced_nested_value", + NestedIntValue: 2, + }, 
+ StringValue: "$ESCAPED_VALUE",
+ StringPtrValue: &replacedTestString,
+ IntValue: 3,
+ }, config)
+}
+
+func TestExpandEnvLoadedConfigMissingEnv(t *testing.T) {
+ assert.NoError(t, os.Setenv("NESTED_VALUE", "replaced_nested_value"))
+
+ defer func() {
+ assert.NoError(t, os.Unsetenv("NESTED_VALUE"))
+ }()
+
+ testString := "$PTR_VALUE"
+
+ config := &testConfig{
+ ExporterSettings: configmodels.ExporterSettings{
+ TypeVal: configmodels.Type("test"),
+ NameVal: "test",
+ },
+ NestedConfigPtr: &nestedConfig{
+ NestedStringValue: "$NESTED_VALUE",
+ NestedIntValue: 1,
+ },
+ NestedConfigValue: nestedConfig{
+ NestedStringValue: "$NESTED_VALUE",
+ NestedIntValue: 2,
+ },
+ StringValue: "$VALUE",
+ StringPtrValue: &testString,
+ IntValue: 3,
+ }
+
+ expandEnvLoadedConfig(config)
+
+ replacedTestString := ""
+
+ assert.Equal(t, &testConfig{
+ ExporterSettings: configmodels.ExporterSettings{
+ TypeVal: configmodels.Type("test"),
+ NameVal: "test",
+ },
+ NestedConfigPtr: &nestedConfig{
+ NestedStringValue: "replaced_nested_value",
+ NestedIntValue: 1,
+ },
+ NestedConfigValue: nestedConfig{
+ NestedStringValue: "replaced_nested_value",
+ NestedIntValue: 2,
+ },
+ StringValue: "",
+ StringPtrValue: &replacedTestString,
+ IntValue: 3,
+ }, config)
+}
+
+func TestExpandEnvLoadedConfigNil(t *testing.T) {
+ var config *testConfig
+
+ // This should safely do nothing
+ expandEnvLoadedConfig(config)
+
+ assert.Equal(t, (*testConfig)(nil), config)
+}
+
+func TestExpandEnvLoadedConfigNoPointer(t *testing.T) {
+ assert.NoError(t, os.Setenv("VALUE", "replaced_value"))
+
+ defer func() {
+ assert.NoError(t, os.Unsetenv("VALUE"))
+ }()
+
+ config := testConfig{
+ StringValue: "$VALUE",
+ }
+
+ // This should do nothing as config is not a pointer
+ expandEnvLoadedConfig(config)
+
+ assert.Equal(t, testConfig{
+ StringValue: "$VALUE",
+ }, config)
+}
+
+type testUnexportedConfig struct {
+ configmodels.ExporterSettings
+
+ unexportedStringValue string
+ ExportedStringValue string
+}
+
+func TestExpandEnvLoadedConfigUnexportedField(t *testing.T) {
+ assert.NoError(t, os.Setenv("VALUE", "replaced_value"))
+
+ defer func() {
+ assert.NoError(t, os.Unsetenv("VALUE"))
+ }()
+
+ config := &testUnexportedConfig{
+ unexportedStringValue: "$VALUE",
+ ExportedStringValue: "$VALUE",
+ }
+
+ expandEnvLoadedConfig(config)
+
+ assert.Equal(t, &testUnexportedConfig{
+ unexportedStringValue: "$VALUE",
+ ExportedStringValue: "replaced_value",
+ }, config)
+}
diff --git a/internal/otel_collector/config/configauth/README.md b/internal/otel_collector/config/configauth/README.md
new file mode 100644
index 00000000000..18331b1a0fd
--- /dev/null
+++ b/internal/otel_collector/config/configauth/README.md
@@ -0,0 +1,17 @@
+# Authentication configuration for receivers
+
+This module allows server types, such as gRPC and HTTP, to be configured to perform authentication for requests and/or RPCs. Each server type is responsible for getting the request/RPC metadata and passing it down to the authenticator. Currently, only bearer token authentication is supported, although the module is ready to accept new authenticators.
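+
+For instance, a gRPC server type might wire an authenticator in roughly the following way (a minimal sketch: `NewAuthenticator`, `Start`, and the interceptor methods come from this package, while the surrounding receiver plumbing, the `cfg.Authentication` field, and the error handling are illustrative):
+
+```go
+// Build the authenticator from the receiver's `authentication` settings.
+auth, err := configauth.NewAuthenticator(cfg.Authentication)
+if err != nil {
+	return err
+}
+// Start lets the authenticator perform its setup, such as fetching the
+// OIDC provider's configuration.
+if err := auth.Start(context.Background()); err != nil {
+	return err
+}
+// The gRPC interceptors extract the request metadata and hand it to the
+// authenticator's Authenticate method.
+server := grpc.NewServer(
+	grpc.UnaryInterceptor(auth.UnaryInterceptor),
+	grpc.StreamInterceptor(auth.StreamInterceptor),
+)
+```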
+
+Example:
+```yaml
+receivers:
+  somereceiver:
+    grpc:
+      authentication:
+        attribute: authorization
+        oidc:
+          issuer_url: https://auth.example.com/
+          issuer_ca_path: /etc/pki/tls/cert.pem
+          client_id: my-oidc-client
+          username_claim: email
+```
diff --git a/internal/otel_collector/config/configauth/authenticator.go b/internal/otel_collector/config/configauth/authenticator.go
new file mode 100644
index 00000000000..62325a3e91e
--- /dev/null
+++ b/internal/otel_collector/config/configauth/authenticator.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configauth
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
+var (
+ errNoOIDCProvided = errors.New("no OIDC information provided")
+ errMetadataNotFound = errors.New("no request metadata found")
+ defaultAttribute = "authorization"
+)
+
+// Authenticator will authenticate the incoming request/RPC
+type Authenticator interface {
+ io.Closer
+
+ // Authenticate checks whether the given context contains valid auth data. Successfully authenticated calls will always return a nil error and a context with the auth data.
+ Authenticate(context.Context, map[string][]string) (context.Context, error)
+
+ // Start initializes the authenticator before first use, e.g. by fetching and caching the backing provider's configuration.
+ Start(context.Context) error
+
+ // UnaryInterceptor is a helper method to provide a gRPC-compatible UnaryInterceptor, typically calling the authenticator's Authenticate method.
+ UnaryInterceptor(context.Context, interface{}, *grpc.UnaryServerInfo, grpc.UnaryHandler) (interface{}, error)
+
+ // StreamInterceptor is a helper method to provide a gRPC-compatible StreamInterceptor, typically calling the authenticator's Authenticate method.
+ StreamInterceptor(interface{}, grpc.ServerStream, *grpc.StreamServerInfo, grpc.StreamHandler) error +} + +type authenticateFunc func(context.Context, map[string][]string) (context.Context, error) +type unaryInterceptorFunc func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, authenticate authenticateFunc) (interface{}, error) +type streamInterceptorFunc func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler, authenticate authenticateFunc) error + +// NewAuthenticator creates an authenticator based on the given configuration +func NewAuthenticator(cfg Authentication) (Authenticator, error) { + if cfg.OIDC == nil { + return nil, errNoOIDCProvided + } + + if len(cfg.Attribute) == 0 { + cfg.Attribute = defaultAttribute + } + + return newOIDCAuthenticator(cfg) +} + +func defaultUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, authenticate authenticateFunc) (interface{}, error) { + headers, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMetadataNotFound + } + + ctx, err := authenticate(ctx, headers) + if err != nil { + return nil, err + } + + return handler(ctx, req) +} + +func defaultStreamInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler, authenticate authenticateFunc) error { + ctx := stream.Context() + headers, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errMetadataNotFound + } + + // TODO: how to replace the context from the stream? + _, err := authenticate(ctx, headers) + if err != nil { + return err + } + + return handler(srv, stream) +} diff --git a/internal/otel_collector/config/configauth/authenticator_test.go b/internal/otel_collector/config/configauth/authenticator_test.go new file mode 100644 index 00000000000..b148485285e --- /dev/null +++ b/internal/otel_collector/config/configauth/authenticator_test.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package configauth + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func TestNewAuthenticator(t *testing.T) { + // test + p, err := NewAuthenticator(Authentication{ + OIDC: &OIDC{ + Audience: "some-audience", + IssuerURL: "http://example.com", + }, + }) + + // verify + assert.NotNil(t, p) + assert.NoError(t, err) +} + +func TestMissingOIDC(t *testing.T) { + // test + p, err := NewAuthenticator(Authentication{}) + + // verify + assert.Nil(t, p) + assert.Equal(t, errNoOIDCProvided, err) +} + +func TestDefaultUnaryInterceptorAuthSucceeded(t *testing.T) { + // prepare + handlerCalled := false + authCalled := false + authFunc := func(context.Context, map[string][]string) (context.Context, error) { + authCalled = true + return context.Background(), nil + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + handlerCalled = true + return nil, nil + } + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "some-auth-data")) + + // test + res, err := defaultUnaryInterceptor(ctx, nil, &grpc.UnaryServerInfo{}, handler, authFunc) + + // verify + assert.Nil(t, res) + assert.NoError(t, err) + assert.True(t, authCalled) + assert.True(t, handlerCalled) +} + +func TestDefaultUnaryInterceptorAuthFailure(t *testing.T) { + // prepare + authCalled := false + expectedErr := fmt.Errorf("not authenticated") + authFunc := func(context.Context, map[string][]string) (context.Context, error) { + authCalled = true + return context.Background(), expectedErr + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + assert.FailNow(t, "the handler should not have been called on auth failure!") + return nil, nil + } + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "some-auth-data")) + + // test + res, err := defaultUnaryInterceptor(ctx, nil, &grpc.UnaryServerInfo{}, handler, authFunc) + + // verify + assert.Nil(t, res) + assert.Equal(t, expectedErr, err) + assert.True(t, authCalled) +} + +func TestDefaultUnaryInterceptorMissingMetadata(t *testing.T) { + // prepare + authFunc := func(context.Context, map[string][]string) (context.Context, error) { + assert.FailNow(t, "the auth func should not have been called!") + return context.Background(), nil + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + assert.FailNow(t, "the handler should not have been called!") + return nil, nil + } + + // test + res, err := defaultUnaryInterceptor(context.Background(), nil, &grpc.UnaryServerInfo{}, handler, authFunc) + + // verify + assert.Nil(t, res) + assert.Equal(t, errMetadataNotFound, err) +} + +func TestDefaultStreamInterceptorAuthSucceeded(t *testing.T) { + // prepare + handlerCalled := false + authCalled := false + authFunc := func(context.Context, map[string][]string) (context.Context, error) { + authCalled = true + return context.Background(), nil + } + handler := func(srv interface{}, stream grpc.ServerStream) error { + handlerCalled = true + return nil + } + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "some-auth-data")) + streamServer := &mockServerStream{ + ctx: ctx, + } + + // test + err := defaultStreamInterceptor(nil, streamServer, &grpc.StreamServerInfo{}, handler, authFunc) + + // verify + assert.NoError(t, err) + assert.True(t, authCalled) + assert.True(t, handlerCalled) +} + +func 
TestDefaultStreamInterceptorAuthFailure(t *testing.T) {
+ // prepare
+ authCalled := false
+ expectedErr := fmt.Errorf("not authenticated")
+ authFunc := func(context.Context, map[string][]string) (context.Context, error) {
+ authCalled = true
+ return context.Background(), expectedErr
+ }
+ handler := func(srv interface{}, stream grpc.ServerStream) error {
+ assert.FailNow(t, "the handler should not have been called on auth failure!")
+ return nil
+ }
+ ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "some-auth-data"))
+ streamServer := &mockServerStream{
+ ctx: ctx,
+ }
+
+ // test
+ err := defaultStreamInterceptor(nil, streamServer, &grpc.StreamServerInfo{}, handler, authFunc)
+
+ // verify
+ assert.Equal(t, expectedErr, err)
+ assert.True(t, authCalled)
+}
+
+func TestDefaultStreamInterceptorMissingMetadata(t *testing.T) {
+ // prepare
+ authFunc := func(context.Context, map[string][]string) (context.Context, error) {
+ assert.FailNow(t, "the auth func should not have been called!")
+ return context.Background(), nil
+ }
+ handler := func(srv interface{}, stream grpc.ServerStream) error {
+ assert.FailNow(t, "the handler should not have been called!")
+ return nil
+ }
+ streamServer := &mockServerStream{
+ ctx: context.Background(),
+ }
+
+ // test
+ err := defaultStreamInterceptor(nil, streamServer, &grpc.StreamServerInfo{}, handler, authFunc)
+
+ // verify
+ assert.Equal(t, errMetadataNotFound, err)
+}
+
+type mockServerStream struct {
+ grpc.ServerStream
+ ctx context.Context
+}
+
+func (m *mockServerStream) Context() context.Context {
+ return m.ctx
+}
diff --git a/internal/otel_collector/config/configauth/configauth.go b/internal/otel_collector/config/configauth/configauth.go
new file mode 100644
index 00000000000..b76597ac901
--- /dev/null
+++ b/internal/otel_collector/config/configauth/configauth.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configauth
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+)
+
+// Authentication defines the auth settings for the receiver
+type Authentication struct {
+ // The attribute (header name) to look for auth data. Optional, default value: "authorization".
+ Attribute string `mapstructure:"attribute"`
+
+ // OIDC configures this receiver to use the given OIDC provider as the backend for the authentication mechanism.
+ // Required.
+ OIDC *OIDC `mapstructure:"oidc"`
+}
+
+// OIDC defines the OpenID Connect properties for this receiver
+type OIDC struct {
+ // IssuerURL is the base URL for the OIDC provider.
+ // Required.
+ IssuerURL string `mapstructure:"issuer_url"`
+
+ // Audience of the token, used during the verification.
+ // For example: "https://accounts.google.com" or "https://login.salesforce.com".
+ // Required.
+ Audience string `mapstructure:"audience"`
+
+ // The local path for the issuer CA's TLS server cert.
+ // Optional.
+ IssuerCAPath string `mapstructure:"issuer_ca_path"` + + // The claim to use as the username, in case the token's 'sub' isn't the suitable source. + // Optional. + UsernameClaim string `mapstructure:"username_claim"` + + // The claim that holds the subject's group membership information. + // Optional. + GroupsClaim string `mapstructure:"groups_claim"` +} + +// ToServerOptions builds a set of server options ready to be used by the gRPC server +func (a *Authentication) ToServerOptions() ([]grpc.ServerOption, error) { + auth, err := NewAuthenticator(*a) + if err != nil { + return nil, err + } + + // perhaps we should use a timeout here? + // TODO: we need a hook to call auth.Close() + if err := auth.Start(context.Background()); err != nil { + return nil, err + } + + return []grpc.ServerOption{ + grpc.UnaryInterceptor(auth.UnaryInterceptor), + grpc.StreamInterceptor(auth.StreamInterceptor), + }, nil +} diff --git a/internal/otel_collector/config/configauth/configauth_test.go b/internal/otel_collector/config/configauth/configauth_test.go new file mode 100644 index 00000000000..c04efaf15d8 --- /dev/null +++ b/internal/otel_collector/config/configauth/configauth_test.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configauth + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestToServerOptions(t *testing.T) { + // prepare + oidcServer, err := newOIDCServer() + require.NoError(t, err) + oidcServer.Start() + defer oidcServer.Close() + + config := Authentication{ + OIDC: &OIDC{ + IssuerURL: oidcServer.URL, + Audience: "unit-test", + GroupsClaim: "memberships", + }, + } + + // test + opts, err := config.ToServerOptions() + + // verify + assert.NoError(t, err) + assert.NotNil(t, opts) + assert.Len(t, opts, 2) // we have two interceptors +} + +func TestInvalidConfigurationFailsOnToServerOptions(t *testing.T) { + + for _, tt := range []struct { + cfg Authentication + }{ + { + Authentication{}, + }, + { + Authentication{ + OIDC: &OIDC{ + IssuerURL: "http://oidc.acme.invalid", + Audience: "unit-test", + GroupsClaim: "memberships", + }, + }, + }, + } { + // test + opts, err := tt.cfg.ToServerOptions() + + // verify + assert.Error(t, err) + assert.Nil(t, opts) + } + +} diff --git a/internal/otel_collector/config/configauth/context.go b/internal/otel_collector/config/configauth/context.go new file mode 100644 index 00000000000..a7e9eb2376c --- /dev/null +++ b/internal/otel_collector/config/configauth/context.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configauth
+
+import "context"
+
+var (
+ subjectKey = subjectType{}
+ groupsKey = groupsType{}
+)
+
+type subjectType struct{}
+type groupsType struct{}
+
+// SubjectFromContext returns the subject stored in the context, if any
+func SubjectFromContext(ctx context.Context) (string, bool) {
+ value, ok := ctx.Value(subjectKey).(string)
+ return value, ok
+}
+
+// GroupsFromContext returns a list of groups the subject in the context belongs to
+func GroupsFromContext(ctx context.Context) ([]string, bool) {
+ value, ok := ctx.Value(groupsKey).([]string)
+ return value, ok
+}
diff --git a/internal/otel_collector/config/configauth/context_test.go b/internal/otel_collector/config/configauth/context_test.go
new file mode 100644
index 00000000000..61dec7ab0ba
--- /dev/null
+++ b/internal/otel_collector/config/configauth/context_test.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configauth
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSubjectFromContext(t *testing.T) {
+ // prepare
+ ctx := context.WithValue(context.Background(), subjectKey, "my-subject")
+
+ // test
+ sub, ok := SubjectFromContext(ctx)
+
+ // verify
+ assert.Equal(t, "my-subject", sub)
+ assert.True(t, ok)
+}
+
+func TestSubjectFromContextNotPresent(t *testing.T) {
+ // prepare
+ ctx := context.Background()
+
+ // test
+ sub, ok := SubjectFromContext(ctx)
+
+ // verify
+ assert.False(t, ok)
+ assert.Empty(t, sub)
+}
+
+func TestSubjectFromContextWrongType(t *testing.T) {
+ // prepare
+ ctx := context.WithValue(context.Background(), subjectKey, 123)
+
+ // test
+ sub, ok := SubjectFromContext(ctx)
+
+ // verify
+ assert.False(t, ok)
+ assert.Empty(t, sub)
+}
+
+func TestGroupsFromContext(t *testing.T) {
+ // prepare
+ ctx := context.WithValue(context.Background(), groupsKey, []string{"my-groups"})
+
+ // test
+ groups, ok := GroupsFromContext(ctx)
+
+ // verify
+ assert.Equal(t, []string{"my-groups"}, groups)
+ assert.True(t, ok)
+}
+
+func TestGroupsFromContextNotPresent(t *testing.T) {
+ // prepare
+ ctx := context.Background()
+
+ // test
+ groups, ok := GroupsFromContext(ctx)
+
+ // verify
+ assert.False(t, ok)
+ assert.Empty(t, groups)
+}
+
+func TestGroupsFromContextWrongType(t *testing.T) {
+ // prepare
+ ctx := context.WithValue(context.Background(), groupsKey, 123)
+
+ // test
+ groups, ok := GroupsFromContext(ctx)
+
+ // verify
+ assert.False(t, ok)
+ assert.Empty(t, groups)
+}
diff --git a/internal/otel_collector/config/configauth/oidc_authenticator.go b/internal/otel_collector/config/configauth/oidc_authenticator.go
new file mode 100644
index 00000000000..a0deadcd03a
--- /dev/null
+++ b/internal/otel_collector/config/configauth/oidc_authenticator.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configauth
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-oidc"
+ "google.golang.org/grpc"
+)
+
+type oidcAuthenticator struct {
+ attribute string
+ config OIDC
+ provider *oidc.Provider
+ verifier *oidc.IDTokenVerifier
+
+ unaryInterceptor unaryInterceptorFunc
+ streamInterceptor streamInterceptorFunc
+}
+
+var (
+ _ Authenticator = (*oidcAuthenticator)(nil)
+
+ errNoClientIDProvided = errors.New("no ClientID provided for the OIDC configuration")
+ errNoIssuerURL = errors.New("no IssuerURL provided for the OIDC configuration")
+ errInvalidAuthenticationHeaderFormat = errors.New("invalid authorization header format")
+ errFailedToObtainClaimsFromToken = errors.New("failed to get the claims from the token issued by the OIDC provider")
+ errClaimNotFound = errors.New("username claim from the OIDC configuration not found on the token returned by the OIDC provider")
+ errUsernameNotString = errors.New("the username returned by the OIDC provider isn't a regular string")
+ errGroupsClaimNotFound = errors.New("groups claim from the OIDC configuration not found on the token returned by the OIDC provider")
+ errNotAuthenticated = errors.New("authentication didn't succeed")
+)
+
+func newOIDCAuthenticator(cfg Authentication) (*oidcAuthenticator, error) {
+ if cfg.OIDC.Audience == "" {
+ return nil, errNoClientIDProvided
+ }
+ if cfg.OIDC.IssuerURL == "" {
+ return nil, errNoIssuerURL
+ }
+ if cfg.Attribute == "" {
+ cfg.Attribute = defaultAttribute
+ }
+
+ return &oidcAuthenticator{
+ attribute: cfg.Attribute,
+ config: *cfg.OIDC,
+ unaryInterceptor: defaultUnaryInterceptor,
+ streamInterceptor: defaultStreamInterceptor,
+ }, nil
+}
+
+func (o *oidcAuthenticator) Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) {
+ authHeaders := headers[o.attribute]
+ if len(authHeaders) == 0 {
+ return ctx, errNotAuthenticated
+ }
+
+ // we only use the first header, if multiple values exist
+ parts := strings.Split(authHeaders[0], " ")
+ if len(parts) != 2 {
+ return ctx, errInvalidAuthenticationHeaderFormat
+ }
+
+ idToken, err := o.verifier.Verify(ctx, parts[1])
+ if err != nil {
+ return ctx, fmt.Errorf("failed to verify token: %w", err)
+ }
+
+ claims := map[string]interface{}{}
+ if err = idToken.Claims(&claims); err != nil {
+ // currently, this isn't a valid condition: the Verify call a few lines above
+ // will already attempt to parse the payload as JSON and set it as the claims
+ // for the token. As we are using a map to hold the claims, there's no way to fail
+ // to read the claims. It could fail if we were using a custom struct. Instead of
+ // swallowing the error, it's better to make this future-proof, in case the underlying
+ // code changes
+ return ctx, errFailedToObtainClaimsFromToken
+ }
+
+ sub, err := getSubjectFromClaims(claims, o.config.UsernameClaim, idToken.Subject)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to get subject from claims in the token: %w", err)
+ }
+ ctx = context.WithValue(ctx, subjectKey, sub)
+
+ gr, err := getGroupsFromClaims(claims, o.config.GroupsClaim)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to get groups from claims in the token: %w", err)
+ }
+ ctx = context.WithValue(ctx, groupsKey, gr)
+
+ return ctx, nil
+}
+
+func (o *oidcAuthenticator) Start(context.Context) error {
+ provider, err := getProviderForConfig(o.config)
+ if err != nil {
+ return fmt.Errorf("failed to get configuration from the auth server: %w", err)
+ }
+ o.provider = provider
+
+ o.verifier = o.provider.Verifier(&oidc.Config{
+ ClientID: o.config.Audience,
+ })
+
+ return nil
+}
+
+func (o *oidcAuthenticator) Close() error {
+ // no-op at the moment
+ // once we implement caching of the tokens we might need this
+ return nil
+}
+
+func (o *oidcAuthenticator) UnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ return o.unaryInterceptor(ctx, req, info, handler, o.Authenticate)
+}
+
+func (o *oidcAuthenticator) StreamInterceptor(srv interface{}, str grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ return o.streamInterceptor(srv, str, info, handler, o.Authenticate)
+}
+
+func getSubjectFromClaims(claims map[string]interface{}, usernameClaim string, fallback string) (string, error) {
+ if len(usernameClaim) > 0 {
+ username, found := claims[usernameClaim]
+ if !found {
+ return "", errClaimNotFound
+ }
+
+ sUsername, ok := username.(string)
+ if !ok {
+ return "", errUsernameNotString
+ }
+
+ return sUsername, nil
+ }
+
+ return fallback, nil
+}
+
+func getGroupsFromClaims(claims map[string]interface{}, groupsClaim string) ([]string, error) {
+ if len(groupsClaim) > 0 {
+ var groups []string
+ rawGroup, ok := claims[groupsClaim]
+ if !ok {
+ return nil, errGroupsClaimNotFound
+ }
+ switch v := rawGroup.(type) {
+ case string:
+ groups = append(groups, v)
+ case []string:
+ groups = v
+ case []interface{}:
+ groups = make([]string, 0, len(v))
+ for i := range v {
+ groups = append(groups, fmt.Sprintf("%v", v[i]))
+ }
+ }
+
+ return groups, nil
+ }
+
+ return []string{}, nil
+}
+
+func getProviderForConfig(config OIDC) (*oidc.Provider, error) {
+ t := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 5 * time.Second,
+ KeepAlive: 10 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 5 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+
+ cert, err := getIssuerCACertFromPath(config.IssuerCAPath)
+ if err != nil {
+ return nil, err // the errors from this path have enough context already
+ }
+
+ if cert != nil {
+ t.TLSClientConfig = &tls.Config{
+ RootCAs: x509.NewCertPool(),
+ }
+ t.TLSClientConfig.RootCAs.AddCert(cert)
+ }
+
+ client := &http.Client{
+ Timeout: 5 * time.Second,
+ Transport: t,
+ }
+ oidcContext := oidc.ClientContext(context.Background(), client)
+ return oidc.NewProvider(oidcContext, config.IssuerURL)
+}
+
+func getIssuerCACertFromPath(path string) (*x509.Certificate, error) {
+ if path == "" {
+ return nil, nil
+ }
+
+ rawCA, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("could not read the CA file %q: %w", path, err)
+ }
+
+ if len(rawCA) == 0 {
+ return nil, fmt.Errorf("could not read the CA file %q: empty file", path)
+ }
+
+ block, _ := pem.Decode(rawCA)
+ if block == nil {
+ return nil, fmt.Errorf("cannot decode the contents of the CA file %q", path)
+ }
+
+ return x509.ParseCertificate(block.Bytes)
+}
diff --git a/internal/otel_collector/config/configauth/oidc_authenticator_test.go b/internal/otel_collector/config/configauth/oidc_authenticator_test.go
new file mode 100644
index 00000000000..7f4879c6c28
--- /dev/null
+++ b/internal/otel_collector/config/configauth/oidc_authenticator_test.go
@@ -0,0 +1,548 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configauth
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+)
+
+func TestOIDCAuthenticationSucceeded(t *testing.T) {
+ // prepare
+ oidcServer, err := newOIDCServer()
+ require.NoError(t, err)
+ oidcServer.Start()
+ defer oidcServer.Close()
+
+ config := Authentication{
+ OIDC: &OIDC{
+ IssuerURL: oidcServer.URL,
+ Audience: "unit-test",
+ GroupsClaim: "memberships",
+ },
+ }
+ p, err := newOIDCAuthenticator(config)
+ require.NoError(t, err)
+
+ err = p.Start(context.Background())
+ require.NoError(t, err)
+
+ payload, _ := json.Marshal(map[string]interface{}{
+ "sub": "jdoe@example.com",
+ "name": "jdoe",
+ "iss": oidcServer.URL,
+ "aud": "unit-test",
+ "exp": time.Now().Add(time.Minute).Unix(),
+ "memberships": []string{"department-1", "department-2"},
+ })
+ token, err := oidcServer.token(payload)
+ require.NoError(t, err)
+
+ // test
+ ctx, err := p.Authenticate(context.Background(), map[string][]string{"authorization": {fmt.Sprintf("Bearer %s", token)}})
+
+ // verify
+ assert.NotNil(t, ctx)
+ assert.NoError(t, err)
+
+ subject, ok := SubjectFromContext(ctx)
+ assert.True(t, ok)
+ assert.EqualValues(t, "jdoe@example.com", subject)
+
+ groups, ok := GroupsFromContext(ctx)
+ assert.True(t, ok)
+ assert.Contains(t, groups, "department-1")
+ assert.Contains(t, groups, "department-2")
+}
+
+func TestOIDCProviderForConfigWithTLS(t *testing.T) {
+ // prepare the CA cert for the TLS handler
+ cert := x509.Certificate{
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(10 * time.Second),
+ IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},
+ SerialNumber: big.NewInt(9447457), // some number
+ }
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err)
+
+ x509Cert, err := x509.CreateCertificate(rand.Reader, &cert, &cert, &priv.PublicKey, priv)
+ require.NoError(t, err)
+
+ caFile, err := ioutil.TempFile(os.TempDir(), "cert")
+ require.NoError(t, err)
+ defer os.Remove(caFile.Name())
+
+ err = pem.Encode(caFile, &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: x509Cert,
+ })
+ require.NoError(t, err)
+
+ oidcServer, err := newOIDCServer()
+ require.NoError(t, err)
+ defer oidcServer.Close()
+
+ tlsCert := tls.Certificate{
+ Certificate: [][]byte{x509Cert},
+ PrivateKey: priv,
+ }
+ oidcServer.TLS = &tls.Config{Certificates: []tls.Certificate{tlsCert}}
+ oidcServer.StartTLS()
+
+ // prepare the configuration
+ config := OIDC{
+ IssuerURL: oidcServer.URL,
+ IssuerCAPath: caFile.Name(),
+ Audience: "unit-test",
+ }
+
+ // test
+ provider, err := getProviderForConfig(config)
+
+ // verify
+ assert.NoError(t, err)
+ assert.NotNil(t, provider)
+}
+
+func TestOIDCLoadIssuerCAFromPath(t *testing.T) {
+ // prepare
+ cert := x509.Certificate{
+ SerialNumber: big.NewInt(9447457), // some number
+ IsCA: true,
+ }
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err)
+
+ x509Cert, err := x509.CreateCertificate(rand.Reader, &cert, &cert, &priv.PublicKey, priv)
+ require.NoError(t, err)
+
+ file, err := ioutil.TempFile(os.TempDir(), "cert")
+ require.NoError(t, err)
+ defer os.Remove(file.Name())
+
+ err = pem.Encode(file, &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: x509Cert,
+ })
+ require.NoError(t, err)
+
+ // test
+ loaded, err := getIssuerCACertFromPath(file.Name())
+
+ // verify
+ assert.NoError(t, err)
+ assert.Equal(t, cert.SerialNumber, loaded.SerialNumber)
+}
+
+func TestOIDCFailedToLoadIssuerCAFromPathEmptyCert(t *testing.T) {
+ // prepare
+ file, err := ioutil.TempFile(os.TempDir(), "cert")
+ require.NoError(t, err)
+ defer os.Remove(file.Name())
+
+ // test
+ loaded, err := getIssuerCACertFromPath(file.Name()) // the file exists, but its contents aren't a cert
+
+ // verify
+ assert.Error(t, err)
+ assert.Nil(t, loaded)
+}
+
+func TestOIDCFailedToLoadIssuerCAFromPathMissingFile(t *testing.T) {
+ // test
+ loaded, err := getIssuerCACertFromPath("some-non-existing-file")
+
+ // verify
+ assert.Error(t, err)
+ assert.Nil(t, loaded)
+}
+
+func TestOIDCFailedToLoadIssuerCAFromPathInvalidContent(t *testing.T) {
+ // prepare
+ file, err := ioutil.TempFile(os.TempDir(), "cert")
+ require.NoError(t, err)
+ defer os.Remove(file.Name())
+ file.Write([]byte("foobar"))
+
+ config := OIDC{
+ IssuerCAPath: file.Name(),
+ }
+
+ // test
+ provider, err := getProviderForConfig(config) // cross test with getIssuerCACertFromPath
+
+ // verify
+ assert.Error(t, err)
+ assert.Nil(t, provider)
+}
+
+func TestOIDCInvalidAuthHeader(t *testing.T) {
+ // prepare
+ p, err := newOIDCAuthenticator(Authentication{
+ OIDC: &OIDC{
+ Audience: "some-audience",
+ IssuerURL: "http://example.com",
+ },
+ })
+ require.NoError(t, err)
+
+ // test
+ ctx, err := p.Authenticate(context.Background(), map[string][]string{"authorization": {"some-value"}})
+
+ // verify
+ assert.NotNil(t, ctx)
+ assert.Equal(t, errInvalidAuthenticationHeaderFormat, err)
+}
+
+func TestOIDCNotAuthenticated(t *testing.T) {
+ // prepare
+ p, err := newOIDCAuthenticator(Authentication{
+ OIDC: &OIDC{
+ Audience: "some-audience",
+ IssuerURL: "http://example.com",
+ },
+ })
+ require.NoError(t, err)
+
+ // test
+ ctx, err := p.Authenticate(context.Background(), make(map[string][]string))
+
+ // verify
+ assert.NotNil(t, ctx)
+ assert.Equal(t, errNotAuthenticated, err)
+}
+
+func TestProviderNotReachable(t *testing.T) {
+ // prepare
+ p, err := newOIDCAuthenticator(Authentication{
+ OIDC: &OIDC{
+ Audience: "some-audience",
+ IssuerURL: "http://example.com",
+ },
+ })
+
require.NoError(t, err) + + // test + err = p.Start(context.Background()) + + // verify + assert.Error(t, err) +} + +func TestFailedToVerifyToken(t *testing.T) { + // prepare + oidcServer, err := newOIDCServer() + require.NoError(t, err) + oidcServer.Start() + defer oidcServer.Close() + + p, err := newOIDCAuthenticator(Authentication{ + OIDC: &OIDC{ + IssuerURL: oidcServer.URL, + Audience: "unit-test", + }, + }) + require.NoError(t, err) + + err = p.Start(context.Background()) + require.NoError(t, err) + + // test + ctx, err := p.Authenticate(context.Background(), map[string][]string{"authorization": {"Bearer some-token"}}) + + // verify + assert.NotNil(t, ctx) + assert.Error(t, err) +} + +func TestFailedToGetGroupsClaimFromToken(t *testing.T) { + // prepare + oidcServer, err := newOIDCServer() + require.NoError(t, err) + oidcServer.Start() + defer oidcServer.Close() + + for _, tt := range []struct { + casename string + config Authentication + expectedError error + }{ + { + "groupsClaimNonExisting", + Authentication{ + OIDC: &OIDC{ + IssuerURL: oidcServer.URL, + Audience: "unit-test", + GroupsClaim: "non-existing-claim", + }, + }, + errGroupsClaimNotFound, + }, + { + "usernameClaimNonExisting", + Authentication{ + OIDC: &OIDC{ + IssuerURL: oidcServer.URL, + Audience: "unit-test", + UsernameClaim: "non-existing-claim", + }, + }, + errClaimNotFound, + }, + { + "usernameNotString", + Authentication{ + OIDC: &OIDC{ + IssuerURL: oidcServer.URL, + Audience: "unit-test", + UsernameClaim: "some-non-string-field", + }, + }, + errUsernameNotString, + }, + } { + t.Run(tt.casename, func(t *testing.T) { + p, err := newOIDCAuthenticator(tt.config) + require.NoError(t, err) + + err = p.Start(context.Background()) + require.NoError(t, err) + + payload, _ := json.Marshal(map[string]interface{}{ + "iss": oidcServer.URL, + "some-non-string-field": 123, + "aud": "unit-test", + "exp": time.Now().Add(time.Minute).Unix(), + }) + token, err := oidcServer.token(payload) + require.NoError(t, err) + + // test + ctx, err := p.Authenticate(context.Background(), map[string][]string{"authorization": {fmt.Sprintf("Bearer %s", token)}}) + + // verify + assert.NotNil(t, ctx) + assert.True(t, errors.Is(err, tt.expectedError)) + }) + } +} + +func TestSubjectFromClaims(t *testing.T) { + // prepare + claims := map[string]interface{}{ + "username": "jdoe", + } + + // test + username, err := getSubjectFromClaims(claims, "username", "") + + // verify + assert.NoError(t, err) + assert.Equal(t, "jdoe", username) +} + +func TestSubjectFallback(t *testing.T) { + // prepare + claims := map[string]interface{}{ + "sub": "jdoe", + } + + // test + username, err := getSubjectFromClaims(claims, "", "jdoe") + + // verify + assert.NoError(t, err) + assert.Equal(t, "jdoe", username) +} + +func TestGroupsFromClaim(t *testing.T) { + // prepare + for _, tt := range []struct { + casename string + input interface{} + expected []string + }{ + { + "single-string", + "department-1", + []string{"department-1"}, + }, + { + "multiple-strings", + []string{"department-1", "department-2"}, + []string{"department-1", "department-2"}, + }, + { + "multiple-things", + []interface{}{"department-1", 123}, + []string{"department-1", "123"}, + }, + } { + t.Run(tt.casename, func(t *testing.T) { + claims := map[string]interface{}{ + "sub": "jdoe", + "memberships": tt.input, + } + + // test + groups, err := getGroupsFromClaims(claims, "memberships") + assert.NoError(t, err) + assert.Equal(t, tt.expected, groups) + }) + } +} + +func TestEmptyGroupsClaim(t *testing.T) { 
+ // prepare + claims := map[string]interface{}{ + "sub": "jdoe", + } + + // test + groups, err := getGroupsFromClaims(claims, "") + assert.NoError(t, err) + assert.Equal(t, []string{}, groups) +} + +func TestMissingClient(t *testing.T) { + // prepare + config := Authentication{ + OIDC: &OIDC{ + IssuerURL: "http://example.com/", + }, + } + + // test + p, err := newOIDCAuthenticator(config) + + // verify + assert.Nil(t, p) + assert.Equal(t, errNoClientIDProvided, err) +} + +func TestMissingIssuerURL(t *testing.T) { + // prepare + config := Authentication{ + OIDC: &OIDC{ + Audience: "some-audience", + }, + } + + // test + p, err := newOIDCAuthenticator(config) + + // verify + assert.Nil(t, p) + assert.Equal(t, errNoIssuerURL, err) +} + +func TestClose(t *testing.T) { + // prepare + config := Authentication{ + OIDC: &OIDC{ + Audience: "some-audience", + IssuerURL: "http://example.com/", + }, + } + p, err := newOIDCAuthenticator(config) + require.NoError(t, err) + require.NotNil(t, p) + + // test + err = p.Close() // for now, we never fail + + // verify + assert.NoError(t, err) +} + +func TestUnaryInterceptor(t *testing.T) { + // prepare + config := Authentication{ + OIDC: &OIDC{ + Audience: "some-audience", + IssuerURL: "http://example.com/", + }, + } + p, err := newOIDCAuthenticator(config) + require.NoError(t, err) + require.NotNil(t, p) + + interceptorCalled := false + p.unaryInterceptor = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, authenticate authenticateFunc) (interface{}, error) { + interceptorCalled = true + return nil, nil + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + } + + // test + res, err := p.UnaryInterceptor(context.Background(), nil, &grpc.UnaryServerInfo{}, handler) + + // verify + assert.NoError(t, err) + assert.Nil(t, res) + assert.True(t, interceptorCalled) +} + +func TestStreamInterceptor(t *testing.T) { + // prepare + config := Authentication{ + OIDC: &OIDC{ + Audience: "some-audience", + IssuerURL: "http://example.com/", + }, + } + p, err := newOIDCAuthenticator(config) + require.NoError(t, err) + require.NotNil(t, p) + + interceptorCalled := false + p.streamInterceptor = func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler, authenticate authenticateFunc) error { + interceptorCalled = true + return nil + } + handler := func(srv interface{}, stream grpc.ServerStream) error { + return nil + } + streamServer := &mockServerStream{ + ctx: context.Background(), + } + + // test + err = p.StreamInterceptor(nil, streamServer, &grpc.StreamServerInfo{}, handler) + + // verify + assert.NoError(t, err) + assert.True(t, interceptorCalled) +} diff --git a/internal/otel_collector/config/configauth/oidc_server_test.go b/internal/otel_collector/config/configauth/oidc_server_test.go new file mode 100644 index 00000000000..3faac34f09b --- /dev/null +++ b/internal/otel_collector/config/configauth/oidc_server_test.go @@ -0,0 +1,136 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package configauth + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" // #nosec + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/binary" + "encoding/json" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "time" +) + +// oidcServer is an overly simplified OIDC mock server, good enough to sign the tokens required by the test +// and pass the verification done by the underlying libraries +type oidcServer struct { + *httptest.Server + x509Cert []byte + privateKey *rsa.PrivateKey +} + +func newOIDCServer() (*oidcServer, error) { + jwks := map[string]interface{}{} + + mux := http.NewServeMux() + server := httptest.NewUnstartedServer(mux) + + mux.HandleFunc("/.well-known/openid-configuration", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + json.NewEncoder(w).Encode(map[string]interface{}{ + "issuer": server.URL, + "jwks_uri": fmt.Sprintf("%s/.well-known/jwks.json", server.URL), + }) + }) + mux.HandleFunc("/.well-known/jwks.json", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + json.NewEncoder(w).Encode(jwks) + }) + + privateKey, err := createPrivateKey() + if err != nil { + return nil, err + } + + x509Cert, err := createCertificate(privateKey) + if err != nil { + return nil, err + } + + eBytes := make([]byte, 8) + binary.BigEndian.PutUint64(eBytes, uint64(privateKey.E)) + eBytes = bytes.TrimLeft(eBytes, "\x00") + + // #nosec + sum := sha1.Sum(x509Cert) + jwks["keys"] = []map[string]interface{}{{ + "alg": "RS256", + "kty": "RSA", + "use": "sig", + "x5c": []string{base64.StdEncoding.EncodeToString(x509Cert)}, + "n": base64.RawURLEncoding.EncodeToString(privateKey.N.Bytes()), + "e": base64.RawURLEncoding.EncodeToString(eBytes), + "kid": base64.RawURLEncoding.EncodeToString(sum[:]), + "x5t": base64.RawURLEncoding.EncodeToString(sum[:]), + }} + + return &oidcServer{server, x509Cert, privateKey}, nil +} + +func (s *oidcServer) token(jsonPayload []byte) (string, error) { + jsonHeader, _ := json.Marshal(map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + }) + + header := base64.RawURLEncoding.EncodeToString(jsonHeader) + payload := base64.RawURLEncoding.EncodeToString(jsonPayload) + digest := sha256.Sum256([]byte(fmt.Sprintf("%s.%s", header, payload))) + + signature, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, crypto.SHA256, digest[:]) + if err != nil { + return "", err + } + + encodedSignature := base64.RawURLEncoding.EncodeToString(signature) + token := fmt.Sprintf("%s.%s.%s", header, payload, encodedSignature) + return token, nil +} + +func createCertificate(privateKey *rsa.PrivateKey) ([]byte, error) { + cert := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Ecorp, Inc"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(5 * time.Minute), + } + + x509Cert, err := x509.CreateCertificate(rand.Reader, &cert, &cert, &privateKey.PublicKey, privateKey) + if err != nil { + return nil, err + } + + return x509Cert, nil +} + +func createPrivateKey() (*rsa.PrivateKey, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, err + } + return priv, nil +} diff --git a/internal/otel_collector/config/configcheck/configcheck.go b/internal/otel_collector/config/configcheck/configcheck.go 
new file mode 100644 index 00000000000..5d344f28671 --- /dev/null +++ b/internal/otel_collector/config/configcheck/configcheck.go @@ -0,0 +1,193 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configcheck has checks to be applied to configuration +// objects implemented by factories of components used in the OpenTelemetry +// collector. It is recommended for implementers of components to run the +// validations available on this package. +package configcheck + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" +) + +// The regular expression for valid config field tag. +var configFieldTagRegExp = regexp.MustCompile("^[a-z0-9][a-z0-9_]*$") + +// ValidateConfigFromFactories checks if all configurations for the given factories +// are satisfying the patterns used by the collector. +func ValidateConfigFromFactories(factories component.Factories) error { + var errs []error + var configs []interface{} + + for _, factory := range factories.Receivers { + configs = append(configs, factory.CreateDefaultConfig()) + } + for _, factory := range factories.Processors { + configs = append(configs, factory.CreateDefaultConfig()) + } + for _, factory := range factories.Exporters { + configs = append(configs, factory.CreateDefaultConfig()) + } + for _, factory := range factories.Extensions { + configs = append(configs, factory.CreateDefaultConfig()) + } + + for _, config := range configs { + if err := ValidateConfig(config); err != nil { + errs = append(errs, err) + } + } + + return componenterror.CombineErrors(errs) +} + +// ValidateConfig enforces that given configuration object is following the patterns +// used by the collector. This ensures consistency between different implementations +// of components and extensions. It is recommended for implementers of components +// to call this function on their tests passing the default configuration of the +// component factory. +func ValidateConfig(config interface{}) error { + t := reflect.TypeOf(config) + + tk := t.Kind() + if t.Kind() == reflect.Ptr { + t = t.Elem() + tk = t.Kind() + } + + if tk != reflect.Struct { + return fmt.Errorf( + "config must be a struct or a pointer to one, the passed object is a %s", + tk) + } + + return validateConfigDataType(t) +} + +// validateConfigDataType performs a descending validation of the given type. +// If the type is a struct it goes to each of its fields to check for the proper +// tags. +func validateConfigDataType(t reflect.Type) error { + var errs []error + + switch t.Kind() { + case reflect.Ptr: + if err := validateConfigDataType(t.Elem()); err != nil { + errs = append(errs, err) + } + case reflect.Struct: + // Reflect on the pointed data and check each of its fields. 
+ nf := t.NumField() + for i := 0; i < nf; i++ { + f := t.Field(i) + if err := checkStructFieldTags(f); err != nil { + errs = append(errs, err) + } + } + default: + // The config object can carry other types but they are not used when + // reading the configuration via viper so ignore them. Basically ignore: + // reflect.Uintptr, reflect.Chan, reflect.Func, reflect.Interface, and + // reflect.UnsafePointer. + } + + if err := componenterror.CombineErrors(errs); err != nil { + return fmt.Errorf( + "type %q from package %q has invalid config settings: %v", + t.Name(), + t.PkgPath(), + err) + } + + return nil +} + +// checkStructFieldTags inspects the tags of a struct field. +func checkStructFieldTags(f reflect.StructField) error { + + tagValue := f.Tag.Get("mapstructure") + if tagValue == "" { + + // Ignore special types. + switch f.Type.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Uintptr, reflect.UnsafePointer: + // Allow the config to carry the types above, but since they are not read + // when loading configuration, just ignore them. + return nil + } + + // Public fields of other types should be tagged. + chars := []byte(f.Name) + if len(chars) > 0 && chars[0] >= 'A' && chars[0] <= 'Z' { + return fmt.Errorf("mapstructure tag not present on field %q", f.Name) + } + + // Not public field, no need to have a tag. + return nil + } + + tagParts := strings.Split(tagValue, ",") + if tagParts[0] != "" { + if tagParts[0] == "-" { + // Nothing to do, as mapstructure decode skips this field. + return nil + } + } + + // Check if squash is specified. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + // Field was squashed. + if f.Type.Kind() != reflect.Struct { + return fmt.Errorf( + "attempt to squash non-struct type on field %q", f.Name) + } + } + + switch f.Type.Kind() { + case reflect.Struct: + // It is another struct, continue down-level + return validateConfigDataType(f.Type) + + case reflect.Map, reflect.Slice, reflect.Array: + // The element of map, array, or slice can be itself a configuration object. + return validateConfigDataType(f.Type.Elem()) + + default: + fieldTag := tagParts[0] + if !configFieldTagRegExp.MatchString(fieldTag) { + return fmt.Errorf( + "field %q has config tag %q which doesn't satisfy %q", + f.Name, + fieldTag, + configFieldTagRegExp.String()) + } + } + + return nil +} diff --git a/internal/otel_collector/config/configcheck/configcheck_test.go b/internal/otel_collector/config/configcheck/configcheck_test.go new file mode 100644 index 00000000000..e8618a963f7 --- /dev/null +++ b/internal/otel_collector/config/configcheck/configcheck_test.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package configcheck
+
+import (
+ "context"
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configmodels"
+ "go.opentelemetry.io/collector/service/defaultcomponents"
+)
+
+func TestValidateConfigFromFactories_Success(t *testing.T) {
+ factories, err := defaultcomponents.Components()
+ require.NoError(t, err)
+
+ err = ValidateConfigFromFactories(factories)
+ require.NoError(t, err)
+}
+
+func TestValidateConfigFromFactories_Failure(t *testing.T) {
+ factories, err := defaultcomponents.Components()
+ require.NoError(t, err)
+
+ // Add a factory returning config not following pattern to force error.
+ f := &badConfigExtensionFactory{}
+ factories.Extensions[f.Type()] = f
+
+ err = ValidateConfigFromFactories(factories)
+ require.Error(t, err)
+}
+
+func TestValidateConfigPointerAndValue(t *testing.T) {
+ config := struct {
+ SomeField string `mapstructure:"test"`
+ }{}
+ assert.NoError(t, ValidateConfig(config))
+ assert.NoError(t, ValidateConfig(&config))
+}
+
+func TestValidateConfig(t *testing.T) {
+ type BadConfigTag struct {
+ BadTagField int `mapstructure:"test-dash"`
+ }
+
+ tests := []struct {
+ name string
+ config interface{}
+ wantErrMsgSubStr string
+ }{
+ {
+ name: "typical_config",
+ config: struct {
+ MyPublicString string `mapstructure:"string"`
+ }{},
+ },
+ {
+ name: "private_fields_ignored",
+ config: struct {
+ // A public type with proper tag.
+ MyPublicString string `mapstructure:"string"`
+ // A public type with proper tag.
+ MyPublicInt string `mapstructure:"int"`
+ // A public type that should be ignored.
+ MyFunc func() error
+ // A public type that should be ignored.
+ Reader io.Reader
+ // private type not tagged.
+ myPrivateString string + _someInt int + }{}, + }, + { + name: "not_struct_nor_pointer", + config: func(x int) int { + return x * x + }, + wantErrMsgSubStr: "config must be a struct or a pointer to one, the passed object is a func", + }, + { + name: "squash_on_non_struct", + config: struct { + MyInt int `mapstructure:",squash"` + }{}, + wantErrMsgSubStr: "attempt to squash non-struct type on field \"MyInt\"", + }, + { + name: "invalid_tag_detected", + config: BadConfigTag{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "public_field_must_have_tag", + config: struct { + PublicFieldWithoutMapstructureTag string + }{}, + wantErrMsgSubStr: "mapstructure tag not present on field \"PublicFieldWithoutMapstructureTag\"", + }, + { + name: "invalid_map_item", + config: struct { + Map map[string]BadConfigTag `mapstructure:"test_map"` + }{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "invalid_slice_item", + config: struct { + Slice []BadConfigTag `mapstructure:"test_slice"` + }{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "invalid_array_item", + config: struct { + Array [2]BadConfigTag `mapstructure:"test_array"` + }{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "invalid_map_item_ptr", + config: struct { + Map map[string]*BadConfigTag `mapstructure:"test_map"` + }{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "invalid_slice_item_ptr", + config: struct { + Slice []*BadConfigTag `mapstructure:"test_slice"` + }{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "invalid_array_item_ptr", + config: struct { + Array [2]*BadConfigTag `mapstructure:"test_array"` + }{}, + wantErrMsgSubStr: "field \"BadTagField\" has config tag \"test-dash\" which doesn't satisfy", + }, + { + name: "valid_map_item", + config: struct { + Map map[string]int `mapstructure:"test_map"` + }{}, + }, + { + name: "valid_slice_item", + config: struct { + Slice []string `mapstructure:"test_slice"` + }{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateConfig(tt.config) + if tt.wantErrMsgSubStr == "" { + assert.NoError(t, err) + } else { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), tt.wantErrMsgSubStr)) + } + }) + } +} + +// badConfigExtensionFactory was created to force error path from factory returning +// a config not satisfying the validation. 
+type badConfigExtensionFactory struct{} + +func (b badConfigExtensionFactory) Type() configmodels.Type { + return "bad_config" +} + +func (b badConfigExtensionFactory) CreateDefaultConfig() configmodels.Extension { + return &struct { + configmodels.ExtensionSettings + BadTagField int `mapstructure:"tag-with-dashes"` + }{} +} + +func (b badConfigExtensionFactory) CreateExtension(_ context.Context, _ component.ExtensionCreateParams, _ configmodels.Extension) (component.ServiceExtension, error) { + return nil, nil +} diff --git a/internal/otel_collector/config/configerror/configerror.go b/internal/otel_collector/config/configerror/configerror.go new file mode 100644 index 00000000000..fe694ad4b34 --- /dev/null +++ b/internal/otel_collector/config/configerror/configerror.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configerror contains the common errors caused by malformed configs. +package configerror + +import "errors" + +// ErrDataTypeIsNotSupported can be returned by receiver, exporter or processor +// factory methods that create the entity if the particular telemetry +// data type is not supported by the receiver, exporter or processor. +var ErrDataTypeIsNotSupported = errors.New("telemetry type is not supported") diff --git a/internal/otel_collector/config/configgrpc/README.md b/internal/otel_collector/config/configgrpc/README.md new file mode 100644 index 00000000000..4a6ff017b6d --- /dev/null +++ b/internal/otel_collector/config/configgrpc/README.md @@ -0,0 +1,62 @@ +# gRPC Configuration Settings + +gRPC exposes a [variety of settings](https://godoc.org/google.golang.org/grpc). +Several of these settings are available for configuration within individual +receivers or exporters. In general, none of these settings should need to be +adjusted. + +## Client Configuration + +[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/README.md) +leverage client configuration. + +Note that client configuration supports TLS configuration, however +configuration parameters are not defined under `tls_settings` like server +configuration. For more information, see [configtls +README](../configtls/README.md). 
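+
+As a sketch (hypothetical file paths, not a definitive reference), a client's
+TLS parameters sit at the top level of the exporter settings, alongside the
+options listed below:
+
+```yaml
+exporter:
+  otlp:
+    endpoint: otelcol2:55690
+    ca_file: ca.pem
+    insecure: false
+```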
+ +- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md) +- `compression` (default = gzip): Compression type to use (only gzip is supported today) +- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) +- `headers`: name/value pairs added to the request +- [`keepalive`](https://godoc.org/google.golang.org/grpc/keepalive#ClientParameters) + - `permit_without_stream` + - `time` + - `timeout` +- [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize) +- [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize) + +Example: + +```yaml +exporter: + otlp: + endpoint: otelcol2:55690 + headers: + test1: "value1" + "test 2": "value 2" +``` + +## Server Configuration + +[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/master/receiver/README.md) +leverage server configuration. + +Note that transport configuration can also be configured. For more information, +see [confignet README](../confignet/README.md). + +- [`keepalive`](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters) + - [`enforcement_policy`](https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy) + - `min_time` + - `permit_without_stream` + - [`server_parameters`](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters) + - `max_connection_age` + - `max_connection_age_grace` + - `max_connection_idle` + - `time` + - `timeout` +- [`max_concurrent_streams`](https://godoc.org/google.golang.org/grpc#MaxConcurrentStreams) +- [`max_recv_msg_size_mib`](https://godoc.org/google.golang.org/grpc#MaxRecvMsgSize) +- [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize) +- [`tls_settings`](../configtls/README.md) +- [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize) diff --git a/internal/otel_collector/config/configgrpc/bearer_token.go b/internal/otel_collector/config/configgrpc/bearer_token.go new file mode 100644 index 00000000000..bf457f6dc75 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/bearer_token.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configgrpc + +import ( + "context" + "fmt" + + "google.golang.org/grpc/credentials" +) + +var _ credentials.PerRPCCredentials = (*PerRPCAuth)(nil) + +// PerRPCAuth is a gRPC credentials.PerRPCCredentials implementation that returns an 'authorization' header. +type PerRPCAuth struct { + metadata map[string]string +} + +// BearerToken returns a new PerRPCAuth based on the given token. +func BearerToken(t string) *PerRPCAuth { + return &PerRPCAuth{ + metadata: map[string]string{"authorization": fmt.Sprintf("Bearer %s", t)}, + } +} + +// GetRequestMetadata returns the request metadata to be used with the RPC. 
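+// The metadata map is built once in BearerToken and reused for every RPC.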
+func (c *PerRPCAuth) GetRequestMetadata(context.Context, ...string) (map[string]string, error) { + return c.metadata, nil +} + +// RequireTransportSecurity always returns true for this implementation. Passing bearer tokens in plain-text connections is a bad idea. +func (c *PerRPCAuth) RequireTransportSecurity() bool { + return true +} diff --git a/internal/otel_collector/config/configgrpc/bearer_token_test.go b/internal/otel_collector/config/configgrpc/bearer_token_test.go new file mode 100644 index 00000000000..d91c439496c --- /dev/null +++ b/internal/otel_collector/config/configgrpc/bearer_token_test.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configgrpc + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBearerToken(t *testing.T) { + // test + result := BearerToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...") + metadata, err := result.GetRequestMetadata(context.Background()) + require.NoError(t, err) + + // verify + assert.Equal(t, "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", metadata["authorization"]) +} + +func TestBearerTokenRequiresSecureTransport(t *testing.T) { + // test + token := BearerToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...") + + // verify + assert.True(t, token.RequireTransportSecurity()) +} diff --git a/internal/otel_collector/config/configgrpc/configgrpc.go b/internal/otel_collector/config/configgrpc/configgrpc.go new file mode 100644 index 00000000000..6d0131081c0 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/configgrpc.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configgrpc defines the gRPC configuration settings. 
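+//
+// A minimal client-side sketch (hypothetical endpoint), mirroring the usage in
+// the tests below:
+//
+//	gcs := &configgrpc.GRPCClientSettings{
+//		Endpoint:   "localhost:55690",
+//		TLSSetting: configtls.TLSClientSetting{Insecure: true},
+//	}
+//	opts, err := gcs.ToDialOptions()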
+package configgrpc
+
+import (
+	"fmt"
+	"net"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/balancer/roundrobin"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding/gzip"
+	"google.golang.org/grpc/keepalive"
+
+	"go.opentelemetry.io/collector/config/configauth"
+	"go.opentelemetry.io/collector/config/confignet"
+	"go.opentelemetry.io/collector/config/configtls"
+)
+
+// gRPC keys for the compression types supported within the collector.
+const (
+	CompressionUnsupported = ""
+	CompressionGzip        = "gzip"
+
+	PerRPCAuthTypeBearer = "bearer"
+)
+
+var (
+	// Map of opentelemetry compression types to grpc registered compression types.
+	grpcCompressionKeyMap = map[string]string{
+		CompressionGzip: gzip.Name,
+	}
+)
+
+// Allowed balancer names to be set in grpclb_policy to discover the servers.
+var allowedBalancerNames = []string{roundrobin.Name, grpc.PickFirstBalancerName}
+
+// KeepaliveClientConfig exposes the keepalive.ClientParameters to be used by the exporter.
+// Refer to the original data-structure for the meaning of each parameter:
+// https://godoc.org/google.golang.org/grpc/keepalive#ClientParameters
+type KeepaliveClientConfig struct {
+	Time                time.Duration `mapstructure:"time,omitempty"`
+	Timeout             time.Duration `mapstructure:"timeout,omitempty"`
+	PermitWithoutStream bool          `mapstructure:"permit_without_stream,omitempty"`
+}
+
+// GRPCClientSettings defines common settings for a gRPC client configuration.
+type GRPCClientSettings struct {
+	// The target to which the exporter is going to send traces or metrics,
+	// using the gRPC protocol. The valid syntax is described at
+	// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+	Endpoint string `mapstructure:"endpoint"`
+
+	// The compression key for supported compression types within
+	// the collector. Currently, the only supported mode is `gzip`.
+	Compression string `mapstructure:"compression"`
+
+	// TLSSetting struct exposes TLS client configuration.
+	TLSSetting configtls.TLSClientSetting `mapstructure:",squash"`
+
+	// The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams
+	// (https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).
+	Keepalive *KeepaliveClientConfig `mapstructure:"keepalive"`
+
+	// ReadBufferSize for gRPC client. See grpc.WithReadBufferSize
+	// (https://godoc.org/google.golang.org/grpc#WithReadBufferSize).
+	ReadBufferSize int `mapstructure:"read_buffer_size"`
+
+	// WriteBufferSize for gRPC client. See grpc.WithWriteBufferSize
+	// (https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).
+	WriteBufferSize int `mapstructure:"write_buffer_size"`
+
+	// WaitForReady parameter configures the client to wait for ready state before sending data.
+	// (https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)
+	WaitForReady bool `mapstructure:"wait_for_ready"`
+
+	// The headers associated with gRPC requests.
+	Headers map[string]string `mapstructure:"headers"`
+
+	// PerRPCAuth parameter configures the client to send authentication data on a per-RPC basis.
+	PerRPCAuth *PerRPCAuthConfig `mapstructure:"per_rpc_auth"`
+
+	// BalancerName sets the balancer in grpclb_policy used to discover the servers.
+	// The default is pick_first.
+	// https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md
+	BalancerName string `mapstructure:"balancer_name"`
+}
+
+// KeepaliveServerConfig groups the server-side keepalive settings.
+type KeepaliveServerConfig struct {
+	ServerParameters  *KeepaliveServerParameters  `mapstructure:"server_parameters,omitempty"`
+	EnforcementPolicy *KeepaliveEnforcementPolicy `mapstructure:"enforcement_policy,omitempty"`
+}
+
+// PerRPCAuthConfig specifies how the Per-RPC authentication data should be obtained.
+type PerRPCAuthConfig struct {
+	// AuthType represents the authentication type to use. Currently, only 'bearer' is supported.
+	AuthType string `mapstructure:"type,omitempty"`
+
+	// BearerToken specifies the bearer token to use for every RPC.
+	BearerToken string `mapstructure:"bearer_token,omitempty"`
+}
+
+// KeepaliveServerParameters allows configuration of the keepalive.ServerParameters.
+// The same default values as keepalive.ServerParameters are applicable and get applied by the server.
+// See https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters for details.
+type KeepaliveServerParameters struct {
+	MaxConnectionIdle     time.Duration `mapstructure:"max_connection_idle,omitempty"`
+	MaxConnectionAge      time.Duration `mapstructure:"max_connection_age,omitempty"`
+	MaxConnectionAgeGrace time.Duration `mapstructure:"max_connection_age_grace,omitempty"`
+	Time                  time.Duration `mapstructure:"time,omitempty"`
+	Timeout               time.Duration `mapstructure:"timeout,omitempty"`
+}
+
+// KeepaliveEnforcementPolicy allows configuration of the keepalive.EnforcementPolicy.
+// The same default values as keepalive.EnforcementPolicy are applicable and get applied by the server.
+// See https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy for details.
+type KeepaliveEnforcementPolicy struct {
+	MinTime             time.Duration `mapstructure:"min_time,omitempty"`
+	PermitWithoutStream bool          `mapstructure:"permit_without_stream,omitempty"`
+}
+
+// GRPCServerSettings defines common settings for a gRPC server configuration.
+type GRPCServerSettings struct {
+	// Server net.Addr config. For transport only "tcp" and "unix" are valid options.
+	NetAddr confignet.NetAddr `mapstructure:",squash"`
+
+	// Configures the protocol to use TLS.
+	// The default value is nil, which will cause the protocol to not use TLS.
+	TLSSetting *configtls.TLSServerSetting `mapstructure:"tls_settings,omitempty"`
+
+	// MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server.
+	MaxRecvMsgSizeMiB uint64 `mapstructure:"max_recv_msg_size_mib"`
+
+	// MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport.
+	// It has effect only for streaming RPCs.
+	MaxConcurrentStreams uint32 `mapstructure:"max_concurrent_streams"`
+
+	// ReadBufferSize for gRPC server. See grpc.ReadBufferSize
+	// (https://godoc.org/google.golang.org/grpc#ReadBufferSize).
+	ReadBufferSize int `mapstructure:"read_buffer_size"`
+
+	// WriteBufferSize for gRPC server. See grpc.WriteBufferSize
+	// (https://godoc.org/google.golang.org/grpc#WriteBufferSize).
+	WriteBufferSize int `mapstructure:"write_buffer_size"`
+
+	// Keepalive anchors all the settings related to keepalive.
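+	// If unset, the server falls back to gRPC's built-in keepalive defaults.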
+ Keepalive *KeepaliveServerConfig `mapstructure:"keepalive,omitempty"` + + // Auth for this receiver + Auth *configauth.Authentication `mapstructure:"auth,omitempty"` +} + +// ToDialOptions maps configgrpc.GRPCClientSettings to a slice of dial options for gRPC +func (gcs *GRPCClientSettings) ToDialOptions() ([]grpc.DialOption, error) { + var opts []grpc.DialOption + if gcs.Compression != "" { + if compressionKey := GetGRPCCompressionKey(gcs.Compression); compressionKey != CompressionUnsupported { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(compressionKey))) + } else { + return nil, fmt.Errorf("unsupported compression type %q", gcs.Compression) + } + } + + tlsCfg, err := gcs.TLSSetting.LoadTLSConfig() + if err != nil { + return nil, err + } + tlsDialOption := grpc.WithInsecure() + if tlsCfg != nil { + tlsDialOption = grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg)) + } + opts = append(opts, tlsDialOption) + + if gcs.ReadBufferSize > 0 { + opts = append(opts, grpc.WithReadBufferSize(gcs.ReadBufferSize)) + } + + if gcs.WriteBufferSize > 0 { + opts = append(opts, grpc.WithWriteBufferSize(gcs.WriteBufferSize)) + } + + if gcs.Keepalive != nil { + keepAliveOption := grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: gcs.Keepalive.Time, + Timeout: gcs.Keepalive.Timeout, + PermitWithoutStream: gcs.Keepalive.PermitWithoutStream, + }) + opts = append(opts, keepAliveOption) + } + + if gcs.PerRPCAuth != nil { + if strings.EqualFold(gcs.PerRPCAuth.AuthType, PerRPCAuthTypeBearer) { + sToken := gcs.PerRPCAuth.BearerToken + token := BearerToken(sToken) + opts = append(opts, grpc.WithPerRPCCredentials(token)) + } else { + return nil, fmt.Errorf("unsupported per-RPC auth type %q", gcs.PerRPCAuth.AuthType) + } + } + + if gcs.BalancerName != "" { + valid := validateBalancerName(gcs.BalancerName) + if !valid { + return nil, fmt.Errorf("invalid balancer_name: %s", gcs.BalancerName) + } + opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingPolicy":"%s"}`, gcs.BalancerName))) + } + + return opts, nil +} + +func validateBalancerName(balancerName string) bool { + for _, item := range allowedBalancerNames { + if item == balancerName { + return true + } + } + return false +} + +func (gss *GRPCServerSettings) ToListener() (net.Listener, error) { + return gss.NetAddr.Listen() +} + +// ToServerOption maps configgrpc.GRPCServerSettings to a slice of server options for gRPC +func (gss *GRPCServerSettings) ToServerOption() ([]grpc.ServerOption, error) { + var opts []grpc.ServerOption + + if gss.TLSSetting != nil { + tlsCfg, err := gss.TLSSetting.LoadTLSConfig() + if err != nil { + return nil, err + } + opts = append(opts, grpc.Creds(credentials.NewTLS(tlsCfg))) + } + + if gss.MaxRecvMsgSizeMiB > 0 { + opts = append(opts, grpc.MaxRecvMsgSize(int(gss.MaxRecvMsgSizeMiB*1024*1024))) + } + + if gss.MaxConcurrentStreams > 0 { + opts = append(opts, grpc.MaxConcurrentStreams(gss.MaxConcurrentStreams)) + } + + if gss.ReadBufferSize > 0 { + opts = append(opts, grpc.ReadBufferSize(gss.ReadBufferSize)) + } + + if gss.WriteBufferSize > 0 { + opts = append(opts, grpc.WriteBufferSize(gss.WriteBufferSize)) + } + + // The default values referenced in the GRPC docs are set within the server, so this code doesn't need + // to apply them over zero/nil values before passing these as grpc.ServerOptions. + // The following shows the server code for applying default grpc.ServerOptions. 
+	// https://github.com/grpc/grpc-go/blob/120728e1f775e40a2a764341939b78d666b08260/internal/transport/http2_server.go#L184-L200
+	if gss.Keepalive != nil {
+		if gss.Keepalive.ServerParameters != nil {
+			svrParams := gss.Keepalive.ServerParameters
+			opts = append(opts, grpc.KeepaliveParams(keepalive.ServerParameters{
+				MaxConnectionIdle:     svrParams.MaxConnectionIdle,
+				MaxConnectionAge:      svrParams.MaxConnectionAge,
+				MaxConnectionAgeGrace: svrParams.MaxConnectionAgeGrace,
+				Time:                  svrParams.Time,
+				Timeout:               svrParams.Timeout,
+			}))
+		}
+		// The default values referenced in the gRPC docs are set within the server, so this code doesn't need
+		// to apply them over zero/nil values before passing these as grpc.ServerOptions.
+		// The following shows the server code for applying default grpc.ServerOptions.
+		// https://github.com/grpc/grpc-go/blob/120728e1f775e40a2a764341939b78d666b08260/internal/transport/http2_server.go#L202-L205
+		if gss.Keepalive.EnforcementPolicy != nil {
+			enfPol := gss.Keepalive.EnforcementPolicy
+			opts = append(opts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+				MinTime:             enfPol.MinTime,
+				PermitWithoutStream: enfPol.PermitWithoutStream,
+			}))
+		}
+	}
+
+	if gss.Auth != nil {
+		authOpts, err := gss.Auth.ToServerOptions()
+		if err != nil {
+			return nil, err
+		}
+		opts = append(opts, authOpts...)
+	}
+
+	return opts, nil
+}
+
+// GetGRPCCompressionKey returns the grpc registered compression key if the
+// passed-in compression key is supported, and CompressionUnsupported otherwise.
+func GetGRPCCompressionKey(compressionType string) string {
+	compressionKey := strings.ToLower(compressionType)
+	if encodingKey, ok := grpcCompressionKeyMap[compressionKey]; ok {
+		return encodingKey
+	}
+	return CompressionUnsupported
+}
diff --git a/internal/otel_collector/config/configgrpc/configgrpc_test.go b/internal/otel_collector/config/configgrpc/configgrpc_test.go
new file mode 100644
index 00000000000..035176fd6a2
--- /dev/null
+++ b/internal/otel_collector/config/configgrpc/configgrpc_test.go
@@ -0,0 +1,523 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package configgrpc + +import ( + "context" + "path" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/config/configauth" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtls" + otelcol "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + "go.opentelemetry.io/collector/testutil" +) + +func TestDefaultGrpcClientSettings(t *testing.T) { + gcs := &GRPCClientSettings{ + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + opts, err := gcs.ToDialOptions() + assert.NoError(t, err) + assert.Len(t, opts, 1) +} + +func TestAllGrpcClientSettings(t *testing.T) { + gcs := &GRPCClientSettings{ + Headers: map[string]string{ + "test": "test", + }, + Endpoint: "localhost:1234", + Compression: "gzip", + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + Keepalive: &KeepaliveClientConfig{ + Time: time.Second, + Timeout: time.Second, + PermitWithoutStream: true, + }, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + WaitForReady: true, + PerRPCAuth: nil, + BalancerName: "round_robin", + } + opts, err := gcs.ToDialOptions() + assert.NoError(t, err) + assert.Len(t, opts, 6) +} + +func TestDefaultGrpcServerSettings(t *testing.T) { + gss := &GRPCServerSettings{} + opts, err := gss.ToServerOption() + assert.NoError(t, err) + assert.Len(t, opts, 0) +} + +func TestAllGrpcServerSettingsExceptAuth(t *testing.T) { + gss := &GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:1234", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{}, + ClientCAFile: "", + }, + MaxRecvMsgSizeMiB: 1, + MaxConcurrentStreams: 1024, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + Keepalive: &KeepaliveServerConfig{ + ServerParameters: &KeepaliveServerParameters{ + MaxConnectionIdle: time.Second, + MaxConnectionAge: time.Second, + MaxConnectionAgeGrace: time.Second, + Time: time.Second, + Timeout: time.Second, + }, + EnforcementPolicy: &KeepaliveEnforcementPolicy{ + MinTime: time.Second, + PermitWithoutStream: true, + }, + }, + } + opts, err := gss.ToServerOption() + assert.NoError(t, err) + assert.Len(t, opts, 7) +} + +func TestGrpcServerAuthSettings(t *testing.T) { + gss := &GRPCServerSettings{} + + // sanity check + _, err := gss.ToServerOption() + require.NoError(t, err) + + // test + gss.Auth = &configauth.Authentication{ + OIDC: &configauth.OIDC{}, + } + opts, err := gss.ToServerOption() + + // verify + // an error here is a positive confirmation that Auth kicked in + assert.Error(t, err) + assert.Nil(t, opts) +} + +func TestGRPCClientSettingsError(t *testing.T) { + tests := []struct { + settings GRPCClientSettings + err string + }{ + { + err: "^failed to load TLS config: failed to load CA CertPool: failed to load CA /doesnt/exist:", + settings: GRPCClientSettings{ + Headers: nil, + Endpoint: "", + Compression: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/doesnt/exist", + }, + Insecure: false, + ServerName: "", + }, + Keepalive: nil, + }, + }, + { + err: "^failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither", + settings: GRPCClientSettings{ + Headers: nil, + Endpoint: "", + Compression: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "/doesnt/exist", + }, + Insecure: 
false, + ServerName: "", + }, + Keepalive: nil, + }, + }, + { + err: "invalid balancer_name: test", + settings: GRPCClientSettings{ + Headers: map[string]string{ + "test": "test", + }, + Endpoint: "localhost:1234", + Compression: "gzip", + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + Keepalive: &KeepaliveClientConfig{ + Time: time.Second, + Timeout: time.Second, + PermitWithoutStream: true, + }, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + WaitForReady: true, + BalancerName: "test", + }, + }, + } + for _, test := range tests { + t.Run(test.err, func(t *testing.T) { + opts, err := test.settings.ToDialOptions() + assert.Nil(t, opts) + assert.Error(t, err) + assert.Regexp(t, test.err, err) + }) + } +} + +func TestUseSecure(t *testing.T) { + gcs := &GRPCClientSettings{ + Headers: nil, + Endpoint: "", + Compression: "", + TLSSetting: configtls.TLSClientSetting{}, + Keepalive: nil, + PerRPCAuth: nil, + } + dialOpts, err := gcs.ToDialOptions() + assert.NoError(t, err) + assert.Equal(t, len(dialOpts), 1) +} + +func TestGRPCServerSettingsError(t *testing.T) { + tests := []struct { + settings GRPCServerSettings + err string + }{ + { + err: "^failed to load TLS config: failed to load CA CertPool: failed to load CA /doesnt/exist:", + settings: GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "127.0.0.1:1234", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/doesnt/exist", + }, + }, + }, + }, + { + err: "^failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither", + settings: GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "127.0.0.1:1234", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "/doesnt/exist", + }, + }, + }, + }, + { + err: "^failed to load TLS config: failed to load client CA CertPool: failed to load CA /doesnt/exist:", + settings: GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "127.0.0.1:1234", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + ClientCAFile: "/doesnt/exist", + }, + }, + }, + } + for _, test := range tests { + t.Run(test.err, func(t *testing.T) { + _, err := test.settings.ToServerOption() + assert.Regexp(t, test.err, err) + }) + } +} + +func TestGRPCServerSettings_ToListener_Error(t *testing.T) { + settings := GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "127.0.0.1:1234567", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "/doesnt/exist", + }, + }, + Keepalive: nil, + } + _, err := settings.ToListener() + assert.Error(t, err) +} + +func TestGetGRPCCompressionKey(t *testing.T) { + if GetGRPCCompressionKey("gzip") != CompressionGzip { + t.Error("gzip is marked as supported but returned unsupported") + } + + if GetGRPCCompressionKey("Gzip") != CompressionGzip { + t.Error("Capitalization of CompressionGzip should not matter") + } + + if GetGRPCCompressionKey("badType") != CompressionUnsupported { + t.Error("badType is not supported but was returned as supported") + } +} + +func TestHttpReception(t *testing.T) { + tests := []struct { + name string + tlsServerCreds *configtls.TLSServerSetting + tlsClientCreds *configtls.TLSClientSetting + hasError bool + }{ + { + name: "noTLS", + tlsServerCreds: nil, + tlsClientCreds: &configtls.TLSClientSetting{ + Insecure: true, + }, + }, + { + name: "TLS", + tlsServerCreds: 
&configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + ServerName: "localhost", + }, + }, + { + name: "NoServerCertificates", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + ServerName: "localhost", + }, + hasError: true, + }, + { + name: "mTLS", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + ClientCAFile: path.Join(".", "testdata", "ca.crt"), + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "client.crt"), + KeyFile: path.Join(".", "testdata", "client.key"), + }, + ServerName: "localhost", + }, + }, + { + name: "NoClientCertificate", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + ClientCAFile: path.Join(".", "testdata", "ca.crt"), + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + ServerName: "localhost", + }, + hasError: true, + }, + { + name: "WrongClientCA", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + ClientCAFile: path.Join(".", "testdata", "server.crt"), + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "client.crt"), + KeyFile: path.Join(".", "testdata", "client.key"), + }, + ServerName: "localhost", + }, + hasError: true, + }, + } + // prepare + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gss := &GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:0", + Transport: "tcp", + }, + TLSSetting: tt.tlsServerCreds, + } + ln, err := gss.ToListener() + assert.NoError(t, err) + opts, err := gss.ToServerOption() + assert.NoError(t, err) + s := grpc.NewServer(opts...) + otelcol.RegisterTraceServiceServer(s, &grpcTraceServer{}) + + go func() { + _ = s.Serve(ln) + }() + + gcs := &GRPCClientSettings{ + Endpoint: ln.Addr().String(), + TLSSetting: *tt.tlsClientCreds, + } + clientOpts, errClient := gcs.ToDialOptions() + assert.NoError(t, errClient) + grpcClientConn, errDial := grpc.Dial(gcs.Endpoint, clientOpts...) 
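+			// grpc.Dial is non-blocking here, so TLS failures are expected to
+			// surface on the Export call below rather than on errDial.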
+ assert.NoError(t, errDial) + client := otelcol.NewTraceServiceClient(grpcClientConn) + ctx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Second) + resp, errResp := client.Export(ctx, &otelcol.ExportTraceServiceRequest{}, grpc.WaitForReady(true)) + if tt.hasError { + assert.Error(t, errResp) + } else { + assert.NoError(t, errResp) + assert.NotNil(t, resp) + } + cancelFunc() + s.Stop() + }) + } +} + +func TestReceiveOnUnixDomainSocket(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping test on windows") + } + socketName := testutil.TempSocketName(t) + gss := &GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: socketName, + Transport: "unix", + }, + } + ln, err := gss.ToListener() + assert.NoError(t, err) + opts, err := gss.ToServerOption() + assert.NoError(t, err) + s := grpc.NewServer(opts...) + otelcol.RegisterTraceServiceServer(s, &grpcTraceServer{}) + + go func() { + _ = s.Serve(ln) + }() + + gcs := &GRPCClientSettings{ + Endpoint: "unix://" + ln.Addr().String(), + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + clientOpts, errClient := gcs.ToDialOptions() + assert.NoError(t, errClient) + grpcClientConn, errDial := grpc.Dial(gcs.Endpoint, clientOpts...) + assert.NoError(t, errDial) + client := otelcol.NewTraceServiceClient(grpcClientConn) + ctx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Second) + resp, errResp := client.Export(ctx, &otelcol.ExportTraceServiceRequest{}, grpc.WaitForReady(true)) + assert.NoError(t, errResp) + assert.NotNil(t, resp) + cancelFunc() + s.Stop() +} + +type grpcTraceServer struct{} + +func (gts *grpcTraceServer) Export(context.Context, *otelcol.ExportTraceServiceRequest) (*otelcol.ExportTraceServiceResponse, error) { + return &otelcol.ExportTraceServiceResponse{}, nil +} + +func TestWithPerRPCAuthBearerToken(t *testing.T) { + // prepare + // test + gcs := &GRPCClientSettings{ + PerRPCAuth: &PerRPCAuthConfig{ + AuthType: "bearer", + BearerToken: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + }, + } + dialOpts, err := gcs.ToDialOptions() + + // verify + assert.NoError(t, err) + assert.Len(t, dialOpts, 2) // WithInsecure and WithPerRPCCredentials +} + +func TestWithPerRPCAuthInvalidAuthType(t *testing.T) { + // test + gcs := &GRPCClientSettings{ + PerRPCAuth: &PerRPCAuthConfig{ + AuthType: "non-existing", + }, + } + dialOpts, err := gcs.ToDialOptions() + + // verify + assert.Error(t, err) + assert.Nil(t, dialOpts) +} diff --git a/internal/otel_collector/config/configgrpc/gzip.go b/internal/otel_collector/config/configgrpc/gzip.go new file mode 100644 index 00000000000..68f9d98617e --- /dev/null +++ b/internal/otel_collector/config/configgrpc/gzip.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package configgrpc + +import ( + // import the gzip package with auto-registers the gzip grpc compressor + _ "google.golang.org/grpc/encoding/gzip" +) diff --git a/internal/otel_collector/config/configgrpc/testdata/ca.crt b/internal/otel_collector/config/configgrpc/testdata/ca.crt new file mode 100644 index 00000000000..7a677e39a77 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQCYMh590xiOGzANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB +VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM +CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIwMDkyMjA1MjIx +MFoXDTMwMDkyMDA1MjIxMFowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry +YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV +BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AJdLtbEbPVuBQqh2MLDuyYB/zg3dMfl1jyE64UgW5AVRHrfAGXgP55yeEh3XGiO+ +i5PYNeEILoYcLXtMstA24OTgxeLjTZ0zEaja50/Ow9/NjZcTc0f/DErHI3GvWTxW +dCdosGe4qSwi9BbRGPfAat5fJMSTERXDcAcH2aaD3ekK3WTqtXsFsErF7+SpzJfL +PZw4aSFS9a26PkxO+Z5coqdYRC1CIpZGVFRg/PVcb7NNTrRf+Wu/hOncNkHDXKKz +qeBkhnHczQrPDzxhG2FvrahMgGSsRgBDdMwTBfmBhlbP+sM0HSPuCmKD95/osO03 +sG13nWMSDb7QYETVyg3E4t8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAfLGe9cAN +1WH09KVYIWWzu74tkaOIRFdkXzcx6fMq4Gpi49/lxG1INCrJ/4F8UyhHq0mmSsxb +UGs3KFfDsRctX7PNgOLYHxlUcAhQFzT3xqrRg7iqaiGWKTSGE1fXg29LKm/Ox/MC +npumt7rsSix5Viyb0/njcSX8CdSCirhKCiJklfd5J/Cwxqm+j/Pgaz2YrOj8Axa1 +/GJtPOtIpPYEBbXXUMpuijSikcfurZJL62WWxrzUGZjRsmSJAl5bvTJTOKGQb634 +Y0oehROKnkA2N0UVa4LM2M5C+CVZNl8vKAsdj1pywRGEOQoH42wBNu71Wob1f7jt +JOXWGJcoyEjbSg== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configgrpc/testdata/client.crt b/internal/otel_collector/config/configgrpc/testdata/client.crt new file mode 100644 index 00000000000..2fc037de49f --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/client.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdZMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAyMraDxgfr7DEShaRbpKnKnRm4xrh9StCpTWVxViCS4JmACrlNsDBggan +Xz4rQSsV1Z2lznYPdbpXVVDY/8Q87GDXQLmB48cff+DLdU2TAvsalraty4edlf1Q +j6WNi/jFca9XIqqS358bmBau3SlEEJVv0StE8fDiZpHQuYADtdXxWhXGcrNC3quu +GKBtTCaj01EiZU5Rdqzd/KFEUQ5ns5K8j1vXJJzEhbmOXRN4NM0vvEBnd3ObP+Lw +pFUSkhxgYYLga8L5432bg/BA7OSLhZoEZzuMivyyNVC7sIoyLBYR0/Nk53ICmKz4 +gR18lTmpDXnmFZv7D1HXhwvFQ/xvbwIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAM +K90gY676JVzErSogCDn3XCsEkSLyBa90LK2FMogU7ZF0x2/Y6qf2yBLqYAjp4Br1 +CXmmQAXyLGsTs1ahobgZNVEvdvhVxc6CHBw4aBbXaUfVGq26xauPu47wWHtxEeAx +h9huRcZZKtTsJBJ1N6Yg7mJbzFT0nQ0FGWuoWd9HQP7ncOlfmlBfuAGRKRn1lXXr +na0thmOFQskzyAByijuGuaFvr+v4IVHYqO3JPXNpwp2LNHvD/f0OOS2XWpsUX6Vn +2IDdMgZSNLrHDZpemtl1QSaHemG8s67LEvuG0/fsfV38pKPlhKV1xrkojNN3kvPq +IyU5uT3m01KkJAMtRrMT +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configgrpc/testdata/client.key b/internal/otel_collector/config/configgrpc/testdata/client.key new file mode 100644 index 00000000000..4c77070cc03 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEpAIBAAKCAQEAyMraDxgfr7DEShaRbpKnKnRm4xrh9StCpTWVxViCS4JmACrl +NsDBgganXz4rQSsV1Z2lznYPdbpXVVDY/8Q87GDXQLmB48cff+DLdU2TAvsalrat +y4edlf1Qj6WNi/jFca9XIqqS358bmBau3SlEEJVv0StE8fDiZpHQuYADtdXxWhXG +crNC3quuGKBtTCaj01EiZU5Rdqzd/KFEUQ5ns5K8j1vXJJzEhbmOXRN4NM0vvEBn +d3ObP+LwpFUSkhxgYYLga8L5432bg/BA7OSLhZoEZzuMivyyNVC7sIoyLBYR0/Nk +53ICmKz4gR18lTmpDXnmFZv7D1HXhwvFQ/xvbwIDAQABAoIBADHS1AUG0WYBENPp +gbDURxKry5Py6bqyP1lLUJyld79Q3gqQmkvZzKp9CC8D+Cu1izd0ZN40QWXPFTig +VRgyE4P8C62N2oMwt8o9d37l/uKweEqJjdqBDkNXlhPu2o6u7h9liNObS9KdYnV8 +u2s5gCA1VIesmvEF+sfEyuwcrc8ClHf4qs7VDqopZ6HZ3aT5ns4xXA5QoEZJlhDG +axwqWQ/jC4G+nGyrE2/AAGAgQtRhcs8aHTuEGBlNGlC9af/obyYLCqPm0A6ceyKz +PcZUDQCrsZnQpwqF7zsF7WmW8W5XqVHDFoJaNQt2/sp3OkOv9z78JodvB/MbGmNV +MkP1GeECgYEA9kbhLVsDDPA82wQuBsbK9u6A59ZPIXDfXJVNjcg1LKJkqJsKhY9z +uZ98rHlTI+FS5sCL/ixdM/tVNFI3EHaS7wOLJI9y2y+CVi2d5ffMKbPUtFJf5Q+A +zlJq1LseKdwsVT1jSah/jZ53YW1pOiJZPByUfLWIwLNHo0+fIEMfCTkCgYEA0LhC +sNb1W8GpMy6eTDfa4D90Wm0LvZgEyV8SCc09xdC6yp1bE4G19x037/YQsbLnE5vB +0YM8ILh977zCYHokC9qMdKDAxZx0RQD2IUDRbTymQ89uS5ednSg9dBxs9f/cxTlU +wQUxf4+yY/Rohyo0+mK4zkobG9lU1H83KKc1BecCgYEAkvQkdW3zWgsYJRBPbpe8 +kLAslypYOXoirhohFtM6d5HHQpyRILVCtqamPDyBEc3oK+0FG/vY+aWlZ/0PAnHe +p2ST6JL4VDX7LfU2XP0KBHBcIeVtdz9S+spPGPU2wH+yrIJe9prm0diXH7mrqpbI +bIgZSnkASwwvWRGvwA6NPHECgYBkD+JRG0zXp3lxgyj6y1BQb7tdWqflRgsNa1mf +f1jdDBtw5Y1zRZ0yEjzt+p64QleLzAFYaz0ZRrmBhJH/ZK8BS85IX4Trd/0506Ms +AAInB4uCOODctpwmatNDZhlKulZh6wFZ5B591CsmxlaSbkalv0xwAZELgd6sXSzZ +fYfrAwKBgQDM9StAiTdSjGn0Qk/YzkLlloEEebjJ7tRUpDGQgX3Z7YsCdfl/LeWU +yMV7UVDggPVjveT8TUJUm+ipD7CpesY1GTJovyRWKlyMpgAY2wKXV41oOnDD/0ef +AAa3FWMAf27ogbeXBxSUBN+1EhKBMKihQSD+Odnbu6SHUeiKskGU3Q== +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/configgrpc/testdata/server.crt b/internal/otel_collector/config/configgrpc/testdata/server.crt new file mode 100644 index 00000000000..70292eae048 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdYMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA0HqHHTXVljLlyknGttODUu675uGGJJWdUCmjr+I9+3BAxGgNG4xVjMao +IsgAHajXgfTgfLha0mnMCdAHL4OUmvAj7sJ6NZRIK9DASiu5NWLQhgjUDg1DY2K/ +nhCVGp3w/J6aFfN+qSIaHCQz7MN66mcPtsXxMMPUBlqjWCLFpR5vL2M6k5tZI1L2 +pP4jfkfEIdlHgs/AXftwiYsNo57Rlaj+O7DwPqmJdVGeeE6Wka4ANK/5svIAgW9h +mwKhSwaXwle8GDMYgtbfQCrIO/Z5ctMKG9KXEgOBpoANYmeAGT2OPaCc43710T9P +MONdj3TKc8Y5FsUA0/kwac7oGFl+hQIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQCP +5as7sDX2vcXbROg62Weyg6YCDd12LYRWS2UHBebxwDFKVj1GmkJM2eN5bZWlyULI +Sv0yXSIQuMXIZDECdg0+YWxk6ZV57iYkduhez61wqhqYu9H1h5jlOvdlevunfNZ3 +VlpIkE2vVRIpu+IiNRSkh08M5csAe7MsrgdUcgenjygwNM3wPaQtlQ7tZ+quWyYc +rHO2lByVexHwpN2ZPiMZ7eIyEs9W2kt6ohcr8jJdryfO+7Q2FR5vE8K1Uh1wNcFh +WLPMIl4InYmIFfUChHvHCEmLS0TLW4lD9srFmO7VrlrPqUOULzUIm5wuXWgvdxw9 +3XHsXLqvMOf79boGpkfv +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configgrpc/testdata/server.key b/internal/otel_collector/config/configgrpc/testdata/server.key new file mode 100644 index 00000000000..98fba4b9432 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEowIBAAKCAQEA0HqHHTXVljLlyknGttODUu675uGGJJWdUCmjr+I9+3BAxGgN +G4xVjMaoIsgAHajXgfTgfLha0mnMCdAHL4OUmvAj7sJ6NZRIK9DASiu5NWLQhgjU +Dg1DY2K/nhCVGp3w/J6aFfN+qSIaHCQz7MN66mcPtsXxMMPUBlqjWCLFpR5vL2M6 +k5tZI1L2pP4jfkfEIdlHgs/AXftwiYsNo57Rlaj+O7DwPqmJdVGeeE6Wka4ANK/5 +svIAgW9hmwKhSwaXwle8GDMYgtbfQCrIO/Z5ctMKG9KXEgOBpoANYmeAGT2OPaCc +43710T9PMONdj3TKc8Y5FsUA0/kwac7oGFl+hQIDAQABAoIBAFTJD/wcMcIE7xlG +yc7+1FC9EKQEIgbs5e59ELnuG+EPNPfrjTEf8IbxH94NUqa9TO/oRAfU/fLG3hk7 +hkCXla8xbJukcgkqRfOz0RAZGhiRGFb6bitMz5Qyy9Ufz1Pk2eYTJn046tEkMlQx +kQCAO5Pq2CQv+jgn3Cm9YOLuOU0+CEpET2lNgdUbj7wo0k2jDbxuU/CGqrQua8uH +hwM2hBH3eZJzO7EwdBhdubImg9RsDrLUkltgdVMAROP5+m03+J653v4UbaAvG4jM +IxkVW11Wdh4caKJNQY5gnNhnNG79uDeikX/dLTnSnvaARQWay/XKhP9EyKp3zVVk +4S4GEyECgYEA8TjZiGLSbgERoigUpgMxECyUAi/+GffiAR4PARdaoCXLGlXYBAax +N8n+CF62VS7FY0IYeGuzDG9O/rEGS27OBVM2j+cCkqOT9+vNJe5iwGfAzwuBAuCA +m5eRysLG4jIwhw2XRCL2gGQM92XodKkShAhXuG05nUqcUdpgdpBdJK0CgYEA3UAn +YhbXvNKUHcpjvyyQpLrJHS+Z+key1e8FWJ8TzQDWldbgCeJZrm9xCNARKZEXFxNG +V3MJWehKl2mC8ctU6u1aTi83bA7MdVvDw57SGj0HMLa4CXtdWEOt57Z6HzFJLoQy +aAxvKwbeBfyRbt7f5HaHw/w3VjZN9HA7ip7EJDkCgYAFlWhLpOX0D+hFlaHsudQv +6KhAaLX8CeXcWsLEJrM9U8KgyG3oofMGNJHBxdd4n02IX6ZLW0rYtdbhRF2970Gr +k+KGcDV6CXlKWtXz09HLXFt1L3H8DBBOCbMhO2L5J2pCJgljVV/ZVveJ3n0D/knk +boEBTt3viyOVLXXgKLVPPQKBgEFtGzhScPGRg+NbWivKTeuooJhU3z+3vBavW/Fc ++UoCGXKt3AqQONzwb4ifnrOgCCf2tzJc/kLsAkLMHMDL1Ay0q6O7KrR1m9iIjldm +u9KugVXScpG7PVtAiEihGXPn6zAqP42tP6KFoVo72fXjSmoQ8wztpJ+F53+FQNY5 +JN9hAoGBALV/knUA5tVyMofCoZflHVM3E4pFJiKQII1dzxE4g/sLYtSWDfKXP64W +mc7PKy46vjeTeRE0B8sGQ7RIhnUpPaA6OS8sMPSeJtdvfAPK0KCukHXTsLJg5DZo +XuC2gsdqPFQJQ/VDnp3JO7rbj7A3uYgzRT5xKHMluJnDDuUg1UUr +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/confighttp/README.md b/internal/otel_collector/config/confighttp/README.md new file mode 100644 index 00000000000..5449b679333 --- /dev/null +++ b/internal/otel_collector/config/confighttp/README.md @@ -0,0 +1,56 @@ +# HTTP Configuration Settings + +HTTP exposes a [variety of settings](https://golang.org/pkg/net/http/). +Several of these settings are available for configuration within individual +receivers or exporters. + +## Client Configuration + +[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/README.md) +leverage client configuration. + +Note that client configuration supports TLS configuration, however +configuration parameters are not defined under `tls_settings` like server +configuration. For more information, see [configtls +README](../configtls/README.md). + +- `endpoint`: address:port +- `headers`: name/value pairs added to the HTTP request headers +- [`read_buffer_size`](https://golang.org/pkg/net/http/#Transport) +- [`timeout`](https://golang.org/pkg/net/http/#Client) +- [`write_buffer_size`](https://golang.org/pkg/net/http/#Transport) + +Example: + +```yaml +exporter: + otlp: + endpoint: otelcol2:55690 + headers: + test1: "value1" + "test 2": "value 2" +``` + +## Server Configuration + +[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/master/receiver/README.md) +leverage server configuration. + +- [`cors_allowed_origins`](https://github.com/rs/cors): An empty list means + that CORS is not enabled at all. A wildcard can be used to match any origin + or one or more characters of an origin. 
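+  For example, `https://*.test.com` matches any subdomain of test.com, as in
+  the example below.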
+- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) +- [`tls_settings`](../configtls/README.md) + +Example: + +```yaml +receivers: + otlp: + cors_allowed_origins: + - https://foo.bar.com + - https://*.test.com + endpoint: 0.0.0.0:55690 + protocols: + http: +``` diff --git a/internal/otel_collector/config/confighttp/confighttp.go b/internal/otel_collector/config/confighttp/confighttp.go new file mode 100644 index 00000000000..56936c74da8 --- /dev/null +++ b/internal/otel_collector/config/confighttp/confighttp.go @@ -0,0 +1,168 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confighttp + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "github.com/rs/cors" + + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/internal/middleware" +) + +type HTTPClientSettings struct { + // The target URL to send data to (e.g.: http://some.url:9411/v1/traces). + Endpoint string `mapstructure:"endpoint"` + + // TLSSetting struct exposes TLS client configuration. + TLSSetting configtls.TLSClientSetting `mapstructure:",squash"` + + // ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize. + ReadBufferSize int `mapstructure:"read_buffer_size"` + + // WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize. + WriteBufferSize int `mapstructure:"write_buffer_size"` + + // Timeout parameter configures `http.Client.Timeout`. + Timeout time.Duration `mapstructure:"timeout,omitempty"` + + // Additional headers attached to each HTTP request sent by the client. + // Existing header values are overwritten if collision happens. 
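+	// For example: {"Authorization": "Bearer <token>"} (hypothetical values).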
+	Headers map[string]string `mapstructure:"headers,omitempty"`
+
+	// CustomRoundTripper allows individual components to intercept HTTP requests.
+	CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error)
+}
+
+// ToClient creates an *http.Client from the settings.
+func (hcs *HTTPClientSettings) ToClient() (*http.Client, error) {
+	tlsCfg, err := hcs.TLSSetting.LoadTLSConfig()
+	if err != nil {
+		return nil, err
+	}
+	transport := http.DefaultTransport.(*http.Transport).Clone()
+	if tlsCfg != nil {
+		transport.TLSClientConfig = tlsCfg
+	}
+	if hcs.ReadBufferSize > 0 {
+		transport.ReadBufferSize = hcs.ReadBufferSize
+	}
+	if hcs.WriteBufferSize > 0 {
+		transport.WriteBufferSize = hcs.WriteBufferSize
+	}
+
+	clientTransport := (http.RoundTripper)(transport)
+	if len(hcs.Headers) > 0 {
+		clientTransport = &headerRoundTripper{
+			transport: transport,
+			headers:   hcs.Headers,
+		}
+	}
+
+	if hcs.CustomRoundTripper != nil {
+		clientTransport, err = hcs.CustomRoundTripper(clientTransport)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &http.Client{
+		Transport: clientTransport,
+		Timeout:   hcs.Timeout,
+	}, nil
+}
+
+// headerRoundTripper is a custom RoundTripper that adds the configured headers to each request.
+type headerRoundTripper struct {
+	transport http.RoundTripper
+	headers   map[string]string
+}
+
+// RoundTrip adds the configured headers and delegates to the wrapped transport.
+func (interceptor *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	for k, v := range interceptor.headers {
+		req.Header.Set(k, v)
+	}
+	// Send the request to the next transport.
+	return interceptor.transport.RoundTrip(req)
+}
+
+// HTTPServerSettings defines common settings for an HTTP server configuration.
+type HTTPServerSettings struct {
+	// Endpoint configures the listening address for the server.
+	Endpoint string `mapstructure:"endpoint"`
+
+	// TLSSetting struct exposes TLS server configuration.
+	TLSSetting *configtls.TLSServerSetting `mapstructure:"tls_settings,omitempty"`
+
+	// CorsOrigins are the allowed CORS origins for HTTP/JSON requests to the grpc-gateway adapter
+	// for the OTLP receiver. See github.com/rs/cors.
+	// An empty list means that CORS is not enabled at all. A wildcard (*) can be
+	// used to match any origin or one or more characters of an origin.
+	CorsOrigins []string `mapstructure:"cors_allowed_origins"`
+}
+
+// ToListener creates a net.Listener from the settings, wrapped with TLS if configured.
+func (hss *HTTPServerSettings) ToListener() (net.Listener, error) {
+	listener, err := net.Listen("tcp", hss.Endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	if hss.TLSSetting != nil {
+		var tlsCfg *tls.Config
+		tlsCfg, err = hss.TLSSetting.LoadTLSConfig()
+		if err != nil {
+			return nil, err
+		}
+		listener = tls.NewListener(listener, tlsCfg)
+	}
+	return listener, nil
+}
+
+// toServerOptions has options that change the behavior of the HTTP server
+// returned by HTTPServerSettings.ToServer().
+type toServerOptions struct {
+	errorHandler middleware.ErrorHandler
+}
+
+// ToServerOption applies an option to toServerOptions.
+type ToServerOption func(opts *toServerOptions)
+
+// WithErrorHandler overrides the HTTP error handler that gets invoked
+// when there is a failure inside middleware.HTTPContentDecompressor.
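+//
+// Usage sketch (hypothetical handler):
+//
+//	srv := hss.ToServer(handler, WithErrorHandler(myErrorHandler))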
+func WithErrorHandler(e middleware.ErrorHandler) ToServerOption { + return func(opts *toServerOptions) { + opts.errorHandler = e + } +} + +func (hss *HTTPServerSettings) ToServer(handler http.Handler, opts ...ToServerOption) *http.Server { + serverOpts := &toServerOptions{} + for _, o := range opts { + o(serverOpts) + } + if len(hss.CorsOrigins) > 0 { + co := cors.Options{AllowedOrigins: hss.CorsOrigins} + handler = cors.New(co).Handler(handler) + } + handler = middleware.HTTPContentDecompressor( + handler, + middleware.WithErrorHandler(serverOpts.errorHandler), + ) + return &http.Server{ + Handler: handler, + } +} diff --git a/internal/otel_collector/config/confighttp/confighttp_test.go b/internal/otel_collector/config/confighttp/confighttp_test.go new file mode 100644 index 00000000000..66d98e4925a --- /dev/null +++ b/internal/otel_collector/config/confighttp/confighttp_test.go @@ -0,0 +1,427 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confighttp + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configtls" +) + +func TestAllHTTPClientSettings(t *testing.T) { + tests := []struct { + name string + settings HTTPClientSettings + shouldError bool + }{ + { + name: "all_valid_settings", + settings: HTTPClientSettings{ + Endpoint: "localhost:1234", + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + ReadBufferSize: 1024, + WriteBufferSize: 512, + CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return next, nil }, + }, + shouldError: false, + }, + { + name: "error_round_tripper_returned", + settings: HTTPClientSettings{ + Endpoint: "localhost:1234", + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + ReadBufferSize: 1024, + WriteBufferSize: 512, + CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) { return nil, errors.New("error") }, + }, + shouldError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client, err := test.settings.ToClient() + if test.shouldError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + transport := client.Transport.(*http.Transport) + assert.EqualValues(t, 1024, transport.ReadBufferSize) + assert.EqualValues(t, 512, transport.WriteBufferSize) + }) + } +} + +func TestHTTPClientSettingsError(t *testing.T) { + tests := []struct { + settings HTTPClientSettings + err string + }{ + { + err: "^failed to load TLS config: failed to load CA CertPool: failed to load CA /doesnt/exist:", + settings: HTTPClientSettings{ + Endpoint: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/doesnt/exist", + }, + Insecure: false, + ServerName: "", + }, + }, + }, + { + err: "^failed to load TLS config: for auth via TLS, either both certificate and key must 
be supplied, or neither", + settings: HTTPClientSettings{ + Endpoint: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "/doesnt/exist", + }, + Insecure: false, + ServerName: "", + }, + }, + }, + } + for _, test := range tests { + t.Run(test.err, func(t *testing.T) { + _, err := test.settings.ToClient() + assert.Regexp(t, test.err, err) + }) + } +} + +func TestHTTPServerSettingsError(t *testing.T) { + tests := []struct { + settings HTTPServerSettings + err string + }{ + { + err: "^failed to load TLS config: failed to load CA CertPool: failed to load CA /doesnt/exist:", + settings: HTTPServerSettings{ + Endpoint: "", + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/doesnt/exist", + }, + }, + }, + }, + { + err: "^failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither", + settings: HTTPServerSettings{ + Endpoint: "", + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "/doesnt/exist", + }, + }, + }, + }, + { + err: "^failed to load TLS config: failed to load client CA CertPool: failed to load CA /doesnt/exist:", + settings: HTTPServerSettings{ + Endpoint: "", + TLSSetting: &configtls.TLSServerSetting{ + ClientCAFile: "/doesnt/exist", + }, + }, + }, + } + for _, test := range tests { + t.Run(test.err, func(t *testing.T) { + _, err := test.settings.ToListener() + assert.Regexp(t, test.err, err) + }) + } +} + +func TestHttpReception(t *testing.T) { + tests := []struct { + name string + tlsServerCreds *configtls.TLSServerSetting + tlsClientCreds *configtls.TLSClientSetting + hasError bool + }{ + { + name: "noTLS", + tlsServerCreds: nil, + tlsClientCreds: &configtls.TLSClientSetting{ + Insecure: true, + }, + }, + { + name: "TLS", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + ServerName: "localhost", + }, + }, + { + name: "NoServerCertificates", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + ServerName: "localhost", + }, + hasError: true, + }, + { + name: "mTLS", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + ClientCAFile: path.Join(".", "testdata", "ca.crt"), + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "client.crt"), + KeyFile: path.Join(".", "testdata", "client.key"), + }, + ServerName: "localhost", + }, + }, + { + name: "NoClientCertificate", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + ClientCAFile: path.Join(".", "testdata", "ca.crt"), + }, 
+ tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + }, + ServerName: "localhost", + }, + hasError: true, + }, + { + name: "WrongClientCA", + tlsServerCreds: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + ClientCAFile: path.Join(".", "testdata", "server.crt"), + }, + tlsClientCreds: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: path.Join(".", "testdata", "ca.crt"), + CertFile: path.Join(".", "testdata", "client.crt"), + KeyFile: path.Join(".", "testdata", "client.key"), + }, + ServerName: "localhost", + }, + hasError: true, + }, + } + // prepare + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hss := &HTTPServerSettings{ + Endpoint: "localhost:0", + TLSSetting: tt.tlsServerCreds, + } + ln, err := hss.ToListener() + assert.NoError(t, err) + s := hss.ToServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, errWrite := fmt.Fprint(w, "test") + assert.NoError(t, errWrite) + })) + + go func() { + _ = s.Serve(ln) + }() + + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + prefix := "https://" + if tt.tlsClientCreds.Insecure { + prefix = "http://" + } + + hcs := &HTTPClientSettings{ + Endpoint: prefix + ln.Addr().String(), + TLSSetting: *tt.tlsClientCreds, + } + client, errClient := hcs.ToClient() + assert.NoError(t, errClient) + resp, errResp := client.Get(hcs.Endpoint) + if tt.hasError { + assert.Error(t, errResp) + } else { + assert.NoError(t, errResp) + body, errRead := ioutil.ReadAll(resp.Body) + assert.NoError(t, errRead) + assert.Equal(t, "test", string(body)) + } + require.NoError(t, s.Close()) + }) + } +} + +func TestHttpCors(t *testing.T) { + hss := &HTTPServerSettings{ + Endpoint: "localhost:0", + CorsOrigins: []string{"allowed-*.com"}, + } + + ln, err := hss.ToListener() + assert.NoError(t, err) + s := hss.ToServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + go func() { + _ = s.Serve(ln) + }() + + // TODO: make starting server deterministic + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + url := fmt.Sprintf("http://%s", ln.Addr().String()) + + // Verify allowed domain gets responses that allow CORS. + verifyCorsResp(t, url, "allowed-origin.com", 200, true) + + // Verify disallowed domain gets responses that disallow CORS. 
+ verifyCorsResp(t, url, "disallowed-origin.com", 200, false) + + require.NoError(t, s.Close()) +} + +func verifyCorsResp(t *testing.T, url string, origin string, wantStatus int, wantAllowed bool) { + req, err := http.NewRequest("OPTIONS", url, nil) + require.NoError(t, err, "Error creating trace OPTIONS request: %v", err) + req.Header.Set("Origin", origin) + req.Header.Set("Access-Control-Request-Method", "POST") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err, "Error sending OPTIONS to http server: %v", err) + + err = resp.Body.Close() + if err != nil { + t.Errorf("Error closing OPTIONS response body, %v", err) + } + + assert.Equal(t, wantStatus, resp.StatusCode) + + gotAllowOrigin := resp.Header.Get("Access-Control-Allow-Origin") + gotAllowMethods := resp.Header.Get("Access-Control-Allow-Methods") + + wantAllowOrigin := "" + wantAllowMethods := "" + if wantAllowed { + wantAllowOrigin = origin + wantAllowMethods = "POST" + } + assert.Equal(t, wantAllowOrigin, gotAllowOrigin) + assert.Equal(t, wantAllowMethods, gotAllowMethods) +} + +func ExampleHTTPServerSettings() { + settings := HTTPServerSettings{ + Endpoint: ":443", + } + s := settings.ToServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})) + l, err := settings.ToListener() + if err != nil { + panic(err) + } + if err = s.Serve(l); err != nil { + panic(err) + } +} + +func TestHttpHeaders(t *testing.T) { + tests := []struct { + name string + headers map[string]string + }{ + { + "with_headers", + map[string]string{ + "header1": "value1", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for k, v := range tt.headers { + assert.Equal(t, r.Header.Get(k), v) + } + w.WriteHeader(200) + })) + defer server.Close() + serverURL, _ := url.Parse(server.URL) + setting := HTTPClientSettings{ + Endpoint: serverURL.String(), + TLSSetting: configtls.TLSClientSetting{}, + ReadBufferSize: 0, + WriteBufferSize: 0, + Timeout: 0, + Headers: map[string]string{ + "header1": "value1", + }, + } + client, _ := setting.ToClient() + req, err := http.NewRequest("GET", setting.Endpoint, nil) + assert.NoError(t, err) + client.Do(req) + }) + } +} diff --git a/internal/otel_collector/config/confighttp/testdata/ca.crt b/internal/otel_collector/config/confighttp/testdata/ca.crt new file mode 100644 index 00000000000..9ab794b7ffc --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQDgJLdDKyhMRTANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB +VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM +CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIwMDkyMjA1MjIx +MFoXDTMwMDkyMDA1MjIxMFowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry +YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV +BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AMKJc2wz8eLzAPonO37JahdY1Rt1dkRzQuung2Fe5O8UnbtEnDc7N7fbRLBgSRl0 +F5+V2USHCtYfAJ0tLifmInLOfEgmxIB2HNnwVLwDAnyXzp6NQEVw51bsILMTuFfB +mgp8Jq8KokGGOOh6GmM9h0a3KVdpxqPD+088t8AAwZrO5dHNIxZ4Bq471Stvcm7Z +jAWAoRsjceVdGr82+iB9wTio/FIeygb5rO5Ju1GMisR1LgJ6apDv9FrtWdorRxnb +qFMXdPvMyM34oIRT6bxETSIYYHjozUz1/H0GB4NeGUbov0etnviTl+oMpRj0vZpT +DB8SD1XjHGOpbUZ6ibgUrWMCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEApBKbrk4g +Bd9/T1P3T3BBWOn8iMYLapBeapP6kVW9VtrpSvwKv6dchhh2Iizz5PnEEKBnU7ho ++GQXrKM0L/ejQeDqEo0lkZLJmovGNzXTNBGhgcVJ37qt0Bt58SoCA2Oc8dn4gtyR 
+eGi2lSVDUc+kiZWm9lUfTcwQeqTb8oS64DwJR8f11uX3NJn9N7fbwino60D5U7Na +ojO9ua4W3K5C8cuNEjssyE6qjSQ4lhXBlHxA9viSdQSQN0Lv/AH1s175jQ7G24jM +58v5DC7P0oodiOdr9Z0hndK8c1mgB2fTTme+h9iDYVttbMHoARYCWSy02/ZzHRah +tAEubJUHnzv5vA== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/confighttp/testdata/client.crt b/internal/otel_collector/config/confighttp/testdata/client.crt new file mode 100644 index 00000000000..ee5dab3c561 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/client.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdbMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA0TJ19WUFfGqdkus7kOqFQkR570sncfM0oLWK3Jzrqf0LO18BKv3LKQ/N +vVVkKa4IXkGo+oopzb/I5GQuCkp0LFz+lIfPioiEF1DQ+MJAHBEvNerwSgezlYbh +2cg/NS+f7CTe98AaEIiA+UtXDCWq2mttBLSckkvDzFpB++WL5HjonUyzE03ijAli +CJvMZmVFY7Q/uP00S0tyvgskHkeQXxVQ7rBlg43OYKRs0lXyEOYypv+2i7vxb2NM +rZciZa9wNxdWHPukeMsY2HEpZPEAochE3zppfomjc2T+B2F3uBkB0YcK0K4ugO2+ +KuzpIhoQpmdFwXjmnLjaYUZ7s+XUrwIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQB6 +II9PnbwfjQPn5vx6vEnoe6HQV0V+xnh5zfD8DJ6hM42HbGSBAqD64z6odkx7jKdj +B0tdxgx9eN0/tp15ss3h5BRksVMf1k4fFG0MY/jS5GDX4V8G3e/4SbrkNjXdA2UR +i0QMB2nyPObkpCVIRDwtdv0E416Rpm1GDtcjjuyBRAfODkj/LZ3nmwzEtXwo2XG3 +hthyC/4x6LmK0g4siA0ru8vtwUHh7d7A7rcZDPGajA+B9ByBQT3GzCND8NVqbyiq +G/XpRVQ4XmE2Vdg05hDVpHzgmyii6eIrDnQd4XrHBWLV6JuUMGu1goQDTxKlyt0p +gPm/gT00VmSUUh4QLX91 +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/confighttp/testdata/client.key b/internal/otel_collector/config/confighttp/testdata/client.key new file mode 100644 index 00000000000..e5876e89835 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA0TJ19WUFfGqdkus7kOqFQkR570sncfM0oLWK3Jzrqf0LO18B +Kv3LKQ/NvVVkKa4IXkGo+oopzb/I5GQuCkp0LFz+lIfPioiEF1DQ+MJAHBEvNerw +SgezlYbh2cg/NS+f7CTe98AaEIiA+UtXDCWq2mttBLSckkvDzFpB++WL5HjonUyz +E03ijAliCJvMZmVFY7Q/uP00S0tyvgskHkeQXxVQ7rBlg43OYKRs0lXyEOYypv+2 +i7vxb2NMrZciZa9wNxdWHPukeMsY2HEpZPEAochE3zppfomjc2T+B2F3uBkB0YcK +0K4ugO2+KuzpIhoQpmdFwXjmnLjaYUZ7s+XUrwIDAQABAoIBAEH+CxwIbDydXWv1 +bOsAMF2BQH3uVVkrAZUY7988WVNckeh+xd2MBkTDyYFKqLhFQDqLuAShBSL0tyjl +OWjhp9g+1ciBN0VaX2EDi4iNrq+r9BqsLHUODObEkAalltruVSKnVvcM0Kwag6Ug +0Srxzv3sGY38c8/quq+CYYJXHVRLBX8vQ70rwp+BKEipce18/kVxywaYGYqjzwcA +iiG6QJlyl7BrOUK8KPpyvC0OhnDpDdCO875MuBLzvulFcqvGbGNHcgRqAEk1xH5b +SQoiZBaZGWK1Ns7+bwCtcVBhvxsWqIffIvXAG74DQqpIdgmR1hVLx1e4HxVBHpDQ +Z096yVECgYEA+9M2xKbzyUJCc4MfOjRG0V1lzECrBt0sv6GMY2o8PFj1+MRONmHV +G556oxeK1NT9r6KRxK8NKSxkR775HDgSFd3VdFLpmCDQu/z/PSWoSo+0jmToOX9t +eykF4MCLhU8ck2AiDne4MB7MNKqPesbGsmK2IwPkHLGQ8Sz0367AqFMCgYEA1KpT +tafR5D/yq4iC51o6PjQ4gMn7vpiGvkU9VVEzZQRGaP5W3ssTEh9b58wlMTOxQE3Z +cpoVNRXAg1jOkKa0NZm5SOOz1PpdNINIbGpVVrx/cUkhKHDEj+uDt72fS8cyU14n +52jlh+3LpG1UyLNX7eod/xv+Wo5oLe3fvJAzprUCgYEA5PtBqb9FnZOqaO6pznsK +igWrMvb6jNtAfV+gECXhb95Ui0e09q4u4VZRnUsi6jRiGPpyIa4rAW1kIfj8+zPg +/hEgrw1VawcrxkResnMze9kADRqkLuQ34O2EcsGiHC27hia70Pv7d4YJmToeDT4C +HuKzS1OWcKDlcue2Ik780BECgYAVwsACDIQLqQd5yeQrLC5dgxZtBz39SLow6gDW +pBJwObnCsJPPBFSVPCQ5WchMeo+eltizQ1T8M5eZWRL59jTmby5oaPRTzLKQ1wYo 
+IdFNqMgZnXQJIVDbsSuvN3X/WQirQy0uHqut9wUpdA6C4ucSbyxWmFS0i3HZkUed +kdvXKQKBgGpQ7jpDzNh3zXO7AU4m4FlAN+N+dVXCP0C8DwcyRM69Sioyybh5lVww +QfQoSs3/m6/XZ5r6l9OvM6oUGfovk9Ja7NN8aqXQ4lwrET4SblRroKXQqzvp0aJ2 +XwHVfW5N9DcAQtQQzyWHxcKLyNOZLYQmBRsKuw4wu2I/rn8gWsy5 +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/confighttp/testdata/server.crt b/internal/otel_collector/config/confighttp/testdata/server.crt new file mode 100644 index 00000000000..b1346ea87b4 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdaMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAzIHy8CASiF6aI3CsI6RGlJBZExPk/Utvlp89ga42g+e1YxZUZtMm79A4 +uVOXnNsRvFtgiRA8xHrdcNcgCDBhBA7p5vQC/KgJymM6cdiNTStQbhvl7qpgyU8d +PYQNqKaaHo5ceW/AQM2z5XZRnak2HhI7VhO4QLOfp7CB0XvpFGG2lWpZ/xEHGIit +PcUQUmiROPremupF7mB04HQVH3TxWTtmHwvfWICbjO6gMfIT3me/4HrECA/WX2hj +ffP1HPfPz3ZU8UMWmodQif2/aX7auh1CfqpJbVVYMCtMr7WCmKXiYkrMK6osaoku +eCgM+ouNf1rXnzxdX6ApwZXrx9t/3QIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBi +K8J0Vy3/CHbYcQTg2UK0OCo08auuUUy3j9nSM2KfeeIKdM0DlRgLyIH63VwZNWFI +0tk27MONgsnXPFuEfUg4QG0QXx+rny10ckzI74ff1tOvu3LFkKRYafz3a1LfWW2Q +WDnta3lC+OsSKMEze1Bd4mvHZoiqTvkLbpmAudoWF7n+VSNXoOjizoMissqxy8iD +uZ6ChBWvJ1V+MtttXP0D7rSJuB0bkVwtcyEMNkUh7GrZVl61EcMQfjg5Vwsntdrv +cIIubS1F8uzT7ABLOGhiYm6gr3HPHsQp/t8sXNWIbjTmoueYBK215rvY3FQrzvAW +hNltkVRow5h+FyA/WVDN +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/confighttp/testdata/server.key b/internal/otel_collector/config/confighttp/testdata/server.key new file mode 100644 index 00000000000..cef03655b40 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzIHy8CASiF6aI3CsI6RGlJBZExPk/Utvlp89ga42g+e1YxZU +ZtMm79A4uVOXnNsRvFtgiRA8xHrdcNcgCDBhBA7p5vQC/KgJymM6cdiNTStQbhvl +7qpgyU8dPYQNqKaaHo5ceW/AQM2z5XZRnak2HhI7VhO4QLOfp7CB0XvpFGG2lWpZ +/xEHGIitPcUQUmiROPremupF7mB04HQVH3TxWTtmHwvfWICbjO6gMfIT3me/4HrE +CA/WX2hjffP1HPfPz3ZU8UMWmodQif2/aX7auh1CfqpJbVVYMCtMr7WCmKXiYkrM +K6osaokueCgM+ouNf1rXnzxdX6ApwZXrx9t/3QIDAQABAoIBABRe3VQN3cq3oaLm +Fj92nZEuz7CWyrhwSy01r2q7b7Kz4d182+tiHP7GPuA282MsbxfUAkmk1Gi91FDp +HMe0CfXdhm7631FLa649NBUi/PAy4FAXd0/OqNVkjAUUokeqUK+6fnuaJgxOcRzq +LDcII9va9Q4d6LyJJ94MNuIm9ZCR/Yg3S3X6TnW+fh6CWw0NL0MV9/7JLPLUZglT +UsFayjNUUxXqrL1OuQ6yyEEVxPtu0rBD9n6s3LGf7iWrmltRaPOkq6feaU741PMV +uF7YUB5oNOVSJNWDFg9cxxJfpO+5I05YA0oiahrrd1jLu+j/1LdKvDSXBy2bLnIu +m3VbigECgYEA5qZdBj/id/HMCoX/Pw/jXzv0OEkOIULc1Jh1il6GUavCYjwPI0KE +tzJUjYfEa7ToymZtcrRYg4qoG7diWHndW4J7hmxj17b+PlwCsoU/TMkxLgw9mmc0 +Qp6fn8VOdGZ4ysTGn80Pn9zRDApy5f29b070cIHjFBXZnREuE0o8hOsCgYEA4vwK +C7JoHFNnxzpDj2aW6JDmNxMRpNOeQkC5rRR6uDjTHdaGq3WI0aGqvc6l47kIcb9w +MJiapHWCzJNc56jqmb/lgDku4sGRs5g6meOYENCYf9aKZzG9fkG/gGZf3eg2Yp2z +KwfKsk4g1HUdwIcC6dTQTgsGoPMYReP44R6Z/FcCgYBeb4Us9uExvPWO5XgxiL7O +kkyW8wpvAeJKxTVy9urF665F7FNCW4zdOSU3YXxBoSujGzb6vO50xUO5PWdt1E+W +lSEgU6a5frowLBoKn9XgCYwyT161pkXWdP3kO7O4ovAYDWNJsHsSOCX7aRfMJQz3 +0vrwSa4A3kVgMtWLnlyTCwKBgQDKfpLvsG9Upcu1XnMbISiLvYjDpU1eQDO1Y0zB +7b01T+x3eASYPbibW6CYyBwSNeYko+aQU/PRt8vCecyuFnGETD+PznPXc1xqXeoZ 
+k4L7rTv/AARk32jvk/Qlti7cJuctvwYx4zefLjf3kavDMC8XL/XNSeTV/UiwQRqs
+qsIw7QKBgDSHMszYPoSaihjPFwIlEDqjk6QNUm4MuISV9/rdRuA+RzBVUOhtWnI0
+Oxh71iELCWxxKn73G0DqIMUfMLvnR7IMBFPS7wn0T13eF9f7LanJYCEXPAJU5OsZ
+RNgLchEoy8xRALwfPxYncMEEzOcvexZWXs+76vZc30BXfvEWuXbS
+-----END RSA PRIVATE KEY-----
diff --git a/internal/otel_collector/config/configmodels/configmodels.go b/internal/otel_collector/config/configmodels/configmodels.go
new file mode 100644
index 00000000000..3084eab5b16
--- /dev/null
+++ b/internal/otel_collector/config/configmodels/configmodels.go
@@ -0,0 +1,223 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package configmodels defines the data models for configuration entities. This file defines the
+// models for the configuration format. The defined entities are:
+// Config (the top-level structure), Receivers, Exporters, Processors, Pipelines.
+package configmodels
+
+/*
+Receivers, Exporters and Processors typically have common configuration settings; however,
+specific implementations sometimes have extra configuration settings.
+This requires the configuration data for these entities to be polymorphic.
+
+To satisfy these requirements we declare the interfaces Receiver, Exporter, Processor,
+which define the behavior. We also provide the helper structs ReceiverSettings, ExporterSettings,
+ProcessorSettings, which define the common settings and their unmarshaling from config files.
+
+Specific Receivers/Exporters/Processors are expected to, at a minimum, implement the
+corresponding interface and, if they have additional settings, to also extend
+the corresponding common settings struct (the easiest approach is to embed the common struct).
+*/
+
+// Config defines the configuration for the various elements of the collector or agent.
+type Config struct {
+	Receivers
+	Exporters
+	Processors
+	Extensions
+	Service
+}
+
+// Type is the component type as it is used in the config.
+type Type string
+
+// NamedEntity is a configuration entity that has a type and a name.
+type NamedEntity interface {
+	Type() Type
+	Name() string
+	SetName(name string)
+}
+
+// Receiver is the configuration of a receiver. Specific receivers must implement this
+// interface and will typically embed ReceiverSettings struct or a struct that extends it.
+type Receiver interface {
+	NamedEntity
+}
+
+// Receivers is a map of names to Receivers.
+type Receivers map[string]Receiver
+
+// Exporter is the configuration of an exporter.
+type Exporter interface {
+	NamedEntity
+}
+
+// Exporters is a map of names to Exporters.
+type Exporters map[string]Exporter
+
+// Processor is the configuration of a processor. Specific processors must implement this
+// interface and will typically embed ProcessorSettings struct or a struct that extends it.
+type Processor interface {
+	NamedEntity
+}
+
+// Processors is a map of names to Processors.
+type Processors map[string]Processor
+
+// DataType is the data type that is supported for collection.
We currently support +// collecting metrics, traces and logs, this can expand in the future. + +type DataType string + +// Currently supported data types. Add new data types here when new types are supported in the future. +const ( + // TracesDataType is the data type tag for traces. + TracesDataType DataType = "traces" + + // MetricsDataType is the data type tag for metrics. + MetricsDataType DataType = "metrics" + + // LogsDataType is the data type tag for logs. + LogsDataType DataType = "logs" +) + +// Pipeline defines a single pipeline. +type Pipeline struct { + Name string `mapstructure:"-"` + InputType DataType `mapstructure:"-"` + Receivers []string `mapstructure:"receivers"` + Processors []string `mapstructure:"processors"` + Exporters []string `mapstructure:"exporters"` +} + +// Pipelines is a map of names to Pipelines. +type Pipelines map[string]*Pipeline + +// Extension is the configuration of a service extension. Specific extensions +// must implement this interface and will typically embed ExtensionSettings +// struct or a struct that extends it. +type Extension interface { + NamedEntity +} + +// Extensions is a map of names to extensions. +type Extensions map[string]Extension + +// Service defines the configurable components of the service. +type Service struct { + // Extensions is the ordered list of extensions configured for the service. + Extensions []string `mapstructure:"extensions"` + + // Pipelines is the set of data pipelines configured for the service. + Pipelines Pipelines `mapstructure:"pipelines"` +} + +// Below are common setting structs for Receivers, Exporters and Processors. +// These are helper structs which you can embed when implementing your specific +// receiver/exporter/processor config storage. + +// ReceiverSettings defines common settings for a receiver configuration. +// Specific receivers can embed this struct and extend it with more fields if needed. +type ReceiverSettings struct { + TypeVal Type `mapstructure:"-"` + NameVal string `mapstructure:"-"` +} + +// Name gets the receiver name. +func (rs *ReceiverSettings) Name() string { + return rs.NameVal +} + +// SetName sets the receiver name. +func (rs *ReceiverSettings) SetName(name string) { + rs.NameVal = name +} + +// Type sets the receiver type. +func (rs *ReceiverSettings) Type() Type { + return rs.TypeVal +} + +// ExporterSettings defines common settings for an exporter configuration. +// Specific exporters can embed this struct and extend it with more fields if needed. +type ExporterSettings struct { + TypeVal Type `mapstructure:"-"` + NameVal string `mapstructure:"-"` +} + +var _ Exporter = (*ExporterSettings)(nil) + +// Name gets the exporter name. +func (es *ExporterSettings) Name() string { + return es.NameVal +} + +// SetName sets the exporter name. +func (es *ExporterSettings) SetName(name string) { + es.NameVal = name +} + +// Type sets the exporter type. +func (es *ExporterSettings) Type() Type { + return es.TypeVal +} + +// ProcessorSettings defines common settings for a processor configuration. +// Specific processors can embed this struct and extend it with more fields if needed. +type ProcessorSettings struct { + TypeVal Type `mapstructure:"-"` + NameVal string `mapstructure:"-"` +} + +// Name gets the processor name. +func (proc *ProcessorSettings) Name() string { + return proc.NameVal +} + +// SetName sets the processor name. +func (proc *ProcessorSettings) SetName(name string) { + proc.NameVal = name +} + +// Type sets the processor type. 
+func (proc *ProcessorSettings) Type() Type { + return proc.TypeVal +} + +var _ Processor = (*ProcessorSettings)(nil) + +// ExtensionSettings defines common settings for a service extension configuration. +// Specific extensions can embed this struct and extend it with more fields if needed. +type ExtensionSettings struct { + TypeVal Type `mapstructure:"-"` + NameVal string `mapstructure:"-"` +} + +// Name gets the extension name. +func (ext *ExtensionSettings) Name() string { + return ext.NameVal +} + +// SetName sets the extension name. +func (ext *ExtensionSettings) SetName(name string) { + ext.NameVal = name +} + +// Type sets the extension type. +func (ext *ExtensionSettings) Type() Type { + return ext.TypeVal +} + +var _ Extension = (*ExtensionSettings)(nil) diff --git a/internal/otel_collector/config/confignet/README.md b/internal/otel_collector/config/confignet/README.md new file mode 100644 index 00000000000..2a4053da59f --- /dev/null +++ b/internal/otel_collector/config/confignet/README.md @@ -0,0 +1,18 @@ +# Network Configuration Settings + +[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/master/receiver/README.md) +leverage network configuration to set connection and transport information. + +- `endpoint`: Configures the address for this network connection. For TCP and + UDP networks, the address has the form "host:port". The host must be a + literal IP address, or a host name that can be resolved to IP addresses. The + port must be a literal port number or a service name. If the host is a + literal IPv6 address it must be enclosed in square brackets, as in + "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of + the literal IPv6 address as defined in RFC 4007. +- `transport`: Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" + (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" + (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". + +Note that for TCP receivers only the `endpoint` configuration setting is +required. diff --git a/internal/otel_collector/config/confignet/confignet.go b/internal/otel_collector/config/confignet/confignet.go new file mode 100644 index 00000000000..ec55a5a01e5 --- /dev/null +++ b/internal/otel_collector/config/confignet/confignet.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confignet + +import ( + "net" +) + +// NetAddr represents a network endpoint address. +type NetAddr struct { + // Endpoint configures the address for this network connection. + // For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, + // or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. + // If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or + // "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. 
+	Endpoint string `mapstructure:"endpoint"`
+
+	// Transport to use. Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only),
+	// "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket".
+	Transport string `mapstructure:"transport"`
+}
+
+// Dial opens a connection to the configured endpoint using the configured transport.
+func (na *NetAddr) Dial() (net.Conn, error) {
+	return net.Dial(na.Transport, na.Endpoint)
+}
+
+// Listen announces on the configured endpoint using the configured transport.
+func (na *NetAddr) Listen() (net.Listener, error) {
+	return net.Listen(na.Transport, na.Endpoint)
+}
+
+// TCPAddr represents a TCP endpoint address.
+type TCPAddr struct {
+	// Endpoint configures the address for this network connection.
+	// The address has the form "host:port". The host must be a literal IP address, or a host name that can be
+	// resolved to IP addresses. The port must be a literal port number or a service name.
+	// If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or
+	// "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.
+	Endpoint string `mapstructure:"endpoint"`
+}
+
+// Dial opens a TCP connection to the configured endpoint.
+func (na *TCPAddr) Dial() (net.Conn, error) {
+	return net.Dial("tcp", na.Endpoint)
+}
+
+// Listen announces on the configured endpoint over TCP.
+func (na *TCPAddr) Listen() (net.Listener, error) {
+	return net.Listen("tcp", na.Endpoint)
+}
diff --git a/internal/otel_collector/config/confignet/confignet_test.go b/internal/otel_collector/config/confignet/confignet_test.go
new file mode 100644
index 00000000000..885801fdd27
--- /dev/null
+++ b/internal/otel_collector/config/confignet/confignet_test.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
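Before the confignet unit tests, it helps to see how the pieces defined so far compose. The sketch below is illustrative only: the package name `myreceiver` and the `ExtraSetting` field are invented for the example, and the `squash` tags mirror the convention used elsewhere in this patch. The `componenttest.ExampleReceiver` exercised by the configtest tests later in this diff follows the same pattern, embedding `configmodels.ReceiverSettings` for type/name handling and `confignet.TCPAddr` for the `endpoint` key.

```go
package myreceiver

import (
	"go.opentelemetry.io/collector/config/configmodels"
	"go.opentelemetry.io/collector/config/confignet"
)

// Config is a hypothetical receiver configuration. Embedding
// ReceiverSettings provides Type/Name/SetName, satisfying the
// configmodels.Receiver interface; TCPAddr contributes the
// "endpoint" setting parsed from the config file.
type Config struct {
	configmodels.ReceiverSettings `mapstructure:",squash"`
	confignet.TCPAddr             `mapstructure:",squash"`

	// ExtraSetting is an invented, component-specific option.
	ExtraSetting string `mapstructure:"extra"`
}

// Compile-time check that *Config implements the Receiver interface.
var _ configmodels.Receiver = (*Config)(nil)
```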
+ +package confignet + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNetAddr(t *testing.T) { + nas := &NetAddr{ + Endpoint: "localhost:0", + Transport: "tcp", + } + ln, err := nas.Listen() + assert.NoError(t, err) + done := make(chan bool, 1) + + go func() { + conn, errGo := ln.Accept() + assert.NoError(t, errGo) + buf := make([]byte, 10) + var numChr int + numChr, errGo = conn.Read(buf) + assert.NoError(t, errGo) + assert.Equal(t, "test", string(buf[:numChr])) + assert.NoError(t, conn.Close()) + done <- true + }() + + nac := &NetAddr{ + Endpoint: ln.Addr().String(), + Transport: "tcp", + } + var conn net.Conn + conn, err = nac.Dial() + assert.NoError(t, err) + _, err = conn.Write([]byte("test")) + assert.NoError(t, err) + assert.NoError(t, conn.Close()) + <-done + assert.NoError(t, ln.Close()) +} + +func TestTcpAddr(t *testing.T) { + nas := &TCPAddr{ + Endpoint: "localhost:0", + } + ln, err := nas.Listen() + assert.NoError(t, err) + done := make(chan bool, 1) + + go func() { + conn, errGo := ln.Accept() + assert.NoError(t, errGo) + buf := make([]byte, 10) + var numChr int + numChr, errGo = conn.Read(buf) + assert.NoError(t, errGo) + assert.Equal(t, "test", string(buf[:numChr])) + assert.NoError(t, conn.Close()) + done <- true + }() + + nac := &TCPAddr{ + Endpoint: ln.Addr().String(), + } + var conn net.Conn + conn, err = nac.Dial() + assert.NoError(t, err) + _, err = conn.Write([]byte("test")) + assert.NoError(t, err) + assert.NoError(t, conn.Close()) + <-done + assert.NoError(t, ln.Close()) +} diff --git a/internal/otel_collector/config/configtelemetry/configtelemetry.go b/internal/otel_collector/config/configtelemetry/configtelemetry.go new file mode 100644 index 00000000000..0c01c70dbd0 --- /dev/null +++ b/internal/otel_collector/config/configtelemetry/configtelemetry.go @@ -0,0 +1,127 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configtelemetry + +import ( + "flag" + "fmt" + "strings" +) + +const ( + // LevelNone indicates that no telemetry data should be collected. + LevelNone Level = iota - 1 + // LevelBasic is the recommended and covers the basics of the service telemetry. + LevelBasic + // LevelNormal adds some other indicators on top of basic. + LevelNormal + // LevelDetailed adds dimensions and views to the previous levels. + LevelDetailed + + levelNoneStr = "none" + levelBasicStr = "basic" + levelNormalStr = "normal" + levelDetailedStr = "detailed" + + metricsLevelCfg = "metrics-level" +) + +var metricsLevelPtr = new(Level) + +// Flags is a helper func, to add the telemetry config flags to the service that exposes +// the application flags. +func Flags(flags *flag.FlagSet) { + flags.Var( + metricsLevelPtr, + metricsLevelCfg, + "Output level of telemetry metrics (none, basic, normal, detailed)") +} + +// Level is the level of internal telemetry (metrics, logs, traces about the component itself) +// that every component should generate. 
+type Level int8
+
+var _ flag.Value = (*Level)(nil)
+
+func (l *Level) String() string {
+	switch *l {
+	case LevelNone:
+		return levelNoneStr
+	case LevelBasic:
+		return levelBasicStr
+	case LevelNormal:
+		return levelNormalStr
+	case LevelDetailed:
+		return levelDetailedStr
+	}
+	return "unknown"
+}
+
+func (l *Level) Set(s string) error {
+	lvl, err := parseLevel(s)
+	if err != nil {
+		return err
+	}
+	*l = lvl
+	return nil
+}
+
+// GetMetricsLevelFlagValue returns the value of the "--metrics-level" flag.
+// IMPORTANT: This must be used only in the core collector code for the moment.
+func GetMetricsLevelFlagValue() Level {
+	return *metricsLevelPtr
+}
+
+// TelemetrySetting exposes the common Telemetry configuration for one component.
+type TelemetrySetting struct {
+	// MetricsLevelStr is the level of telemetry metrics; the possible values are:
+	// - "none" indicates that no telemetry data should be collected;
+	// - "basic" is the recommended and covers the basics of the service telemetry.
+	// - "normal" adds some other indicators on top of basic.
+	// - "detailed" adds dimensions and views to the previous levels.
+	MetricsLevelStr string `mapstructure:"metrics_level"`
+}
+
+// DefaultTelemetrySetting returns the default TelemetrySetting.
+// The level is set to the "--metrics-level" flag if set, otherwise the default "basic" level.
+func DefaultTelemetrySetting() TelemetrySetting {
+	return TelemetrySetting{
+		MetricsLevelStr: metricsLevelPtr.String(),
+	}
+}
+
+// GetMetricsLevel returns the parsed level, or an error if the value is unknown.
+// An empty string is considered an unknown value.
+func (ts TelemetrySetting) GetMetricsLevel() (Level, error) {
+	return parseLevel(ts.MetricsLevelStr)
+}
+
+// parseLevel returns the Level represented by the string. The parsing is case-insensitive
+// and it returns an error if the string value is unknown.
+func parseLevel(str string) (Level, error) {
+	str = strings.ToLower(str)
+
+	switch str {
+	case levelNoneStr:
+		return LevelNone, nil
+	case levelBasicStr:
+		return LevelBasic, nil
+	case levelNormalStr:
+		return LevelNormal, nil
+	case levelDetailedStr:
+		return LevelDetailed, nil
+	}
+	return LevelNone, fmt.Errorf("unknown metrics level %q", str)
+}
diff --git a/internal/otel_collector/config/configtelemetry/configtelemetry_test.go b/internal/otel_collector/config/configtelemetry/configtelemetry_test.go
new file mode 100644
index 00000000000..4512bb5750b
--- /dev/null
+++ b/internal/otel_collector/config/configtelemetry/configtelemetry_test.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
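To make the flag plumbing concrete, here is a minimal, hedged sketch (the FlagSet name and the argument vector are arbitrary) that registers the telemetry flag and reads the parsed level back through the package-level getter defined above:

```go
package main

import (
	"flag"
	"fmt"

	"go.opentelemetry.io/collector/config/configtelemetry"
)

func main() {
	// Register --metrics-level on a fresh FlagSet, as the collector's
	// service startup does.
	fs := flag.NewFlagSet("otelcol", flag.ExitOnError)
	configtelemetry.Flags(fs)

	// Parsing is case-insensitive: "DETAILED" would work as well.
	if err := fs.Parse([]string{"--metrics-level=detailed"}); err != nil {
		panic(err)
	}

	lvl := configtelemetry.GetMetricsLevelFlagValue()
	fmt.Println(lvl.String()) // detailed
}
```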
+ +package configtelemetry + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseFrom(t *testing.T) { + tests := []struct { + str string + level Level + err bool + }{ + { + str: "", + level: LevelNone, + err: true, + }, + { + str: "other_string", + level: LevelNone, + err: true, + }, + { + str: levelNoneStr, + level: LevelNone, + }, + { + str: levelBasicStr, + level: LevelBasic, + }, + { + str: levelNormalStr, + level: LevelNormal, + }, + { + str: levelDetailedStr, + level: LevelDetailed, + }, + } + + for _, test := range tests { + t.Run(test.str, func(t *testing.T) { + lvl, err := parseLevel(test.str) + if test.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.level, lvl) + }) + } +} + +func TestLevelSet(t *testing.T) { + tests := []struct { + str string + level Level + err bool + }{ + { + str: "", + level: LevelNone, + err: true, + }, + { + str: "other_string", + level: LevelNone, + err: true, + }, + { + str: levelNoneStr, + level: LevelNone, + }, + { + str: levelBasicStr, + level: LevelBasic, + }, + { + str: levelNormalStr, + level: LevelNormal, + }, + { + str: levelDetailedStr, + level: LevelDetailed, + }, + } + for _, test := range tests { + t.Run(test.str, func(t *testing.T) { + lvl := new(Level) + err := lvl.Set(test.str) + if test.err { + assert.Error(t, err) + assert.Equal(t, LevelBasic, *lvl) + } else { + assert.NoError(t, err) + assert.Equal(t, test.level, *lvl) + } + }) + } +} + +func TestLevelString(t *testing.T) { + tests := []struct { + str string + level Level + err bool + }{ + { + str: "unknown", + level: Level(-10), + }, + { + str: levelNoneStr, + level: LevelNone, + }, + { + str: levelBasicStr, + level: LevelBasic, + }, + { + str: levelNormalStr, + level: LevelNormal, + }, + { + str: levelDetailedStr, + level: LevelDetailed, + }, + } + for _, test := range tests { + t.Run(test.str, func(t *testing.T) { + assert.Equal(t, test.str, test.level.String()) + }) + } +} + +func TestTelemetrySettings(t *testing.T) { + ts := &TelemetrySetting{ + MetricsLevelStr: "unknown", + } + _, err := ts.GetMetricsLevel() + assert.Error(t, err) +} + +func TestDefaultTelemetrySettings(t *testing.T) { + ts := DefaultTelemetrySetting() + assert.Equal(t, levelBasicStr, ts.MetricsLevelStr) + lvl, err := ts.GetMetricsLevel() + require.NoError(t, err) + assert.Equal(t, LevelBasic, lvl) +} diff --git a/internal/otel_collector/config/configtest/configtest.go b/internal/otel_collector/config/configtest/configtest.go new file mode 100644 index 00000000000..3d75e3e5d35 --- /dev/null +++ b/internal/otel_collector/config/configtest/configtest.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
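For completeness, a short sketch of how a component might consume TelemetrySetting directly. The error branch matters: as the tests above demonstrate, an empty or unknown metrics_level string is rejected rather than silently defaulted.

```go
package main

import (
	"fmt"
	"log"

	"go.opentelemetry.io/collector/config/configtelemetry"
)

func main() {
	// Defaults to the --metrics-level flag value, or "basic" when unset.
	ts := configtelemetry.DefaultTelemetrySetting()

	lvl, err := ts.GetMetricsLevel()
	if err != nil {
		// Empty or unrecognized metrics_level strings end up here.
		log.Fatalf("invalid telemetry settings: %v", err)
	}

	// Levels are ordered, so components can gate work on a threshold.
	if lvl >= configtelemetry.LevelNormal {
		fmt.Println("emitting extended telemetry")
	}
}
```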
+ +package configtest + +import ( + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configmodels" +) + +// NewViperFromYamlFile creates a viper instance that reads the given fileName as yaml config +// and can then be used to unmarshal the file contents to objects. +// Example usage for testing can be found in configtest_test.go +func NewViperFromYamlFile(t *testing.T, fileName string) *viper.Viper { + // Read yaml config from file + v := config.NewViper() + v.SetConfigFile(fileName) + require.NoErrorf(t, v.ReadInConfig(), "unable to read the file %v", fileName) + + return v +} + +// LoadConfigFile loads a config from file. +func LoadConfigFile(t *testing.T, fileName string, factories component.Factories) (*configmodels.Config, error) { + v := NewViperFromYamlFile(t, fileName) + + // Load the config from viper using the given factories. + cfg, err := config.Load(v, factories) + if err != nil { + return nil, err + } + return cfg, config.ValidateConfig(cfg, zap.NewNop()) +} diff --git a/internal/otel_collector/config/configtest/configtest_test.go b/internal/otel_collector/config/configtest/configtest_test.go new file mode 100644 index 00000000000..e5680f572bc --- /dev/null +++ b/internal/otel_collector/config/configtest/configtest_test.go @@ -0,0 +1,127 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configtest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" +) + +func TestLoadConfigFile(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + cfg, err := LoadConfigFile(t, "testdata/config.yaml", factories) + require.NoError(t, err, "Unable to load config") + + // Verify extensions. + assert.Equal(t, 3, len(cfg.Extensions)) + assert.Equal(t, "some string", cfg.Extensions["exampleextension/1"].(*componenttest.ExampleExtensionCfg).ExtraSetting) + + // Verify service. 
+ assert.Equal(t, 2, len(cfg.Service.Extensions)) + assert.Equal(t, "exampleextension/0", cfg.Service.Extensions[0]) + assert.Equal(t, "exampleextension/1", cfg.Service.Extensions[1]) + + // Verify receivers + assert.Equal(t, 2, len(cfg.Receivers), "Incorrect receivers count") + + assert.Equal(t, + &componenttest.ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "examplereceiver", + NameVal: "examplereceiver", + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:1000", + }, + ExtraSetting: "some string", + }, + cfg.Receivers["examplereceiver"], + "Did not load receiver config correctly") + + assert.Equal(t, + &componenttest.ExampleReceiver{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: "examplereceiver", + NameVal: "examplereceiver/myreceiver", + }, + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:12345", + }, + ExtraSetting: "some string", + }, + cfg.Receivers["examplereceiver/myreceiver"], + "Did not load receiver config correctly") + + // Verify exporters + assert.Equal(t, 2, len(cfg.Exporters), "Incorrect exporters count") + + assert.Equal(t, + &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter", + TypeVal: "exampleexporter", + }, + ExtraSetting: "some export string", + }, + cfg.Exporters["exampleexporter"], + "Did not load exporter config correctly") + + assert.Equal(t, + &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter/myexporter", + TypeVal: "exampleexporter", + }, + ExtraSetting: "some export string 2", + }, + cfg.Exporters["exampleexporter/myexporter"], + "Did not load exporter config correctly") + + // Verify Processors + assert.Equal(t, 1, len(cfg.Processors), "Incorrect processors count") + + assert.Equal(t, + &componenttest.ExampleProcessorCfg{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "exampleprocessor", + NameVal: "exampleprocessor", + }, + ExtraSetting: "some export string", + }, + cfg.Processors["exampleprocessor"], + "Did not load processor config correctly") + + // Verify Pipelines + assert.Equal(t, 1, len(cfg.Service.Pipelines), "Incorrect pipelines count") + + assert.Equal(t, + &configmodels.Pipeline{ + Name: "traces", + InputType: configmodels.TracesDataType, + Receivers: []string{"examplereceiver"}, + Processors: []string{"exampleprocessor"}, + Exporters: []string{"exampleexporter"}, + }, + cfg.Service.Pipelines["traces"], + "Did not load pipeline config correctly") +} diff --git a/internal/otel_collector/config/configtest/testdata/config.yaml b/internal/otel_collector/config/configtest/testdata/config.yaml new file mode 100644 index 00000000000..da82f64eb33 --- /dev/null +++ b/internal/otel_collector/config/configtest/testdata/config.yaml @@ -0,0 +1,28 @@ +receivers: + examplereceiver: + examplereceiver/myreceiver: + endpoint: "localhost:12345" + extra: "some string" + +processors: + exampleprocessor: + +exporters: + exampleexporter/myexporter: + extra: "some export string 2" + exampleexporter: + +extensions: + exampleextension/0: + exampleextension/disabled: + extra: "not present in the service" + exampleextension/1: + extra: "some string" + +service: + extensions: [exampleextension/0, exampleextension/1] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configtls/README.md b/internal/otel_collector/config/configtls/README.md new file mode 100644 index 
00000000000..7eda60cd074
--- /dev/null
+++ b/internal/otel_collector/config/configtls/README.md
@@ -0,0 +1,111 @@
+# TLS Configuration Settings
+
+Go's `crypto/tls` package exposes a [variety of settings](https://godoc.org/crypto/tls).
+Several of these settings are available for configuration within individual
+receivers or exporters.
+
+Note that mutual TLS (mTLS) is also supported.
+
+## TLS / mTLS Configuration
+
+By default, TLS is enabled:
+
+- `insecure` (default = false): whether to enable client transport security for
+  the exporter's gRPC connection. See
+  [grpc.WithInsecure()](https://godoc.org/google.golang.org/grpc#WithInsecure).
+
+As a result, the following parameters are also required:
+
+- `cert_file`: Path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file`: Path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+A certificate authority may also need to be defined:
+
+- `ca_file`: Path to the CA cert. For a client this verifies the server
+  certificate. For a server this verifies client certificates. If empty, the
+  system root CA is used. Should only be used if `insecure` is set to false.
+
+Additionally, you can configure TLS to be enabled but skip verifying the server's
+certificate chain. This cannot be combined with `insecure` since `insecure`
+won't use TLS at all.
+
+- `insecure_skip_verify` (default = false): whether to skip verifying the
+  certificate or not.
+
+How TLS/mTLS is configured depends on whether you are configuring a client or a
+server. See below for examples.
+
+## Client Configuration
+
+[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/README.md)
+leverage client configuration.
+
+Note that client configuration supports TLS configuration, however
+configuration parameters are not defined under `tls_settings` like server
+configuration. For more information, see [configtls
+README](../configtls/README.md).
+
+Beyond TLS configuration, the following setting can optionally be configured:
+
+- `server_name_override`: If set to a non-empty string, it will override the
+  virtual host name of authority (e.g. :authority header field) in requests
+  (typically used for testing).
+
+Example:
+
+```yaml
+exporters:
+  otlp:
+    endpoint: myserver.local:55690
+    insecure: false
+    ca_file: server.crt
+    cert_file: client.crt
+    key_file: client.key
+  otlp/insecure:
+    endpoint: myserver.local:55690
+    insecure: true
+  otlp/secure_no_verify:
+    endpoint: myserver.local:55690
+    insecure: false
+    insecure_skip_verify: true
+```
+
+## Server Configuration
+
+[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/master/receiver/README.md)
+leverage server configuration.
+
+Beyond TLS configuration, the following setting can optionally be configured
+(required for mTLS):
+
+- `client_ca_file`: Path to the TLS cert to use by the server to verify a
+  client certificate. (optional) This sets the ClientCAs and ClientAuth to
+  RequireAndVerifyClientCert in the TLSConfig. Please refer to
+  https://godoc.org/crypto/tls#Config for more information.
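In Go terms, these server-side settings resolve to a standard `tls.Config` through `TLSServerSetting.LoadTLSConfig`, defined in configtls.go later in this patch. A minimal sketch, assuming the certificate files exist at the illustrative paths used here:

```go
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/collector/config/configtls"
)

func main() {
	setting := configtls.TLSServerSetting{
		TLSSetting: configtls.TLSSetting{
			CertFile: "server.crt", // illustrative paths
			KeyFile:  "server.key",
		},
		// Setting ClientCAFile enables mutual TLS: ClientAuth becomes
		// tls.RequireAndVerifyClientCert in the resulting config.
		ClientCAFile: "ca.crt",
	}

	tlsCfg, err := setting.LoadTLSConfig()
	if err != nil {
		log.Fatalf("failed to load TLS config: %v", err)
	}

	srv := &http.Server{Addr: ":55690", TLSConfig: tlsCfg}
	// The certificate is already in TLSConfig, so the file arguments
	// to ListenAndServeTLS stay empty.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```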
+
+Example:
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: mysite.local:55690
+        tls_settings:
+          cert_file: server.crt
+          key_file: server.key
+  otlp/mtls:
+    protocols:
+      grpc:
+        client_ca_file: client.pem
+        endpoint: mysite.local:55690
+        tls_settings:
+          cert_file: server.crt
+          key_file: server.key
+  otlp/notls:
+    protocols:
+      grpc:
+        endpoint: mysite.local:55690
+```
diff --git a/internal/otel_collector/config/configtls/configtls.go b/internal/otel_collector/config/configtls/configtls.go
new file mode 100644
index 00000000000..2a27dc8117f
--- /dev/null
+++ b/internal/otel_collector/config/configtls/configtls.go
@@ -0,0 +1,151 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configtls
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+)
+
+// TLSSetting exposes the common client and server TLS configurations.
+// Note: since there isn't anything specific to a server connection here,
+// components with server connections should use TLSSetting.
+type TLSSetting struct {
+	// Path to the CA cert. For a client this verifies the server certificate.
+	// For a server this verifies client certificates. If empty, the system root CA is used.
+	// (optional)
+	CAFile string `mapstructure:"ca_file"`
+	// Path to the TLS cert to use for TLS required connections. (optional)
+	CertFile string `mapstructure:"cert_file"`
+	// Path to the TLS key to use for TLS required connections. (optional)
+	KeyFile string `mapstructure:"key_file"`
+}
+
+// TLSClientSetting contains TLS configurations that are specific to client
+// connections in addition to the common configurations. This should be used by
+// components configuring TLS client connections.
+type TLSClientSetting struct {
+	TLSSetting `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// These are config options specific to client connections.
+
+	// In gRPC when set to true, this is used to disable the client transport security.
+	// See https://godoc.org/google.golang.org/grpc#WithInsecure.
+	// In HTTP, this disables verifying the server's certificate chain and host name
+	// (InsecureSkipVerify in the tls Config). Please refer to
+	// https://godoc.org/crypto/tls#Config for more information.
+	// (optional, default false)
+	Insecure bool `mapstructure:"insecure"`
+	// InsecureSkipVerify will enable TLS but not verify the certificate.
+	InsecureSkipVerify bool `mapstructure:"insecure_skip_verify"`
+	// ServerName requested by client for virtual hosting.
+	// This sets the ServerName in the TLSConfig. Please refer to
+	// https://godoc.org/crypto/tls#Config for more information. (optional)
+	ServerName string `mapstructure:"server_name_override"`
+}
+
+// TLSServerSetting contains TLS configurations that are specific to server
+// connections in addition to the common configurations. This should be used by
+// components configuring TLS server connections.
+type TLSServerSetting struct {
+	TLSSetting `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// These are config options specific to server connections.
+
+	// Path to the TLS cert to use by the server to verify a client certificate. (optional)
+	// This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to
+	// https://godoc.org/crypto/tls#Config for more information.
+	ClientCAFile string `mapstructure:"client_ca_file"`
+}
+
+// loadTLSConfig loads TLS certificates and returns a tls.Config.
+// This will set the RootCAs and Certificates of a tls.Config.
+func (c TLSSetting) loadTLSConfig() (*tls.Config, error) {
+	// There is no need to load the System Certs for RootCAs because
+	// if the value is nil, it will default to checking against the System Certs.
+	var err error
+	var certPool *x509.CertPool
+	if len(c.CAFile) != 0 {
+		// setup user specified truststore
+		certPool, err = c.loadCert(c.CAFile)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load CA CertPool: %w", err)
+		}
+	}
+
+	if (c.CertFile == "" && c.KeyFile != "") || (c.CertFile != "" && c.KeyFile == "") {
+		return nil, fmt.Errorf("for auth via TLS, either both certificate and key must be supplied, or neither")
+	}
+
+	var certificates []tls.Certificate
+	if c.CertFile != "" && c.KeyFile != "" {
+		tlsCert, err := tls.LoadX509KeyPair(filepath.Clean(c.CertFile), filepath.Clean(c.KeyFile))
+		if err != nil {
+			return nil, fmt.Errorf("failed to load TLS cert and key: %w", err)
+		}
+		certificates = append(certificates, tlsCert)
+	}
+
+	return &tls.Config{
+		RootCAs:      certPool,
+		Certificates: certificates,
+	}, nil
+}
+
+func (c TLSSetting) loadCert(caPath string) (*x509.CertPool, error) {
+	caPEM, err := ioutil.ReadFile(filepath.Clean(caPath))
+	if err != nil {
+		return nil, fmt.Errorf("failed to load CA %s: %w", caPath, err)
+	}
+
+	certPool := x509.NewCertPool()
+	if !certPool.AppendCertsFromPEM(caPEM) {
+		return nil, fmt.Errorf("failed to parse CA %s", caPath)
+	}
+	return certPool, nil
+}
+
+// LoadTLSConfig loads the TLS configuration for a client connection. It returns
+// a nil tls.Config when the connection is insecure and no CA file is configured.
+func (c TLSClientSetting) LoadTLSConfig() (*tls.Config, error) {
+	if c.Insecure && c.CAFile == "" {
+		return nil, nil
+	}
+
+	tlsCfg, err := c.TLSSetting.loadTLSConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to load TLS config: %w", err)
+	}
+	tlsCfg.ServerName = c.ServerName
+	tlsCfg.InsecureSkipVerify = c.InsecureSkipVerify
+	return tlsCfg, nil
+}
+
+// LoadTLSConfig loads the TLS configuration for a server connection. When
+// ClientCAFile is set, client certificates are required and verified (mTLS).
+func (c TLSServerSetting) LoadTLSConfig() (*tls.Config, error) {
+	tlsCfg, err := c.loadTLSConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to load TLS config: %w", err)
+	}
+	if c.ClientCAFile != "" {
+		certPool, err := c.loadCert(c.ClientCAFile)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load TLS config: failed to load client CA CertPool: %w", err)
+		}
+		tlsCfg.ClientCAs = certPool
+		tlsCfg.ClientAuth = tls.RequireAndVerifyClientCert
+	}
+	return tlsCfg, nil
+}
diff --git a/internal/otel_collector/config/configtls/configtls_test.go b/internal/otel_collector/config/configtls/configtls_test.go
new file mode 100644
index 00000000000..9ebc88fd213
--- /dev/null
+++ b/internal/otel_collector/config/configtls/configtls_test.go
@@ -0,0 +1,181 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configtls + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOptionsToConfig(t *testing.T) { + tests := []struct { + name string + options TLSSetting + expectError string + }{ + { + name: "should load system CA", + options: TLSSetting{CAFile: ""}, + }, + { + name: "should load custom CA", + options: TLSSetting{CAFile: "testdata/testCA.pem"}, + }, + { + name: "should fail with invalid CA file path", + options: TLSSetting{CAFile: "testdata/not/valid"}, + expectError: "failed to load CA", + }, + { + name: "should fail with invalid CA file content", + options: TLSSetting{CAFile: "testdata/testCA-bad.txt"}, + expectError: "failed to parse CA", + }, + { + name: "should load valid TLS settings", + options: TLSSetting{ + CAFile: "testdata/testCA.pem", + CertFile: "testdata/test-cert.pem", + KeyFile: "testdata/test-key.pem", + }, + }, + { + name: "should fail with missing TLS KeyFile", + options: TLSSetting{ + CAFile: "testdata/testCA.pem", + CertFile: "testdata/test-cert.pem", + }, + expectError: "both certificate and key must be supplied", + }, + { + name: "should fail with invalid TLS KeyFile", + options: TLSSetting{ + CAFile: "testdata/testCA.pem", + CertFile: "testdata/test-cert.pem", + KeyFile: "testdata/not/valid", + }, + expectError: "failed to load TLS cert and key", + }, + { + name: "should fail with missing TLS Cert", + options: TLSSetting{ + CAFile: "testdata/testCA.pem", + KeyFile: "testdata/test-key.pem", + }, + expectError: "both certificate and key must be supplied", + }, + { + name: "should fail with invalid TLS Cert", + options: TLSSetting{ + CAFile: "testdata/testCA.pem", + CertFile: "testdata/not/valid", + KeyFile: "testdata/test-key.pem", + }, + expectError: "failed to load TLS cert and key", + }, + { + name: "should fail with invalid TLS CA", + options: TLSSetting{ + CAFile: "testdata/not/valid", + }, + expectError: "failed to load CA", + }, + { + name: "should fail with invalid CA pool", + options: TLSSetting{ + CAFile: "testdata/testCA-bad.txt", + }, + expectError: "failed to parse CA", + }, + { + name: "should pass with valid CA pool", + options: TLSSetting{ + CAFile: "testdata/testCA.pem", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg, err := test.options.loadTLSConfig() + if test.expectError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.expectError) + } else { + require.NoError(t, err) + assert.NotNil(t, cfg) + } + }) + } +} + +func TestLoadTLSClientConfigError(t *testing.T) { + tlsSetting := TLSClientSetting{ + TLSSetting: TLSSetting{ + CertFile: "doesnt/exist", + KeyFile: "doesnt/exist", + }, + } + _, err := tlsSetting.LoadTLSConfig() + assert.Error(t, err) +} + +func TestLoadTLSClientConfig(t *testing.T) { + tlsSetting := TLSClientSetting{ + Insecure: true, + } + tlsCfg, err := tlsSetting.LoadTLSConfig() + assert.NoError(t, err) + assert.Nil(t, tlsCfg) + + tlsSetting = TLSClientSetting{} + tlsCfg, err = tlsSetting.LoadTLSConfig() + assert.NoError(t, err) + assert.NotNil(t, tlsCfg) + + tlsSetting = 
TLSClientSetting{ + InsecureSkipVerify: true, + } + tlsCfg, err = tlsSetting.LoadTLSConfig() + assert.NoError(t, err) + assert.NotNil(t, tlsCfg) + assert.True(t, tlsCfg.InsecureSkipVerify) +} + +func TestLoadTLSServerConfigError(t *testing.T) { + tlsSetting := TLSServerSetting{ + TLSSetting: TLSSetting{ + CertFile: "doesnt/exist", + KeyFile: "doesnt/exist", + }, + } + _, err := tlsSetting.LoadTLSConfig() + assert.Error(t, err) + + tlsSetting = TLSServerSetting{ + ClientCAFile: "doesnt/exist", + } + _, err = tlsSetting.LoadTLSConfig() + assert.Error(t, err) +} + +func TestLoadTLSServerConfig(t *testing.T) { + tlsSetting := TLSServerSetting{} + tlsCfg, err := tlsSetting.LoadTLSConfig() + assert.NoError(t, err) + assert.NotNil(t, tlsCfg) +} diff --git a/internal/otel_collector/config/configtls/testdata/test-cert.pem b/internal/otel_collector/config/configtls/testdata/test-cert.pem new file mode 100644 index 00000000000..627628866fa --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/test-cert.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEHjCCAoagAwIBAgIQTUSsPHdq9Uhu2bMD29ThkDANBgkqhkiG9w0BAQsFADBd +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExGTAXBgNVBAsMEHRyaXN0 +YW5AdHJpc3RhbnMxIDAeBgNVBAMMF21rY2VydCB0cmlzdGFuQHRyaXN0YW5zMB4X +DTE5MDYxMTA3NTA0NloXDTI5MDYxMTA3NTA0NlowRDEnMCUGA1UEChMebWtjZXJ0 +IGRldmVsb3BtZW50IGNlcnRpZmljYXRlMRkwFwYDVQQLDBB0cmlzdGFuQHRyaXN0 +YW5zMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6zf2JlSdKTdiYDZV +i1yPnP65/CgqlxMflTP9N2P1W7F1SbvQgiGiSfUyc7NicffLGqqDbK3Q4hvANkRC +wOYc+nXZLL6IAxsZ/QBfud3GG2XhuETT2p84Wlqo55I3wFF+Efb89FRp+IiAy2gj +c275hmie6zDRYNJticmZwBIXfnYvwY66V8Y2jKEAjtf6BEmB8yPxWLhxdgY3FjWR +y3kRLfr6BhxVM2qYtl/gXbyGTFjAv7LgFQa/25OXRevs+VjBWFQiQ89b+YIZPpJB +y8y+02nsRLt9Oy9lWMq1/pEqySDV6T3rrw5rV7TLj2RGNkxbnjk+qmf5mWxYzO5X +QaBqeQIDAQABo3MwcTAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUH +AwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQbdcIL3c/Yr+lR9wLU2FLPJSfk +ZjAbBgNVHREEFDASghBjbGllbnQuamFlZ2VyLmlvMA0GCSqGSIb3DQEBCwUAA4IB +gQBx/tQKqGLQGv90TyQOdKFPPOQ0iU/pXrM0t1Gn55UuvSz6G66IufPQuV+MeW1B +CGcSm12QAjwIvFVPFiBurygQ9eYU/tZW1NCaTSoSRa8KzYMBuDlfqYdS3/7gq2+L +L3b9QZt4rLgaQp0StTlpgCZuKa6N4aK25HQpu+lZ/ZxL1cvLlTGtI2VEWrx9hZ9q +5ImLy063iSc/YqD51XR0LJTkjSdep4sBEGtl5V582ncZAGZQim90hiaPrf3TXVnN +HQuzJNE5pwS637nCcyzmXn07Wh4qcT5vWDmySeN9eDJjfrbM9il11mkGZ9JQYf8Z +S+1562KvxjVVlsegnXaR27tAGkJ40X/OZRC28jLEXIjManDhClZD3uwqlSRtb6/M +ux4+8kqL90msVRlZR5VnUCR5/rZr4ji07NMDVJijI99lRQ5rDbf7Z9CMUpLTXcfd +jJBweUKlFEe3HZ9BfZOU3tLbAdQa2/I420lFVo8mEdu6cpKQpW8fITDvl/71OpQu +FsI= +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configtls/testdata/test-key.pem b/internal/otel_collector/config/configtls/testdata/test-key.pem new file mode 100644 index 00000000000..dc7766f9363 --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/test-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDrN/YmVJ0pN2Jg +NlWLXI+c/rn8KCqXEx+VM/03Y/VbsXVJu9CCIaJJ9TJzs2Jx98saqoNsrdDiG8A2 +RELA5hz6ddksvogDGxn9AF+53cYbZeG4RNPanzhaWqjnkjfAUX4R9vz0VGn4iIDL +aCNzbvmGaJ7rMNFg0m2JyZnAEhd+di/BjrpXxjaMoQCO1/oESYHzI/FYuHF2BjcW +NZHLeREt+voGHFUzapi2X+BdvIZMWMC/suAVBr/bk5dF6+z5WMFYVCJDz1v5ghk+ +kkHLzL7TaexEu307L2VYyrX+kSrJINXpPeuvDmtXtMuPZEY2TFueOT6qZ/mZbFjM +7ldBoGp5AgMBAAECgf854ouw4yHKAtcy1iw3H5A4Eneyli/k/c/H6ANonjDDX+h9 +PLsTSzOk/7JqxrpzUYeqCExPcnb1Ld8fe6zxy69V86p+WGUgXosGuBDWrL0UAP6L +WmTIaGZ11dm7I0CVE3jy8tVNS3jIsM8BP595yNWfPh/dwSXFrgNG5VXw7oLZm8Nd +q4+yybeRT/1dhlz+toV44x1GjfKkxqhnTPZvnyqvg8jYMVQmbsnUlvAyfRr3fh3g 
+zEnzuBW0KPPkNbMyL1Q3QU/8LVf4lQ37pI1887edJmlXtbEuh8QjTVDB/5oi7O5/ +2wxdGDTGIad4kXYG2vsuTVunZZq15BfMVyGkHoECgYEA9h1ROB6AfoxkykvQyJEb +1rOxQVz0tAgwzb25aThkSEXVZ6GgdZabgH4aArCGOEXrVt5hlDoDHC8ZEcyQ+yuF ++wFa2C6SorUkGnBJH9J9umWW+bOa5XigqgMHnpjM9yhNH34UnMSm+VarqczQhVx5 +QqIsbCsT+hbAkhwAgJo64kkCgYEA9KqbQ8YTRMc58n3juX8PIFYrOXsUGhWPG2jo +YoiUXgHSZDvxAsp6AtU2jUXjzjTCaF+h4zhxND3FD2yBLRt/Xx/GYXzmDf+Wx68B +4G0ZW4a+huoIEhsM6WGs7oT/sQxluMFb6G/rOaZEWDNzhYtVGNZTxnxCsd4eWj1j +9zy6RrECgYEA4qWTAyxLxr6Bny58ogfH7Evk4723+AdG8mFS2ww8hbYR1fKpM0C0 +CXuXdnybzjzNgl0e3YMjFBRncNXDehrVspbH0yfokABitBpNrQmKEVq201NMRSB2 +TLqnjK1IrB+oDmVslAYhgqMHSUK9kOLdJLj2UdLF/dxwEN3KtKPTsEkCgYEAhPPU +rY6MV/qfDZvFTL6z3JGWqYStVsNSYcWvSiQH49G/n4JHJIocpT9xhnFtKlfXMNqO +4SeBtK7AT/JZe8aOf4WHyuARL5gtOlNqhKckeW0OScgRHK2gZY4TaAXT4ETpXe2M +4RE4VLp6Nye2ZeJiGr4VBi3uHDOkcMsdcHOKkfECgYEAwEizw5kfhQ79bl9SwPbl +euE0wxUyEu+1lNqqAr6ty+BtfGufOxupzejNKghdpgB/bmuK77G8ikbDh6Ya6pQ1 +++Oes8NSFNiKq7pZOpjOeXRRo/OncBFKRDOX/i4ARWeJ/ZvjYz1fPyQuQiylaeDx +IYDJ4/DyVeyPiVrSQKJ5YLk= +-----END PRIVATE KEY----- diff --git a/internal/otel_collector/config/configtls/testdata/testCA-bad.txt b/internal/otel_collector/config/configtls/testdata/testCA-bad.txt new file mode 100644 index 00000000000..2f4aad65c7f --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/testCA-bad.txt @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- +bad certificate +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configtls/testdata/testCA.pem b/internal/otel_collector/config/configtls/testdata/testCA.pem new file mode 100644 index 00000000000..0a986020542 --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/testCA.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICBzCCAXCgAwIBAgIQNkTaUtOczDHvL2YT/kqScTANBgkqhkiG9w0BAQsFADAX +MRUwEwYDVQQKEwxqYWdlcnRyYWNpbmcwHhcNMTkwMjA4MDYyODAyWhcNMTkwMjA4 +MDcyODAyWjAXMRUwEwYDVQQKEwxqYWdlcnRyYWNpbmcwgZ8wDQYJKoZIhvcNAQEB +BQADgY0AMIGJAoGBAMcOLYflHGbqC1f7+tbnsdfcpd0rEuX65+ab0WzelAgvo988 +yD+j7LDLPIE8IPk/tfqaETZ8h0LRUUTn8F2rW/wgrl/G8Onz0utog38N0elfTifG +Mu7GJCr/+aYM5xbQMDj4Brb4vhnkJF8UBe49fWILhIltUcm1SeKqVX3d1FvpAgMB +AAGjVDBSMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNV +HRMBAf8EBTADAQH/MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG +9w0BAQsFAAOBgQCreFjwpAn1HqJT812JOwoWKrt1NjOKGcz7pvIs1k3DfQVLH2aZ +iPKnCkzNgxMzQtwdgpAOXIAqXyNibvyOAv1C+3QSMLKbuPEHaIxlCuvl1suX/g25 +17x1o3Q64AnPCWOLpN2wjkfZqX7gZ84nsxpqb9Sbw1+2+kqX7dSZ3mfVxQ== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/testdata/duplicate-exporter.yaml b/internal/otel_collector/config/testdata/duplicate-exporter.yaml new file mode 100644 index 00000000000..399be0b7bb5 --- /dev/null +++ b/internal/otel_collector/config/testdata/duplicate-exporter.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter/exp: + exampleexporter/ exp : +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/duplicate-extension.yaml b/internal/otel_collector/config/testdata/duplicate-extension.yaml new file mode 100644 index 00000000000..854d146723e --- /dev/null +++ b/internal/otel_collector/config/testdata/duplicate-extension.yaml @@ -0,0 +1,3 @@ +extensions: + exampleextension/ext: + exampleextension/ ext: diff --git a/internal/otel_collector/config/testdata/duplicate-pipeline.yaml b/internal/otel_collector/config/testdata/duplicate-pipeline.yaml new file mode 100644 
index 00000000000..671991a79d5 --- /dev/null +++ b/internal/otel_collector/config/testdata/duplicate-pipeline.yaml @@ -0,0 +1,16 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces/default: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] + traces/ default: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/duplicate-processor.yaml b/internal/otel_collector/config/testdata/duplicate-processor.yaml new file mode 100644 index 00000000000..5345cec5926 --- /dev/null +++ b/internal/otel_collector/config/testdata/duplicate-processor.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor/ abc: + exampleprocessor/abc: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/duplicate-receiver.yaml b/internal/otel_collector/config/testdata/duplicate-receiver.yaml new file mode 100644 index 00000000000..275054e6772 --- /dev/null +++ b/internal/otel_collector/config/testdata/duplicate-receiver.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver/ 1: + examplereceiver/1: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/empty-config.yaml b/internal/otel_collector/config/testdata/empty-config.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/internal/otel_collector/config/testdata/invalid-exporter-section.yaml b/internal/otel_collector/config/testdata/invalid-exporter-section.yaml new file mode 100644 index 00000000000..fa5e0257b9a --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-exporter-section.yaml @@ -0,0 +1,20 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + unknown_section: exporter +extensions: + exampleextension: +service: + extensions: + - exampleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/testdata/invalid-exporter-sub-config.yaml b/internal/otel_collector/config/testdata/invalid-exporter-sub-config.yaml new file mode 100644 index 00000000000..ac4264869c3 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-exporter-sub-config.yaml @@ -0,0 +1,13 @@ +receivers: + multireceiver: +exporters: + exampleexporter: + tests +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [multireceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/invalid-extension-name.yaml b/internal/otel_collector/config/testdata/invalid-extension-name.yaml new file mode 100644 index 00000000000..8ed98292506 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-extension-name.yaml @@ -0,0 +1,15 @@ +extensions: + exampleextension: +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +service: + extensions: [exampleextension, nosuchextension, and, another, three] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + 
exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/invalid-extension-section.yaml b/internal/otel_collector/config/testdata/invalid-extension-section.yaml new file mode 100644 index 00000000000..1e0a809d9a3 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-extension-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: + unknown_section: + a_num: 2 +service: + extensions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/testdata/invalid-extension-sub-config.yaml b/internal/otel_collector/config/testdata/invalid-extension-sub-config.yaml new file mode 100644 index 00000000000..0859a71cfeb --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-extension-sub-config.yaml @@ -0,0 +1,16 @@ +extensions: + exampleextension: + tests +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/invalid-pipeline-section.yaml b/internal/otel_collector/config/testdata/invalid-pipeline-section.yaml new file mode 100644 index 00000000000..760edb27db4 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-pipeline-section.yaml @@ -0,0 +1,20 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extensions: + - exampleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter + unknown_section: 1 diff --git a/internal/otel_collector/config/testdata/invalid-pipeline-sub-config.yaml b/internal/otel_collector/config/testdata/invalid-pipeline-sub-config.yaml new file mode 100644 index 00000000000..9b7dcee8072 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-pipeline-sub-config.yaml @@ -0,0 +1,9 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces diff --git a/internal/otel_collector/config/testdata/invalid-pipeline-type-and-name.yaml b/internal/otel_collector/config/testdata/invalid-pipeline-type-and-name.yaml new file mode 100644 index 00000000000..889dd6b1b51 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-pipeline-type-and-name.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + pipelines: + /metrics: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/invalid-pipeline-type.yaml b/internal/otel_collector/config/testdata/invalid-pipeline-type.yaml new file mode 100644 index 00000000000..7c88405a947 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-pipeline-type.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + pipelines: + wrongdatatype: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git 
a/internal/otel_collector/config/testdata/invalid-processor-section.yaml b/internal/otel_collector/config/testdata/invalid-processor-section.yaml new file mode 100644 index 00000000000..01d2a086ed4 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-processor-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: + unknown_section: + a_num: 2 +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extensions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/testdata/invalid-processor-sub-config.yaml b/internal/otel_collector/config/testdata/invalid-processor-sub-config.yaml new file mode 100644 index 00000000000..436290d616d --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-processor-sub-config.yaml @@ -0,0 +1,13 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: + tests +service: + pipelines: + traces: + receivers: [multireceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/invalid-receiver-name.yaml b/internal/otel_collector/config/testdata/invalid-receiver-name.yaml new file mode 100644 index 00000000000..ee71200b9ce --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-receiver-name.yaml @@ -0,0 +1,12 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [1,2,3] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/invalid-receiver-reference.yaml b/internal/otel_collector/config/testdata/invalid-receiver-reference.yaml new file mode 100644 index 00000000000..89288bae84c --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-receiver-reference.yaml @@ -0,0 +1,10 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [invalidreceivername] diff --git a/internal/otel_collector/config/testdata/invalid-receiver-section.yaml b/internal/otel_collector/config/testdata/invalid-receiver-section.yaml new file mode 100644 index 00000000000..558f17a4682 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-receiver-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: + unknown_section: + a_num: 2 +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extensions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/testdata/invalid-receiver-sub-config.yaml b/internal/otel_collector/config/testdata/invalid-receiver-sub-config.yaml new file mode 100644 index 00000000000..b7c567b2ce3 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-receiver-sub-config.yaml @@ -0,0 +1,13 @@ +receivers: + multireceiver: + tests +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [multireceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/invalid-sequence-value.yaml b/internal/otel_collector/config/testdata/invalid-sequence-value.yaml new file mode 100644 index 
00000000000..7f7a0a385ee --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-sequence-value.yaml @@ -0,0 +1,14 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: + examplereceiver: + some: config + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/invalid-service-extensions-value.yaml b/internal/otel_collector/config/testdata/invalid-service-extensions-value.yaml new file mode 100644 index 00000000000..00b123c1931 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-service-extensions-value.yaml @@ -0,0 +1,18 @@ +extensions: + exampleextension: +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + extensions: + exampleextension: + error: true + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/invalid-service-section.yaml b/internal/otel_collector/config/testdata/invalid-service-section.yaml new file mode 100644 index 00000000000..6a3b8f92772 --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-service-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extenstions: + - examapleextension + unknown_section: + a_num: 2 + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/testdata/invalid-top-level-section.yaml b/internal/otel_collector/config/testdata/invalid-top-level-section.yaml new file mode 100644 index 00000000000..0b9819d17de --- /dev/null +++ b/internal/otel_collector/config/testdata/invalid-top-level-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extenstions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter +unknown_section: + a_num: 2 diff --git a/internal/otel_collector/config/testdata/metric-pipeline-cannot-have-processors.yaml b/internal/otel_collector/config/testdata/metric-pipeline-cannot-have-processors.yaml new file mode 100644 index 00000000000..28c865a1be9 --- /dev/null +++ b/internal/otel_collector/config/testdata/metric-pipeline-cannot-have-processors.yaml @@ -0,0 +1,12 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + metrics: + receivers: [multireceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/missing-all-sections.yaml b/internal/otel_collector/config/testdata/missing-all-sections.yaml new file mode 100644 index 00000000000..2eaee087e88 --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-all-sections.yaml @@ -0,0 +1,5 @@ +receivers: +exporters: +processors: +service: + pipeline: diff --git a/internal/otel_collector/config/testdata/missing-exporter-name-after-slash.yaml b/internal/otel_collector/config/testdata/missing-exporter-name-after-slash.yaml new file mode 100644 index 00000000000..ecb9517bcaf --- /dev/null +++ 
b/internal/otel_collector/config/testdata/missing-exporter-name-after-slash.yaml @@ -0,0 +1,10 @@ +receivers: + multireceiver: +exporters: + exampleexporter/: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [somereceiver] diff --git a/internal/otel_collector/config/testdata/missing-exporters.yaml b/internal/otel_collector/config/testdata/missing-exporters.yaml new file mode 100644 index 00000000000..bcedbc83f27 --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-exporters.yaml @@ -0,0 +1,9 @@ +receivers: + multireceiver: +exporters: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [invalidreceivername] diff --git a/internal/otel_collector/config/testdata/missing-extension-type.yaml b/internal/otel_collector/config/testdata/missing-extension-type.yaml new file mode 100644 index 00000000000..14eca199345 --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-extension-type.yaml @@ -0,0 +1,2 @@ +extensions: + /exampleextension: diff --git a/internal/otel_collector/config/testdata/missing-pipelines.yaml b/internal/otel_collector/config/testdata/missing-pipelines.yaml new file mode 100644 index 00000000000..cf7f3ae1415 --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-pipelines.yaml @@ -0,0 +1,8 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: diff --git a/internal/otel_collector/config/testdata/missing-processor-type.yaml b/internal/otel_collector/config/testdata/missing-processor-type.yaml new file mode 100644 index 00000000000..d594ecdea3b --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-processor-type.yaml @@ -0,0 +1,10 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + /exampleprocessor: +service: + pipelines: + traces: + receivers: [somereceiver] diff --git a/internal/otel_collector/config/testdata/missing-processors.yaml b/internal/otel_collector/config/testdata/missing-processors.yaml new file mode 100644 index 00000000000..c3103ead3ea --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-processors.yaml @@ -0,0 +1,9 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: +service: + pipelines: + traces: + receivers: [invalidreceivername] diff --git a/internal/otel_collector/config/testdata/missing-receiver-type.yaml b/internal/otel_collector/config/testdata/missing-receiver-type.yaml new file mode 100644 index 00000000000..b05c26a28f7 --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-receiver-type.yaml @@ -0,0 +1,10 @@ +receivers: + /myreceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [somereceiver] diff --git a/internal/otel_collector/config/testdata/missing-receivers.yaml b/internal/otel_collector/config/testdata/missing-receivers.yaml new file mode 100644 index 00000000000..7a79d41a661 --- /dev/null +++ b/internal/otel_collector/config/testdata/missing-receivers.yaml @@ -0,0 +1,8 @@ +receivers: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: diff --git a/internal/otel_collector/config/testdata/multiproto-config.yaml b/internal/otel_collector/config/testdata/multiproto-config.yaml new file mode 100644 index 00000000000..23d4d801e59 --- /dev/null +++ b/internal/otel_collector/config/testdata/multiproto-config.yaml @@ -0,0 +1,26 @@ +receivers: + multireceiver: + multireceiver/myreceiver: + 
protocols: + http: + endpoint: "localhost:12345" + extra: "some string 1" + tcp: + endpoint: "0.0.0.0:4567" + extra: "some string 2" + +processors: + +exporters: + exampleexporter: + extra: "locahost:1010" + +service: + pipelines: + traces: + receivers: [multireceiver/myreceiver] + processors: [] + exporters: [exampleexporter] + metrics: + receivers: [multireceiver/myreceiver] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/pipeline-exporter-not-exists.yaml b/internal/otel_collector/config/testdata/pipeline-exporter-not-exists.yaml new file mode 100644 index 00000000000..0a88af150f9 --- /dev/null +++ b/internal/otel_collector/config/testdata/pipeline-exporter-not-exists.yaml @@ -0,0 +1,11 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + metrics: + receivers: [multireceiver] + exporters: [nosuchexporter] diff --git a/internal/otel_collector/config/testdata/pipeline-must-have-exporter.yaml b/internal/otel_collector/config/testdata/pipeline-must-have-exporter.yaml new file mode 100644 index 00000000000..714506f34e0 --- /dev/null +++ b/internal/otel_collector/config/testdata/pipeline-must-have-exporter.yaml @@ -0,0 +1,11 @@ +receivers: + multireceiver: +exporters: + exampleexporter: + extra: "not present in the service pipeline" +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [multireceiver] diff --git a/internal/otel_collector/config/testdata/pipeline-must-have-exporter2.yaml b/internal/otel_collector/config/testdata/pipeline-must-have-exporter2.yaml new file mode 100644 index 00000000000..375216afe6d --- /dev/null +++ b/internal/otel_collector/config/testdata/pipeline-must-have-exporter2.yaml @@ -0,0 +1,11 @@ +receivers: + multireceiver: +exporters: + exampleexporter: + extra: "not present in the service pipeline" +processors: + exampleprocessor: +service: + pipelines: + metrics: + receivers: [multireceiver] diff --git a/internal/otel_collector/config/testdata/pipeline-must-have-receiver.yaml b/internal/otel_collector/config/testdata/pipeline-must-have-receiver.yaml new file mode 100644 index 00000000000..ff6904139c2 --- /dev/null +++ b/internal/otel_collector/config/testdata/pipeline-must-have-receiver.yaml @@ -0,0 +1,11 @@ +receivers: + examplereceiver: + extra: "not present in the service pipeline" +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/pipeline-must-have-receiver2.yaml b/internal/otel_collector/config/testdata/pipeline-must-have-receiver2.yaml new file mode 100644 index 00000000000..a4a99990a77 --- /dev/null +++ b/internal/otel_collector/config/testdata/pipeline-must-have-receiver2.yaml @@ -0,0 +1,11 @@ +receivers: + examplereceiver: + extra: "not present in the service pipeline" +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + metrics: + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/testdata/pipeline-processor-not-exists.yaml b/internal/otel_collector/config/testdata/pipeline-processor-not-exists.yaml new file mode 100644 index 00000000000..634d9a3f4b4 --- /dev/null +++ b/internal/otel_collector/config/testdata/pipeline-processor-not-exists.yaml @@ -0,0 +1,15 @@ +receivers: + multireceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: + - multireceiver + processors: + - 
nosuchprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/testdata/simple-config-with-all-env.yaml b/internal/otel_collector/config/testdata/simple-config-with-all-env.yaml new file mode 100644 index 00000000000..f3428d7eda4 --- /dev/null +++ b/internal/otel_collector/config/testdata/simple-config-with-all-env.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "$RECEIVERS_EXAMPLERECEIVER_EXTRA" + extra_map: + recv.1: "$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_1" + recv.2: "$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2" + extra_list: + - "$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1" + - "$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_2" + + +processors: + exampleprocessor: + extra: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA" + extra_map: + proc_1: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1" + proc_2: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_2" + extra_list: + - "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1" + - "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_2" + +exporters: + exampleexporter: + extra_int: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_INT}" + extra: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA}" + extra_map: + exp_1: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_1}" + exp_2: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_2}" + extra_list: + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}" + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}" + +extensions: + exampleextension: + extra: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}" + extra_map: + ext-1: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}" + ext-2: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}" + extra_list: + - "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1}" + - "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_2}" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/testdata/simple-config-with-escaped-env.yaml b/internal/otel_collector/config/testdata/simple-config-with-escaped-env.yaml new file mode 100644 index 00000000000..8870e6baf77 --- /dev/null +++ b/internal/otel_collector/config/testdata/simple-config-with-escaped-env.yaml @@ -0,0 +1,62 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "$$RECEIVERS_EXAMPLERECEIVER_EXTRA" + extra_map: + # $$ -> escaped $ + recv.1: "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_1" + # $$$ -> escaped $ + substituted env var + recv.2: "$$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2" + # $$$$ -> two escaped $ + recv.3: "$$$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_3" + # escaped $ in the middle + recv.4: "some$${RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_4}text" + # two escaped $ + recv.5: "$${ONE}$${TWO}" + # trailing escaped $ + recv.6: "text$$" + # escaped $ alone + recv.7: "$$" + extra_list: + - "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1" + - "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_2" + + +processors: + exampleprocessor: + extra: "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA" + extra_map: + proc_1: "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1" + proc_2: "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_2" + extra_list: + - "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1" + - "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_2" + +exporters: + exampleexporter: + extra: "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA}" + extra_map: + exp_1: 
"$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_1}" + exp_2: "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_2}" + extra_list: + - "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}" + - "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}" + +extensions: + exampleextension: + extra: "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}" + extra_map: + ext-1: "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}" + ext-2: "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}" + extra_list: + - "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1}" + - "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_2}" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/testdata/simple-config-with-no-env.yaml b/internal/otel_collector/config/testdata/simple-config-with-no-env.yaml new file mode 100644 index 00000000000..f9e1d7708b3 --- /dev/null +++ b/internal/otel_collector/config/testdata/simple-config-with-no-env.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "some receiver string" + extra_map: + recv.1: "some receiver map value_1" + recv.2: "some receiver map value_2" + extra_list: + - "some receiver list value_1" + - "some receiver list value_2" + + +processors: + exampleprocessor: + extra: "some processor string" + extra_map: + proc_1: "some processor map value_1" + proc_2: "some processor map value_2" + extra_list: + - "some processor list value_1" + - "some processor list value_2" + +exporters: + exampleexporter: + extra_int: 65 + extra: "some exporter string" + extra_map: + exp_1: "some exporter map value_1" + exp_2: "some exporter map value_2" + extra_list: + - "some exporter list value_1" + - "some exporter list value_2" + +extensions: + exampleextension: + extra: "some extension string" + extra_map: + ext-1: "some extension map value_1" + ext-2: "some extension map value_2" + extra_list: + - "some extension list value_1" + - "some extension list value_2" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/testdata/simple-config-with-partial-env.yaml b/internal/otel_collector/config/testdata/simple-config-with-partial-env.yaml new file mode 100644 index 00000000000..7d3087b4a50 --- /dev/null +++ b/internal/otel_collector/config/testdata/simple-config-with-partial-env.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "some receiver string" + extra_map: + recv.1: "some receiver map value_1" + recv.2: "some receiver map value_2" + extra_list: + - "some receiver list value_1" + - "some receiver list value_2" + + +processors: + exampleprocessor: + extra: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA" + extra_map: + proc_1: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1" + proc_2: "some processor map value_2" + extra_list: + - "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1" + - "some processor list value_2" + +exporters: + exampleexporter: + extra_int: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_INT}" + extra: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA}" + extra_map: + exp_1: "some exporter map value_1" + exp_2: "some exporter map value_2" + extra_list: + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}" + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}" + +extensions: + exampleextension: + extra: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}" + 
extra_map: + ext-1: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}" + ext-2: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}" + extra_list: + - "some extension list value_1" + - "some extension list value_2" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/testdata/unknown-exporter-type.yaml b/internal/otel_collector/config/testdata/unknown-exporter-type.yaml new file mode 100644 index 00000000000..deb74322a5d --- /dev/null +++ b/internal/otel_collector/config/testdata/unknown-exporter-type.yaml @@ -0,0 +1,12 @@ +receivers: + examplereceiver: +exporters: + nosuchexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [multireceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/unknown-extension-type.yaml b/internal/otel_collector/config/testdata/unknown-extension-type.yaml new file mode 100644 index 00000000000..1ee2ca759c5 --- /dev/null +++ b/internal/otel_collector/config/testdata/unknown-extension-type.yaml @@ -0,0 +1,2 @@ +extensions: + nosuchextension: diff --git a/internal/otel_collector/config/testdata/unknown-processor-type.yaml b/internal/otel_collector/config/testdata/unknown-processor-type.yaml new file mode 100644 index 00000000000..02230c9d565 --- /dev/null +++ b/internal/otel_collector/config/testdata/unknown-processor-type.yaml @@ -0,0 +1,12 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + nosuchprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/testdata/unknown-receiver-type.yaml b/internal/otel_collector/config/testdata/unknown-receiver-type.yaml new file mode 100644 index 00000000000..eef9a5a7948 --- /dev/null +++ b/internal/otel_collector/config/testdata/unknown-receiver-type.yaml @@ -0,0 +1,15 @@ +receivers: + nosuchreceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: + - multireceiver + exporters: + - exampleexporter + processors: + - exampleprocessor diff --git a/internal/otel_collector/config/testdata/valid-config.yaml b/internal/otel_collector/config/testdata/valid-config.yaml new file mode 100644 index 00000000000..986f6ea1049 --- /dev/null +++ b/internal/otel_collector/config/testdata/valid-config.yaml @@ -0,0 +1,29 @@ +receivers: + examplereceiver: + examplereceiver/myreceiver: + endpoint: "localhost:12345" + extra: "some string" + +processors: + exampleprocessor: + +exporters: + exampleexporter/myexporter: + extra: "some export string 2" + exampleexporter: + +extensions: + exampleextension/0: + exampleextension/disabled: + extra: "not present in the service" + exampleextension/1: + extra: "some string" + +service: + extensions: [exampleextension/0, exampleextension/1] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/consumer/consumer.go b/internal/otel_collector/consumer/consumer.go new file mode 100644 index 00000000000..aa867f62984 --- /dev/null +++ b/internal/otel_collector/consumer/consumer.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package consumer contains interfaces that receive and process consumerdata. +package consumer + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// MetricsConsumer is the new metrics consumer interface that receives pdata.MetricData, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type MetricsConsumer interface { + ConsumeMetrics(ctx context.Context, md pdata.Metrics) error +} + +// TracesConsumer is an interface that receives pdata.Traces, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type TracesConsumer interface { + // ConsumeTraces receives pdata.Traces for processing. + ConsumeTraces(ctx context.Context, td pdata.Traces) error +} + +// LogsConsumer is an interface that receives pdata.Logs, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type LogsConsumer interface { + // ConsumeLogs receives pdata.Logs for processing. + ConsumeLogs(ctx context.Context, ld pdata.Logs) error +} diff --git a/internal/otel_collector/consumer/consumerdata/consumerdata.go b/internal/otel_collector/consumer/consumerdata/consumerdata.go new file mode 100644 index 00000000000..985f8909ea8 --- /dev/null +++ b/internal/otel_collector/consumer/consumerdata/consumerdata.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package consumerdata contains data structures that holds proto metrics/spans, node and resource. +package consumerdata + +import ( + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" +) + +// MetricsData is a struct that groups proto metrics with a unique node and a resource. +// Deprecated: use pdata.Metrics instead. +type MetricsData struct { + Node *commonpb.Node + Resource *resourcepb.Resource + Metrics []*metricspb.Metric +} + +// TraceData is a struct that groups proto spans with a unique node and a resource. +// Deprecated: use pdata.Traces instead. 
+type TraceData struct { + Node *commonpb.Node + Resource *resourcepb.Resource + Spans []*tracepb.Span + SourceFormat string +} diff --git a/internal/otel_collector/consumer/consumererror/partialerror.go b/internal/otel_collector/consumer/consumererror/partialerror.go new file mode 100644 index 00000000000..254af292021 --- /dev/null +++ b/internal/otel_collector/consumer/consumererror/partialerror.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import "go.opentelemetry.io/collector/consumer/pdata" + +// PartialError can be used to signalize that a subset of received data failed to be processed or send. +// The preceding components in the pipeline can use this information for partial retries. +type PartialError struct { + error + failed pdata.Traces + failedLogs pdata.Logs + failedMetrics pdata.Metrics +} + +// PartialTracesError creates PartialError for failed traces. +// Use this error type only when a subset of received data set failed to be processed or sent. +func PartialTracesError(err error, failed pdata.Traces) error { + return PartialError{ + error: err, + failed: failed, + } +} + +// GetTraces returns failed traces. +func (err PartialError) GetTraces() pdata.Traces { + return err.failed +} + +// PartialLogsError creates PartialError for failed logs. +// Use this error type only when a subset of received data set failed to be processed or sent. +func PartialLogsError(err error, failedLogs pdata.Logs) error { + return PartialError{ + error: err, + failedLogs: failedLogs, + } +} + +// GetLogs returns failed logs. +func (err PartialError) GetLogs() pdata.Logs { + return err.failedLogs +} + +// PartialMetricsError creates PartialError for failed metrics. +// Use this error type only when a subset of received data set failed to be processed or sent. +func PartialMetricsError(err error, failedMetrics pdata.Metrics) error { + return PartialError{ + error: err, + failedMetrics: failedMetrics, + } +} + +// GetMetrics returns failed metrics. +func (err PartialError) GetMetrics() pdata.Metrics { + return err.failedMetrics +} diff --git a/internal/otel_collector/consumer/consumererror/partialerror_test.go b/internal/otel_collector/consumer/consumererror/partialerror_test.go new file mode 100644 index 00000000000..54d19c1c755 --- /dev/null +++ b/internal/otel_collector/consumer/consumererror/partialerror_test.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestPartialError(t *testing.T) { + td := testdata.GenerateTraceDataOneSpan() + err := fmt.Errorf("some error") + partialErr := PartialTracesError(err, td) + assert.Equal(t, err.Error(), partialErr.Error()) + assert.Equal(t, td, partialErr.(PartialError).failed) +} + +func TestPartialErrorLogs(t *testing.T) { + td := testdata.GenerateLogDataOneLog() + err := fmt.Errorf("some error") + partialErr := PartialLogsError(err, td) + assert.Equal(t, err.Error(), partialErr.Error()) + assert.Equal(t, td, partialErr.(PartialError).failedLogs) +} + +func TestPartialErrorMetrics(t *testing.T) { + td := testdata.GenerateMetricsOneMetric() + err := fmt.Errorf("some error") + partialErr := PartialMetricsError(err, td) + assert.Equal(t, err.Error(), partialErr.Error()) + assert.Equal(t, td, partialErr.(PartialError).failedMetrics) +} diff --git a/internal/otel_collector/consumer/consumererror/partialscrapeerror.go b/internal/otel_collector/consumer/consumererror/partialscrapeerror.go new file mode 100644 index 00000000000..a3980d3c0bf --- /dev/null +++ b/internal/otel_collector/consumer/consumererror/partialscrapeerror.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import "errors" + +// PartialScrapeError can be used to signalize that a subset of metrics were failed +// to be scraped +type PartialScrapeError struct { + error + Failed int +} + +// NewPartialScrapeError creates PartialScrapeError for failed metrics. +// Use this error type only when a subset of data was failed to be scraped. +func NewPartialScrapeError(err error, failed int) error { + return PartialScrapeError{ + error: err, + Failed: failed, + } +} + +// IsPartialScrapeError checks if an error was wrapped with PartialScrapeError. +func IsPartialScrapeError(err error) bool { + if err == nil { + return false + } + + var partialScrapeErr PartialScrapeError + return errors.As(err, &partialScrapeErr) +} diff --git a/internal/otel_collector/consumer/consumererror/partialscrapeerror_test.go b/internal/otel_collector/consumer/consumererror/partialscrapeerror_test.go new file mode 100644 index 00000000000..58d6cae5bea --- /dev/null +++ b/internal/otel_collector/consumer/consumererror/partialscrapeerror_test.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPartialScrapeError(t *testing.T) { + failed := 2 + err := fmt.Errorf("some error") + partialErr := NewPartialScrapeError(err, failed) + assert.Equal(t, err.Error(), partialErr.Error()) + assert.Equal(t, failed, partialErr.(PartialScrapeError).Failed) +} + +func TestIsPartialScrapeError(t *testing.T) { + err := errors.New("testError") + require.False(t, IsPartialScrapeError(err)) + + err = NewPartialScrapeError(err, 2) + require.True(t, IsPartialScrapeError(err)) +} diff --git a/internal/otel_collector/consumer/consumererror/permanenterror.go b/internal/otel_collector/consumer/consumererror/permanenterror.go new file mode 100644 index 00000000000..e4d0950596e --- /dev/null +++ b/internal/otel_collector/consumer/consumererror/permanenterror.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package consumererror provides wrappers to easily classify errors. This allows +// appropriate action by error handlers without the need to know each individual +// error type/instance. +package consumererror + +// permanent is an error that will be always returned if its source +// receives the same inputs. +type permanent struct { + err error +} + +// Permanent wraps an error to indicate that it is a permanent error, i.e.: an +// error that will be always returned if its source receives the same inputs. +func Permanent(err error) error { + return permanent{err: err} +} + +func (p permanent) Error() string { + return "Permanent error: " + p.err.Error() +} + +// IsPermanent checks if an error was wrapped with the Permanent function, that +// is used to indicate that a given error will always be returned in the case +// that its sources receives the same input. +func IsPermanent(err error) bool { + if err != nil { + _, isPermanent := err.(permanent) + return isPermanent + } + return false +} diff --git a/internal/otel_collector/consumer/consumererror/permanenterror_test.go b/internal/otel_collector/consumer/consumererror/permanenterror_test.go new file mode 100644 index 00000000000..8674825fc57 --- /dev/null +++ b/internal/otel_collector/consumer/consumererror/permanenterror_test.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumererror + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPermanent(t *testing.T) { + err := errors.New("testError") + require.False(t, IsPermanent(err)) + + err = Permanent(err) + require.True(t, IsPermanent(err)) +} + +func TestIsPermanent_NilError(t *testing.T) { + var err error + require.False(t, IsPermanent(err)) +} diff --git a/internal/otel_collector/consumer/consumertest/nop.go b/internal/otel_collector/consumer/consumertest/nop.go new file mode 100644 index 00000000000..74af9d90cb7 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/nop.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "context" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" +) + +var ( + nopInstance = &nopConsumer{} +) + +type nopConsumer struct{} + +func (nc *nopConsumer) ConsumeTraces(context.Context, pdata.Traces) error { + return nil +} + +func (nc *nopConsumer) ConsumeMetrics(context.Context, pdata.Metrics) error { + return nil +} + +func (nc *nopConsumer) ConsumeLogs(context.Context, pdata.Logs) error { + return nil +} + +// NewTracesNop creates an TraceConsumer that just drops the received data. +func NewTracesNop() consumer.TracesConsumer { + return nopInstance +} + +// NewMetricsNop creates an MetricsConsumer that just drops the received data. +func NewMetricsNop() consumer.MetricsConsumer { + return nopInstance +} + +// NewLogsNop creates an LogsConsumer that just drops the received data. +func NewLogsNop() consumer.LogsConsumer { + return nopInstance +} diff --git a/internal/otel_collector/consumer/consumertest/nop_test.go b/internal/otel_collector/consumer/consumertest/nop_test.go new file mode 100644 index 00000000000..852879658db --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/nop_test.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consumertest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestTracesNop(t *testing.T) { + nt := NewTracesNop() + require.NotNil(t, nt) + require.NoError(t, nt.ConsumeTraces(context.Background(), pdata.NewTraces())) +} + +func TestMetricsNop(t *testing.T) { + nm := NewMetricsNop() + require.NotNil(t, nm) + require.NoError(t, nm.ConsumeMetrics(context.Background(), pdata.NewMetrics())) +} + +func TestLogsNop(t *testing.T) { + nl := NewLogsNop() + require.NotNil(t, nl) + require.NoError(t, nl.ConsumeLogs(context.Background(), pdata.NewLogs())) +} diff --git a/internal/otel_collector/consumer/consumertest/sink.go b/internal/otel_collector/consumer/consumertest/sink.go new file mode 100644 index 00000000000..d35257289ee --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/sink.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" +) + +type baseErrorConsumer struct { + mu sync.Mutex + consumeError error // to be returned by ConsumeTraces, if set +} + +// SetConsumeError sets an error that will be returned by the Consume function. +func (bec *baseErrorConsumer) SetConsumeError(err error) { + bec.mu.Lock() + defer bec.mu.Unlock() + bec.consumeError = err +} + +// TracesSink acts as a trace receiver for use in tests. +type TracesSink struct { + baseErrorConsumer + traces []pdata.Traces + spansCount int +} + +var _ consumer.TracesConsumer = (*TracesSink)(nil) + +// ConsumeTraceData stores traces for tests. +func (ste *TracesSink) ConsumeTraces(_ context.Context, td pdata.Traces) error { + ste.mu.Lock() + defer ste.mu.Unlock() + + if ste.consumeError != nil { + return ste.consumeError + } + + ste.traces = append(ste.traces, td) + ste.spansCount += td.SpanCount() + + return nil +} + +// AllTraces returns the traces sent to the test sink. +func (ste *TracesSink) AllTraces() []pdata.Traces { + ste.mu.Lock() + defer ste.mu.Unlock() + + copyTraces := make([]pdata.Traces, len(ste.traces)) + copy(copyTraces, ste.traces) + return copyTraces +} + +// SpansCount return the number of spans sent to the test sing. +func (ste *TracesSink) SpansCount() int { + ste.mu.Lock() + defer ste.mu.Unlock() + return ste.spansCount +} + +// Reset deletes any existing metrics. +func (ste *TracesSink) Reset() { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.traces = nil + ste.spansCount = 0 +} + +// MetricsSink acts as a metrics receiver for use in tests. +type MetricsSink struct { + baseErrorConsumer + metrics []pdata.Metrics + metricsCount int +} + +var _ consumer.MetricsConsumer = (*MetricsSink)(nil) + +// ConsumeMetricsData stores traces for tests. 
+func (sme *MetricsSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { + sme.mu.Lock() + defer sme.mu.Unlock() + if sme.consumeError != nil { + return sme.consumeError + } + + sme.metrics = append(sme.metrics, md) + sme.metricsCount += md.MetricCount() + + return nil +} + +// AllMetrics returns the metrics sent to the test sink. +func (sme *MetricsSink) AllMetrics() []pdata.Metrics { + sme.mu.Lock() + defer sme.mu.Unlock() + + copyMetrics := make([]pdata.Metrics, len(sme.metrics)) + copy(copyMetrics, sme.metrics) + return copyMetrics +} + +// MetricsCount return the number of metrics sent to the test sing. +func (sme *MetricsSink) MetricsCount() int { + sme.mu.Lock() + defer sme.mu.Unlock() + return sme.metricsCount +} + +// Reset deletes any existing metrics. +func (sme *MetricsSink) Reset() { + sme.mu.Lock() + defer sme.mu.Unlock() + + sme.metrics = nil + sme.metricsCount = 0 +} + +// LogsSink acts as a metrics receiver for use in tests. +type LogsSink struct { + baseErrorConsumer + logs []pdata.Logs + logRecordsCount int +} + +var _ consumer.LogsConsumer = (*LogsSink)(nil) + +// ConsumeLogData stores traces for tests. +func (sle *LogsSink) ConsumeLogs(_ context.Context, ld pdata.Logs) error { + sle.mu.Lock() + defer sle.mu.Unlock() + if sle.consumeError != nil { + return sle.consumeError + } + + sle.logs = append(sle.logs, ld) + sle.logRecordsCount += ld.LogRecordCount() + + return nil +} + +// AllLog returns the metrics sent to the test sink. +func (sle *LogsSink) AllLogs() []pdata.Logs { + sle.mu.Lock() + defer sle.mu.Unlock() + + copyLogs := make([]pdata.Logs, len(sle.logs)) + copy(copyLogs, sle.logs) + return copyLogs +} + +// LogRecordsCount return the number of log records sent to the test sing. +func (sle *LogsSink) LogRecordsCount() int { + sle.mu.Lock() + defer sle.mu.Unlock() + return sle.logRecordsCount +} + +// Reset deletes any existing logs. +func (sle *LogsSink) Reset() { + sle.mu.Lock() + defer sle.mu.Unlock() + + sle.logs = nil + sle.logRecordsCount = 0 +} diff --git a/internal/otel_collector/consumer/consumertest/sink_test.go b/internal/otel_collector/consumer/consumertest/sink_test.go new file mode 100644 index 00000000000..17990885f62 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/sink_test.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consumertest + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestTracesSink(t *testing.T) { + sink := new(TracesSink) + td := testdata.GenerateTraceDataOneSpan() + want := make([]pdata.Traces, 0, 7) + for i := 0; i < 7; i++ { + require.NoError(t, sink.ConsumeTraces(context.Background(), td)) + want = append(want, td) + } + assert.Equal(t, want, sink.AllTraces()) + assert.Equal(t, len(want), sink.SpansCount()) + sink.Reset() + assert.Equal(t, 0, len(sink.AllTraces())) + assert.Equal(t, 0, sink.SpansCount()) +} + +func TestTracesSink_Error(t *testing.T) { + sink := new(TracesSink) + sink.SetConsumeError(errors.New("my error")) + td := testdata.GenerateTraceDataOneSpan() + require.Error(t, sink.ConsumeTraces(context.Background(), td)) + assert.Len(t, sink.AllTraces(), 0) + assert.Equal(t, 0, sink.SpansCount()) +} + +func TestMetricsSink(t *testing.T) { + sink := new(MetricsSink) + md := testdata.GenerateMetricsOneMetric() + want := make([]pdata.Metrics, 0, 7) + for i := 0; i < 7; i++ { + require.NoError(t, sink.ConsumeMetrics(context.Background(), md)) + want = append(want, md) + } + assert.Equal(t, want, sink.AllMetrics()) + assert.Equal(t, len(want), sink.MetricsCount()) + sink.Reset() + assert.Equal(t, 0, len(sink.AllMetrics())) + assert.Equal(t, 0, sink.MetricsCount()) +} + +func TestMetricsSink_Error(t *testing.T) { + sink := new(MetricsSink) + sink.SetConsumeError(errors.New("my error")) + md := testdata.GenerateMetricsOneMetric() + require.Error(t, sink.ConsumeMetrics(context.Background(), md)) + assert.Len(t, sink.AllMetrics(), 0) + assert.Equal(t, 0, sink.MetricsCount()) +} + +func TestLogsSink(t *testing.T) { + sink := new(LogsSink) + md := testdata.GenerateLogDataOneLogNoResource() + want := make([]pdata.Logs, 0, 7) + for i := 0; i < 7; i++ { + require.NoError(t, sink.ConsumeLogs(context.Background(), md)) + want = append(want, md) + } + assert.Equal(t, want, sink.AllLogs()) + assert.Equal(t, len(want), sink.LogRecordsCount()) + sink.Reset() + assert.Equal(t, 0, len(sink.AllLogs())) + assert.Equal(t, 0, sink.LogRecordsCount()) +} + +func TestLogsSink_Error(t *testing.T) { + sink := new(LogsSink) + sink.SetConsumeError(errors.New("my error")) + ld := testdata.GenerateLogDataOneLogNoResource() + require.Error(t, sink.ConsumeLogs(context.Background(), ld)) + assert.Len(t, sink.AllLogs(), 0) + assert.Equal(t, 0, sink.LogRecordsCount()) +} diff --git a/internal/otel_collector/consumer/pdata/common.go b/internal/otel_collector/consumer/pdata/common.go new file mode 100644 index 00000000000..7bd75dd8ecb --- /dev/null +++ b/internal/otel_collector/consumer/pdata/common.go @@ -0,0 +1,790 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
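The next file vendors pdata's common attribute machinery (AttributeValue, AttributeMap, StringMap). As a quick orientation to the API it defines, a small hedged sketch (illustrative only, not code from the patch):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	// Values must be created via the New* constructors; zero values are invalid.
	av := pdata.NewAttributeValueString("example")
	fmt.Println(av.Type(), av.StringVal()) // STRING example

	// Setters change the stored type in place.
	av.SetIntVal(42)
	fmt.Println(av.Type(), av.IntVal()) // INT 42

	// AttributeMap is a map-like wrapper over a KeyValue slice.
	m := pdata.NewAttributeMap()
	m.InsertString("service.name", "my-service")
	if v, ok := m.Get("service.name"); ok {
		fmt.Println(v.StringVal()) // my-service
	}
}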
+
+package pdata
+
+// This file contains data structures that are common for all telemetry types,
+// such as timestamps, attributes, etc.
+
+import (
+	"sort"
+	"time"
+
+	otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"
+)
+
+// TimestampUnixNano is a time specified as UNIX Epoch time in nanoseconds since
+// 00:00:00 UTC on 1 January 1970.
+type TimestampUnixNano uint64
+
+func (ts TimestampUnixNano) String() string {
+	return time.Unix(0, int64(ts)).String()
+}
+
+// AttributeValueType specifies the type of AttributeValue.
+type AttributeValueType int
+
+const (
+	AttributeValueNULL AttributeValueType = iota
+	AttributeValueSTRING
+	AttributeValueINT
+	AttributeValueDOUBLE
+	AttributeValueBOOL
+	AttributeValueMAP
+	AttributeValueARRAY
+)
+
+func (avt AttributeValueType) String() string {
+	switch avt {
+	case AttributeValueNULL:
+		return "NULL"
+	case AttributeValueSTRING:
+		return "STRING"
+	case AttributeValueBOOL:
+		return "BOOL"
+	case AttributeValueINT:
+		return "INT"
+	case AttributeValueDOUBLE:
+		return "DOUBLE"
+	case AttributeValueMAP:
+		return "MAP"
+	case AttributeValueARRAY:
+		return "ARRAY"
+	}
+	return ""
+}
+
+// AttributeValue represents a value of an attribute. Typically used in AttributeMap.
+// Must use one of the NewAttributeValue+ functions below to create new instances.
+//
+// Intended to be passed by value since internally it is just a pointer to actual
+// value representation. For the same reason passing by value and calling setters
+// will modify the original, e.g.:
+//
+//   func f1(val AttributeValue) { val.SetIntVal(234) }
+//   func f2() {
+//   	v := NewAttributeValueString("a string")
+//   	f1(v)
+//   	_ = v.Type() // this will return AttributeValueINT
+//   }
+//
+// Important: a zero-initialized instance is not valid for use. All AttributeValue functions below must
+// be called only on instances that are created via NewAttributeValue+ functions.
+type AttributeValue struct {
+	orig *otlpcommon.AnyValue
+}
+
+func newAttributeValue(orig *otlpcommon.AnyValue) AttributeValue {
+	return AttributeValue{orig}
+}
+
+// NewAttributeValueNull creates a new AttributeValue with a null value.
+func NewAttributeValueNull() AttributeValue {
+	orig := &otlpcommon.AnyValue{}
+	return AttributeValue{orig: orig}
+}
+
+// Deprecated: Use NewAttributeValueNull()
+func NewAttributeValue() AttributeValue {
+	return NewAttributeValueNull()
+}
+
+// NewAttributeValueString creates a new AttributeValue with the given string value.
+func NewAttributeValueString(v string) AttributeValue {
+	orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: v}}
+	return AttributeValue{orig: orig}
+}
+
+// NewAttributeValueInt creates a new AttributeValue with the given int64 value.
+func NewAttributeValueInt(v int64) AttributeValue {
+	orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: v}}
+	return AttributeValue{orig: orig}
+}
+
+// NewAttributeValueDouble creates a new AttributeValue with the given float64 value.
+func NewAttributeValueDouble(v float64) AttributeValue {
+	orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_DoubleValue{DoubleValue: v}}
+	return AttributeValue{orig: orig}
+}
+
+// NewAttributeValueBool creates a new AttributeValue with the given bool value.
+func NewAttributeValueBool(v bool) AttributeValue { + orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BoolValue{BoolValue: v}} + return AttributeValue{orig: orig} +} + +// NewAttributeValueMap creates a new AttributeValue of map type. +func NewAttributeValueMap() AttributeValue { + orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}}} + return AttributeValue{orig: orig} +} + +// NewAttributeValueArray creates a new AttributeValue of array type. +func NewAttributeValueArray() AttributeValue { + orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}}} + return AttributeValue{orig: orig} +} + +// Type returns the type of the value for this AttributeValue. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) Type() AttributeValueType { + if a.orig.Value == nil { + return AttributeValueNULL + } + switch a.orig.Value.(type) { + case *otlpcommon.AnyValue_StringValue: + return AttributeValueSTRING + case *otlpcommon.AnyValue_BoolValue: + return AttributeValueBOOL + case *otlpcommon.AnyValue_IntValue: + return AttributeValueINT + case *otlpcommon.AnyValue_DoubleValue: + return AttributeValueDOUBLE + case *otlpcommon.AnyValue_KvlistValue: + return AttributeValueMAP + case *otlpcommon.AnyValue_ArrayValue: + return AttributeValueARRAY + } + return AttributeValueNULL +} + +// StringVal returns the string value associated with this AttributeValue. +// If the Type() is not AttributeValueSTRING then returns empty string. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) StringVal() string { + return a.orig.GetStringValue() +} + +// IntVal returns the int64 value associated with this AttributeValue. +// If the Type() is not AttributeValueINT then returns int64(0). +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) IntVal() int64 { + return a.orig.GetIntValue() +} + +// DoubleVal returns the float64 value associated with this AttributeValue. +// If the Type() is not AttributeValueDOUBLE then returns float64(0). +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) DoubleVal() float64 { + return a.orig.GetDoubleValue() +} + +// BoolVal returns the bool value associated with this AttributeValue. +// If the Type() is not AttributeValueBOOL then returns false. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) BoolVal() bool { + return a.orig.GetBoolValue() +} + +// MapVal returns the map value associated with this AttributeValue. +// If the Type() is not AttributeValueMAP then returns an empty map. Note that modifying +// such empty map has no effect on this AttributeValue. +// +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) MapVal() AttributeMap { + kvlist := a.orig.GetKvlistValue() + if kvlist == nil { + return NewAttributeMap() + } + return newAttributeMap(&kvlist.Values) +} + +// ArrayVal returns the array value associated with this AttributeValue. +// If the Type() is not AttributeValueARRAY then returns an empty array. Note that modifying +// such empty array has no effect on this AttributeValue. +// +// Calling this function on zero-initialized AttributeValue will cause a panic. 
+func (a AttributeValue) ArrayVal() AnyValueArray { + arr := a.orig.GetArrayValue() + if arr == nil { + return NewAnyValueArray() + } + return newAnyValueArray(&arr.Values) +} + +// SetStringVal replaces the string value associated with this AttributeValue, +// it also changes the type to be AttributeValueSTRING. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) SetStringVal(v string) { + a.orig.Value = &otlpcommon.AnyValue_StringValue{StringValue: v} +} + +// SetIntVal replaces the int64 value associated with this AttributeValue, +// it also changes the type to be AttributeValueINT. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) SetIntVal(v int64) { + a.orig.Value = &otlpcommon.AnyValue_IntValue{IntValue: v} +} + +// SetDoubleVal replaces the float64 value associated with this AttributeValue, +// it also changes the type to be AttributeValueDOUBLE. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) SetDoubleVal(v float64) { + a.orig.Value = &otlpcommon.AnyValue_DoubleValue{DoubleValue: v} +} + +// SetBoolVal replaces the bool value associated with this AttributeValue, +// it also changes the type to be AttributeValueBOOL. +// Calling this function on zero-initialized AttributeValue will cause a panic. +func (a AttributeValue) SetBoolVal(v bool) { + a.orig.Value = &otlpcommon.AnyValue_BoolValue{BoolValue: v} +} + +// copyTo copies the value to AnyValue. Will panic if dest is nil. +func (a AttributeValue) copyTo(dest *otlpcommon.AnyValue) { + switch v := a.orig.Value.(type) { + case *otlpcommon.AnyValue_KvlistValue: + kv, ok := dest.Value.(*otlpcommon.AnyValue_KvlistValue) + if !ok { + kv = &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}} + dest.Value = kv + } + if v.KvlistValue == nil { + kv.KvlistValue = nil + return + } + // Deep copy to dest. + newAttributeMap(&v.KvlistValue.Values).CopyTo(newAttributeMap(&kv.KvlistValue.Values)) + case *otlpcommon.AnyValue_ArrayValue: + av, ok := dest.Value.(*otlpcommon.AnyValue_ArrayValue) + if !ok { + av = &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}} + dest.Value = av + } + if v.ArrayValue == nil { + av.ArrayValue = nil + return + } + // Deep copy to dest. + newAnyValueArray(&v.ArrayValue.Values).CopyTo(newAnyValueArray(&av.ArrayValue.Values)) + default: + // Primitive immutable type, no need for deep copy. + dest.Value = a.orig.Value + } +} + +func (a AttributeValue) CopyTo(dest AttributeValue) { + a.copyTo(dest.orig) +} + +// Equal checks for equality, it returns true if the objects are equal otherwise false. +func (a AttributeValue) Equal(av AttributeValue) bool { + if a.orig.Value == nil || av.orig.Value == nil { + return a.orig.Value == av.orig.Value + } + + switch v := a.orig.Value.(type) { + case *otlpcommon.AnyValue_StringValue: + return v.StringValue == av.orig.GetStringValue() + case *otlpcommon.AnyValue_BoolValue: + return v.BoolValue == av.orig.GetBoolValue() + case *otlpcommon.AnyValue_IntValue: + return v.IntValue == av.orig.GetIntValue() + case *otlpcommon.AnyValue_DoubleValue: + return v.DoubleValue == av.orig.GetDoubleValue() + } + // TODO: handle MAP and ARRAY data types. 
+	return false
+}
+
+func newAttributeKeyValueString(k string, v string) otlpcommon.KeyValue {
+	orig := otlpcommon.KeyValue{Key: k}
+	akv := AttributeValue{&orig.Value}
+	akv.SetStringVal(v)
+	return orig
+}
+
+func newAttributeKeyValueInt(k string, v int64) otlpcommon.KeyValue {
+	orig := otlpcommon.KeyValue{Key: k}
+	akv := AttributeValue{&orig.Value}
+	akv.SetIntVal(v)
+	return orig
+}
+
+func newAttributeKeyValueDouble(k string, v float64) otlpcommon.KeyValue {
+	orig := otlpcommon.KeyValue{Key: k}
+	akv := AttributeValue{&orig.Value}
+	akv.SetDoubleVal(v)
+	return orig
+}
+
+func newAttributeKeyValueBool(k string, v bool) otlpcommon.KeyValue {
+	orig := otlpcommon.KeyValue{Key: k}
+	akv := AttributeValue{&orig.Value}
+	akv.SetBoolVal(v)
+	return orig
+}
+
+func newAttributeKeyValueNull(k string) otlpcommon.KeyValue {
+	orig := otlpcommon.KeyValue{Key: k}
+	return orig
+}
+
+func newAttributeKeyValue(k string, av AttributeValue) otlpcommon.KeyValue {
+	orig := otlpcommon.KeyValue{Key: k}
+	av.copyTo(&orig.Value)
+	return orig
+}
+
+// AttributeMap stores a map of attribute keys to values.
+type AttributeMap struct {
+	orig *[]otlpcommon.KeyValue
+}
+
+// NewAttributeMap creates an AttributeMap with 0 elements.
+func NewAttributeMap() AttributeMap {
+	orig := []otlpcommon.KeyValue(nil)
+	return AttributeMap{&orig}
+}
+
+func newAttributeMap(orig *[]otlpcommon.KeyValue) AttributeMap {
+	return AttributeMap{orig}
+}
+
+// InitFromMap overwrites the entire AttributeMap and reconstructs the AttributeMap
+// with values from the given map[string]AttributeValue.
+//
+// Returns the same instance to allow nicer code like:
+//   assert.EqualValues(t, NewAttributeMap().InitFromMap(map[string]AttributeValue{...}), actual)
+func (am AttributeMap) InitFromMap(attrMap map[string]AttributeValue) AttributeMap {
+	if len(attrMap) == 0 {
+		*am.orig = []otlpcommon.KeyValue(nil)
+		return am
+	}
+	origs := make([]otlpcommon.KeyValue, len(attrMap))
+	ix := 0
+	for k, v := range attrMap {
+		origs[ix].Key = k
+		v.copyTo(&origs[ix].Value)
+		ix++
+	}
+	*am.orig = origs
+	return am
+}
+
+// InitEmptyWithCapacity constructs an empty AttributeMap with predefined slice capacity.
+func (am AttributeMap) InitEmptyWithCapacity(cap int) {
+	if cap == 0 {
+		*am.orig = []otlpcommon.KeyValue(nil)
+		return
+	}
+	*am.orig = make([]otlpcommon.KeyValue, 0, cap)
+}
+
+// Get returns the AttributeValue associated with the key and true. The returned
+// AttributeValue is not a copy, it is a reference to the value stored in this map.
+// It is allowed to modify the returned value using AttributeValue.Set* functions.
+// Such modification will be applied to the value stored in this map.
+//
+// If the key does not exist, it returns an invalid instance of the KeyValue and false.
+// Calling any functions on the returned invalid instance will cause a panic.
+func (am AttributeMap) Get(key string) (AttributeValue, bool) {
+	for i := range *am.orig {
+		akv := &(*am.orig)[i]
+		if akv.Key == key {
+			return AttributeValue{&akv.Value}, true
+		}
+	}
+	return AttributeValue{nil}, false
+}
+
+// Delete deletes the entry associated with the key and returns true if the key
+// was present in the map, otherwise returns false.
+func (am AttributeMap) Delete(key string) bool {
+	for i := range *am.orig {
+		akv := &(*am.orig)[i]
+		if akv.Key == key {
+			*akv = (*am.orig)[len(*am.orig)-1]
+			*am.orig = (*am.orig)[:len(*am.orig)-1]
+			return true
+		}
+	}
+	return false
+}
+
+// Insert adds the AttributeValue to the map when the key does not exist.
+// No action is applied to the map where the key already exists. +// +// Calling this function with a zero-initialized AttributeValue struct will cause a panic. +// +// Important: this function should not be used if the caller has access to +// the raw value to avoid an extra allocation. +func (am AttributeMap) Insert(k string, v AttributeValue) { + if _, existing := am.Get(k); !existing { + *am.orig = append(*am.orig, newAttributeKeyValue(k, v)) + } +} + +// InsertNull adds a null Value to the map when the key does not exist. +// No action is applied to the map where the key already exists. +func (am AttributeMap) InsertNull(k string) { + if _, existing := am.Get(k); !existing { + *am.orig = append(*am.orig, newAttributeKeyValueNull(k)) + } +} + +// InsertString adds the string Value to the map when the key does not exist. +// No action is applied to the map where the key already exists. +func (am AttributeMap) InsertString(k string, v string) { + if _, existing := am.Get(k); !existing { + *am.orig = append(*am.orig, newAttributeKeyValueString(k, v)) + } +} + +// InsertInt adds the int Value to the map when the key does not exist. +// No action is applied to the map where the key already exists. +func (am AttributeMap) InsertInt(k string, v int64) { + if _, existing := am.Get(k); !existing { + *am.orig = append(*am.orig, newAttributeKeyValueInt(k, v)) + } +} + +// InsertDouble adds the double Value to the map when the key does not exist. +// No action is applied to the map where the key already exists. +func (am AttributeMap) InsertDouble(k string, v float64) { + if _, existing := am.Get(k); !existing { + *am.orig = append(*am.orig, newAttributeKeyValueDouble(k, v)) + } +} + +// InsertBool adds the bool Value to the map when the key does not exist. +// No action is applied to the map where the key already exists. +func (am AttributeMap) InsertBool(k string, v bool) { + if _, existing := am.Get(k); !existing { + *am.orig = append(*am.orig, newAttributeKeyValueBool(k, v)) + } +} + +// Update updates an existing AttributeValue with a value. +// No action is applied to the map where the key does not exist. +// +// Calling this function with a zero-initialized AttributeValue struct will cause a panic. +// +// Important: this function should not be used if the caller has access to +// the raw value to avoid an extra allocation. +func (am AttributeMap) Update(k string, v AttributeValue) { + if av, existing := am.Get(k); existing { + v.copyTo(av.orig) + } +} + +// UpdateString updates an existing string Value with a value. +// No action is applied to the map where the key does not exist. +func (am AttributeMap) UpdateString(k string, v string) { + if av, existing := am.Get(k); existing { + av.SetStringVal(v) + } +} + +// UpdateInt updates an existing int Value with a value. +// No action is applied to the map where the key does not exist. +func (am AttributeMap) UpdateInt(k string, v int64) { + if av, existing := am.Get(k); existing { + av.SetIntVal(v) + } +} + +// UpdateDouble updates an existing double Value with a value. +// No action is applied to the map where the key does not exist. +func (am AttributeMap) UpdateDouble(k string, v float64) { + if av, existing := am.Get(k); existing { + av.SetDoubleVal(v) + } +} + +// UpdateBool updates an existing bool Value with a value. +// No action is applied to the map where the key does not exist. 
+func (am AttributeMap) UpdateBool(k string, v bool) {
+	if av, existing := am.Get(k); existing {
+		av.SetBoolVal(v)
+	}
+}
+
+// Upsert performs the Insert or Update action. The AttributeValue is
+// inserted into the map when the key does not exist, and the existing
+// entry is updated when the key already exists.
+//
+// Calling this function with a zero-initialized AttributeValue struct will cause a panic.
+//
+// Important: this function should not be used if the caller has access to
+// the raw value to avoid an extra allocation.
+func (am AttributeMap) Upsert(k string, v AttributeValue) {
+	if av, existing := am.Get(k); existing {
+		v.copyTo(av.orig)
+	} else {
+		*am.orig = append(*am.orig, newAttributeKeyValue(k, v))
+	}
+}
+
+// UpsertString performs the Insert or Update action. The string Value is
+// inserted into the map when the key does not exist, and the existing
+// entry is updated when the key already exists.
+func (am AttributeMap) UpsertString(k string, v string) {
+	if av, existing := am.Get(k); existing {
+		av.SetStringVal(v)
+	} else {
+		*am.orig = append(*am.orig, newAttributeKeyValueString(k, v))
+	}
+}
+
+// UpsertInt performs the Insert or Update action. The int Value is
+// inserted into the map when the key does not exist, and the existing
+// entry is updated when the key already exists.
+func (am AttributeMap) UpsertInt(k string, v int64) {
+	if av, existing := am.Get(k); existing {
+		av.SetIntVal(v)
+	} else {
+		*am.orig = append(*am.orig, newAttributeKeyValueInt(k, v))
+	}
+}
+
+// UpsertDouble performs the Insert or Update action. The double Value is
+// inserted into the map when the key does not exist, and the existing
+// entry is updated when the key already exists.
+func (am AttributeMap) UpsertDouble(k string, v float64) {
+	if av, existing := am.Get(k); existing {
+		av.SetDoubleVal(v)
+	} else {
+		*am.orig = append(*am.orig, newAttributeKeyValueDouble(k, v))
+	}
+}
+
+// UpsertBool performs the Insert or Update action. The bool Value is
+// inserted into the map when the key does not exist, and the existing
+// entry is updated when the key already exists.
+func (am AttributeMap) UpsertBool(k string, v bool) {
+	if av, existing := am.Get(k); existing {
+		av.SetBoolVal(v)
+	} else {
+		*am.orig = append(*am.orig, newAttributeKeyValueBool(k, v))
+	}
+}
+
+// Sort sorts the entries in the AttributeMap so two instances can be compared.
+// Returns the same instance to allow nicer code like:
+//   assert.EqualValues(t, expected.Sort(), actual.Sort())
+func (am AttributeMap) Sort() AttributeMap {
+	// Intention is to move the nil values to the end.
+	sort.SliceStable(*am.orig, func(i, j int) bool {
+		return (*am.orig)[i].Key < (*am.orig)[j].Key
+	})
+	return am
+}
+
+// Len returns the length of this map.
+//
+// Because the AttributeMap is represented internally by a slice, and the data
+// may come from the wire, iterating using "ForEach" may visit fewer elements
+// than Len() reports, because nil elements are skipped.
+func (am AttributeMap) Len() int {
+	return len(*am.orig)
+}
+
+// ForEach iterates over every element in the map, calling the provided func
+// for each key/value pair.
+//
+// Example:
+//
+//   am.ForEach(func(k string, v AttributeValue) {
+//       ...
+//   })
+func (am AttributeMap) ForEach(f func(k string, v AttributeValue)) {
+	for i := range *am.orig {
+		kv := &(*am.orig)[i]
+		f(kv.Key, AttributeValue{&kv.Value})
+	}
+}
+
+// CopyTo copies all elements from the current map to the dest.
+func (am AttributeMap) CopyTo(dest AttributeMap) {
+	newLen := len(*am.orig)
+	oldCap := cap(*dest.orig)
+	if newLen <= oldCap {
+		// New slice fits in existing slice, no need to reallocate.
+		*dest.orig = (*dest.orig)[:newLen:oldCap]
+		for i := range *am.orig {
+			akv := &(*am.orig)[i]
+			destAkv := &(*dest.orig)[i]
+			destAkv.Key = akv.Key
+			AttributeValue{&akv.Value}.copyTo(&destAkv.Value)
+		}
+		return
+	}
+
+	// New slice is bigger than the existing slice. Allocate new space.
+	origs := make([]otlpcommon.KeyValue, len(*am.orig))
+	for i := range *am.orig {
+		akv := &(*am.orig)[i]
+		origs[i].Key = akv.Key
+		AttributeValue{&akv.Value}.copyTo(&origs[i].Value)
+	}
+	*dest.orig = origs
+}
+
+// StringMap stores a map of string keys to string values.
+type StringMap struct {
+	orig *[]otlpcommon.StringKeyValue
+}
+
+// NewStringMap creates a StringMap with 0 elements.
+func NewStringMap() StringMap {
+	orig := []otlpcommon.StringKeyValue(nil)
+	return StringMap{&orig}
+}
+
+func newStringMap(orig *[]otlpcommon.StringKeyValue) StringMap {
+	return StringMap{orig}
+}
+
+// InitFromMap overwrites the entire StringMap and reconstructs the StringMap
+// with values from the given map[string]string.
+//
+// Returns the same instance to allow nicer code like:
+//   assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{...}), actual)
+func (sm StringMap) InitFromMap(attrMap map[string]string) StringMap {
+	if len(attrMap) == 0 {
+		*sm.orig = []otlpcommon.StringKeyValue(nil)
+		return sm
+	}
+	origs := make([]otlpcommon.StringKeyValue, len(attrMap))
+	ix := 0
+	for k, v := range attrMap {
+		origs[ix].Key = k
+		origs[ix].Value = v
+		ix++
+	}
+	*sm.orig = origs
+	return sm
+}
+
+// InitEmptyWithCapacity constructs an empty StringMap with predefined slice capacity.
+func (sm StringMap) InitEmptyWithCapacity(cap int) {
+	if cap == 0 {
+		*sm.orig = []otlpcommon.StringKeyValue(nil)
+		return
+	}
+	*sm.orig = make([]otlpcommon.StringKeyValue, 0, cap)
+}
+
+// Get returns the value associated with the key and true. If the key does not
+// exist in the map, it returns an empty string and false.
+func (sm StringMap) Get(k string) (string, bool) {
+	skv, found := sm.get(k)
+	// GetValue handles the case where skv is nil.
+	return skv.GetValue(), found
+}
+
+// Delete deletes the entry associated with the key and returns true if the key
+// was present in the map, otherwise returns false.
+func (sm StringMap) Delete(k string) bool {
+	for i := range *sm.orig {
+		skv := &(*sm.orig)[i]
+		if skv.Key == k {
+			(*sm.orig)[i] = (*sm.orig)[len(*sm.orig)-1]
+			*sm.orig = (*sm.orig)[:len(*sm.orig)-1]
+			return true
+		}
+	}
+	return false
+}
+
+// Insert adds the string value to the map when the key does not exist.
+// No action is applied to the map where the key already exists.
+func (sm StringMap) Insert(k, v string) {
+	if _, existing := sm.Get(k); !existing {
+		*sm.orig = append(*sm.orig, newStringKeyValue(k, v))
+	}
+}
+
+// Update updates an existing string value with a value.
+// No action is applied to the map where the key does not exist.
+func (sm StringMap) Update(k, v string) {
+	if skv, existing := sm.get(k); existing {
+		skv.Value = v
+	}
+}
+
+// Upsert performs the Insert or Update action. The string value is
+// inserted into the map when the key does not exist, and the existing
+// entry is updated when the key already exists.
+func (sm StringMap) Upsert(k, v string) {
+	if skv, existing := sm.get(k); existing {
+		skv.Value = v
+	} else {
+		*sm.orig = append(*sm.orig, newStringKeyValue(k, v))
+	}
+}
+
+// Len returns the length of this map.
+//
+// Because the StringMap is represented internally by a slice, and the data
+// may come from the wire, iterating using "ForEach" may visit fewer elements
+// than Len() reports, because nil elements are skipped.
+func (sm StringMap) Len() int {
+	return len(*sm.orig)
+}
+
+// ForEach iterates over every element in the map, calling the provided func
+// for each key/value pair.
+//
+// Example:
+//
+//   sm.ForEach(func(k string, v string) {
+//       ...
+//   })
+func (sm StringMap) ForEach(f func(k string, v string)) {
+	for i := range *sm.orig {
+		skv := &(*sm.orig)[i]
+		f(skv.Key, skv.Value)
+	}
+}
+
+// CopyTo copies all elements from the current map to the dest.
+func (sm StringMap) CopyTo(dest StringMap) {
+	newLen := len(*sm.orig)
+	oldCap := cap(*dest.orig)
+	if newLen <= oldCap {
+		*dest.orig = (*dest.orig)[:newLen:oldCap]
+	} else {
+		*dest.orig = make([]otlpcommon.StringKeyValue, newLen)
+	}
+
+	for i := range *sm.orig {
+		skv := &(*sm.orig)[i]
+		(*dest.orig)[i].Key = skv.Key
+		(*dest.orig)[i].Value = skv.Value
+	}
+}
+
+func (sm StringMap) get(k string) (*otlpcommon.StringKeyValue, bool) {
+	for i := range *sm.orig {
+		skv := &(*sm.orig)[i]
+		if skv.Key == k {
+			return skv, true
+		}
+	}
+	return nil, false
+}
+
+// Sort sorts the entries in the StringMap so two instances can be compared.
+// Returns the same instance to allow nicer code like:
+//   assert.EqualValues(t, expected.Sort(), actual.Sort())
+func (sm StringMap) Sort() StringMap {
+	sort.SliceStable(*sm.orig, func(i, j int) bool {
+		// Intention is to move the nil values to the end.
+		return (*sm.orig)[i].Key < (*sm.orig)[j].Key
+	})
+	return sm
+}
+
+func newStringKeyValue(k, v string) otlpcommon.StringKeyValue {
+	return otlpcommon.StringKeyValue{Key: k, Value: v}
+}
diff --git a/internal/otel_collector/consumer/pdata/common_test.go b/internal/otel_collector/consumer/pdata/common_test.go
new file mode 100644
index 00000000000..06f217ba94c
--- /dev/null
+++ b/internal/otel_collector/consumer/pdata/common_test.go
@@ -0,0 +1,1131 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
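One property of the API above worth making explicit before the tests: Insert and Upsert with an AttributeValue deep-copy map and array payloads (via copyTo), so mutating the source value after insertion does not affect the stored copy. The tests that follow verify exactly this behavior; here is a condensed sketch using only the API defined above:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	child := pdata.NewAttributeValueMap()
	child.MapVal().InsertString("k", "v1")

	parent := pdata.NewAttributeValueMap()
	parent.MapVal().Insert("child", child) // deep copy happens here

	// Mutating the source after insertion...
	child.MapVal().UpdateString("k", "v2")

	// ...does not affect the copy held by parent.
	got, _ := parent.MapVal().Get("child")
	v, _ := got.MapVal().Get("k")
	fmt.Println(v.StringVal()) // v1
}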
+ +package pdata + +import ( + "encoding/json" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" +) + +func TestAttributeValue(t *testing.T) { + v := NewAttributeValueString("abc") + assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, "abc", v.StringVal()) + + v = NewAttributeValueInt(123) + assert.EqualValues(t, AttributeValueINT, v.Type()) + assert.EqualValues(t, 123, v.IntVal()) + + v = NewAttributeValueDouble(3.4) + assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, 3.4, v.DoubleVal()) + + v = NewAttributeValueBool(true) + assert.EqualValues(t, AttributeValueBOOL, v.Type()) + assert.True(t, v.BoolVal()) + + v = NewAttributeValueNull() + assert.EqualValues(t, AttributeValueNULL, v.Type()) + + v.SetStringVal("abc") + assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, "abc", v.StringVal()) + + v.SetIntVal(123) + assert.EqualValues(t, AttributeValueINT, v.Type()) + assert.EqualValues(t, 123, v.IntVal()) + + v.SetDoubleVal(3.4) + assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, 3.4, v.DoubleVal()) + + v.SetBoolVal(true) + assert.EqualValues(t, AttributeValueBOOL, v.Type()) + assert.True(t, v.BoolVal()) +} + +func TestAttributeValueType(t *testing.T) { + assert.EqualValues(t, "NULL", AttributeValueNULL.String()) + assert.EqualValues(t, "STRING", AttributeValueSTRING.String()) + assert.EqualValues(t, "BOOL", AttributeValueBOOL.String()) + assert.EqualValues(t, "INT", AttributeValueINT.String()) + assert.EqualValues(t, "DOUBLE", AttributeValueDOUBLE.String()) + assert.EqualValues(t, "MAP", AttributeValueMAP.String()) + assert.EqualValues(t, "ARRAY", AttributeValueARRAY.String()) +} + +func fromVal(v interface{}) AttributeValue { + switch val := v.(type) { + case string: + return NewAttributeValueString(val) + case int: + return NewAttributeValueInt(int64(val)) + case float64: + return NewAttributeValueDouble(val) + case map[string]interface{}: + return fromMap(val) + case []interface{}: + return fromArray(val) + } + panic("data type is not supported in fromVal()") +} + +func fromMap(v map[string]interface{}) AttributeValue { + av := NewAttributeValueMap() + m := av.MapVal() + for k, v := range v { + m.Insert(k, fromVal(v)) + } + m.Sort() + return av +} + +func fromJSONMap(jsonStr string) AttributeValue { + var src map[string]interface{} + err := json.Unmarshal([]byte(jsonStr), &src) + if err != nil { + panic("Invalid input jsonStr:" + jsonStr) + } + return fromMap(src) +} + +func assertMapJSON(t *testing.T, expectedJSON string, actualMap AttributeValue) { + assert.EqualValues(t, fromJSONMap(expectedJSON).MapVal(), actualMap.MapVal().Sort()) +} + +func TestAttributeValueMap(t *testing.T) { + m1 := NewAttributeValueMap() + assert.EqualValues(t, fromJSONMap(`{}`), m1) + assert.EqualValues(t, AttributeValueMAP, m1.Type()) + assert.EqualValues(t, NewAttributeMap(), m1.MapVal()) + assert.EqualValues(t, 0, m1.MapVal().Len()) + + m1.MapVal().InsertDouble("double_key", 123) + assertMapJSON(t, `{"double_key":123}`, m1) + assert.EqualValues(t, 1, m1.MapVal().Len()) + + v, exists := m1.MapVal().Get("double_key") + require.True(t, exists) + assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, 123, v.DoubleVal()) + + // Create a second map. 
+ m2 := NewAttributeValueMap() + assertMapJSON(t, `{}`, m2) + assert.EqualValues(t, 0, m2.MapVal().Len()) + + // Modify the source map that was inserted. + m2.MapVal().UpsertString("key_in_child", "somestr") + assertMapJSON(t, `{"key_in_child": "somestr"}`, m2) + assert.EqualValues(t, 1, m2.MapVal().Len()) + + // Insert the second map as a child. This should perform a deep copy. + m1.MapVal().Insert("child_map", m2) + assertMapJSON(t, `{"double_key":123, "child_map": {"key_in_child": "somestr"}}`, m1) + assert.EqualValues(t, 2, m1.MapVal().Len()) + + // Check that the map was correctly copied. + childMap, exists := m1.MapVal().Get("child_map") + require.True(t, exists) + assert.EqualValues(t, AttributeValueMAP, childMap.Type()) + assert.EqualValues(t, 1, childMap.MapVal().Len()) + + v, exists = childMap.MapVal().Get("key_in_child") + require.True(t, exists) + assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, "somestr", v.StringVal()) + + // Modify the source map m2 that was inserted into m1. + m2.MapVal().UpdateString("key_in_child", "somestr2") + assertMapJSON(t, `{"key_in_child": "somestr2"}`, m2) + assert.EqualValues(t, 1, m2.MapVal().Len()) + + // The child map inside m1 should not be modified. + assertMapJSON(t, `{"double_key":123, "child_map": {"key_in_child": "somestr"}}`, m1) + childMap, exists = m1.MapVal().Get("child_map") + require.True(t, exists) + v, exists = childMap.MapVal().Get("key_in_child") + require.True(t, exists) + assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, "somestr", v.StringVal()) + + // Now modify the inserted map (not the source) + childMap.MapVal().UpdateString("key_in_child", "somestr3") + assertMapJSON(t, `{"double_key":123, "child_map": {"key_in_child": "somestr3"}}`, m1) + assert.EqualValues(t, 1, childMap.MapVal().Len()) + + v, exists = childMap.MapVal().Get("key_in_child") + require.True(t, exists) + assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, "somestr3", v.StringVal()) + + // The source child map should not be modified. + v, exists = m2.MapVal().Get("key_in_child") + require.True(t, exists) + assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, "somestr2", v.StringVal()) + + deleted := m1.MapVal().Delete("double_key") + assert.True(t, deleted) + assertMapJSON(t, `{"child_map": {"key_in_child": "somestr3"}}`, m1) + assert.EqualValues(t, 1, m1.MapVal().Len()) + _, exists = m1.MapVal().Get("double_key") + assert.False(t, exists) + + deleted = m1.MapVal().Delete("child_map") + assert.True(t, deleted) + assert.EqualValues(t, 0, m1.MapVal().Len()) + _, exists = m1.MapVal().Get("child_map") + assert.False(t, exists) + + // Test nil KvlistValue case for MapVal() func. 
+ orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: nil}} + m1 = AttributeValue{orig: orig} + assert.EqualValues(t, NewAttributeMap(), m1.MapVal()) +} + +func TestNilOrigSetAttributeValue(t *testing.T) { + av := NewAttributeValueNull() + av.SetStringVal("abc") + assert.EqualValues(t, "abc", av.StringVal()) + + av = NewAttributeValueNull() + av.SetIntVal(123) + assert.EqualValues(t, 123, av.IntVal()) + + av = NewAttributeValueNull() + av.SetBoolVal(true) + assert.True(t, av.BoolVal()) + + av = NewAttributeValueNull() + av.SetDoubleVal(1.23) + assert.EqualValues(t, 1.23, av.DoubleVal()) +} + +func TestAttributeValueEqual(t *testing.T) { + av1 := NewAttributeValueNull() + av2 := NewAttributeValueNull() + assert.True(t, av1.Equal(av2)) + + av2 = NewAttributeValueString("abc") + assert.False(t, av1.Equal(av2)) + assert.False(t, av2.Equal(av1)) + + av1 = NewAttributeValueString("abc") + assert.True(t, av1.Equal(av2)) + + av2 = NewAttributeValueString("edf") + assert.False(t, av1.Equal(av2)) + + av2 = NewAttributeValueInt(123) + assert.False(t, av1.Equal(av2)) + assert.False(t, av2.Equal(av1)) + + av1 = NewAttributeValueInt(234) + assert.False(t, av1.Equal(av2)) + + av1 = NewAttributeValueInt(123) + assert.True(t, av1.Equal(av2)) + + av2 = NewAttributeValueDouble(123) + assert.False(t, av1.Equal(av2)) + assert.False(t, av2.Equal(av1)) + + av1 = NewAttributeValueDouble(234) + assert.False(t, av1.Equal(av2)) + + av1 = NewAttributeValueDouble(123) + assert.True(t, av1.Equal(av2)) + + av2 = NewAttributeValueBool(true) + assert.False(t, av1.Equal(av2)) + assert.False(t, av2.Equal(av1)) + + av1 = NewAttributeValueBool(true) + assert.True(t, av1.Equal(av2)) + + av1 = NewAttributeValueBool(false) + assert.False(t, av1.Equal(av2)) +} + +func TestNilAttributeMap(t *testing.T) { + assert.EqualValues(t, 0, NewAttributeMap().Len()) + + val, exist := NewAttributeMap().Get("test_key") + assert.False(t, exist) + assert.EqualValues(t, AttributeValue{nil}, val) + + insertMap := NewAttributeMap() + insertMap.Insert("k", NewAttributeValueString("v")) + assert.EqualValues(t, generateTestAttributeMap(), insertMap) + + insertMapString := NewAttributeMap() + insertMapString.InsertString("k", "v") + assert.EqualValues(t, generateTestAttributeMap(), insertMapString) + + insertMapNull := NewAttributeMap() + insertMapNull.InsertNull("k") + assert.EqualValues(t, generateTestNullAttributeMap(), insertMapNull) + + insertMapInt := NewAttributeMap() + insertMapInt.InsertInt("k", 123) + assert.EqualValues(t, generateTestIntAttributeMap(), insertMapInt) + + insertMapDouble := NewAttributeMap() + insertMapDouble.InsertDouble("k", 12.3) + assert.EqualValues(t, generateTestDoubleAttributeMap(), insertMapDouble) + + insertMapBool := NewAttributeMap() + insertMapBool.InsertBool("k", true) + assert.EqualValues(t, generateTestBoolAttributeMap(), insertMapBool) + + updateMap := NewAttributeMap() + updateMap.Update("k", NewAttributeValueString("v")) + assert.EqualValues(t, NewAttributeMap(), updateMap) + + updateMapString := NewAttributeMap() + updateMapString.UpdateString("k", "v") + assert.EqualValues(t, NewAttributeMap(), updateMapString) + + updateMapInt := NewAttributeMap() + updateMapInt.UpdateInt("k", 123) + assert.EqualValues(t, NewAttributeMap(), updateMapInt) + + updateMapDouble := NewAttributeMap() + updateMapDouble.UpdateDouble("k", 12.3) + assert.EqualValues(t, NewAttributeMap(), updateMapDouble) + + updateMapBool := NewAttributeMap() + updateMapBool.UpdateBool("k", true) + assert.EqualValues(t, 
NewAttributeMap(), updateMapBool) + + upsertMap := NewAttributeMap() + upsertMap.Upsert("k", NewAttributeValueString("v")) + assert.EqualValues(t, generateTestAttributeMap(), upsertMap) + + upsertMapString := NewAttributeMap() + upsertMapString.UpsertString("k", "v") + assert.EqualValues(t, generateTestAttributeMap(), upsertMapString) + + upsertMapInt := NewAttributeMap() + upsertMapInt.UpsertInt("k", 123) + assert.EqualValues(t, generateTestIntAttributeMap(), upsertMapInt) + + upsertMapDouble := NewAttributeMap() + upsertMapDouble.UpsertDouble("k", 12.3) + assert.EqualValues(t, generateTestDoubleAttributeMap(), upsertMapDouble) + + upsertMapBool := NewAttributeMap() + upsertMapBool.UpsertBool("k", true) + assert.EqualValues(t, generateTestBoolAttributeMap(), upsertMapBool) + + deleteMap := NewAttributeMap() + assert.False(t, deleteMap.Delete("k")) + assert.EqualValues(t, NewAttributeMap(), deleteMap) + + // Test Sort + assert.EqualValues(t, NewAttributeMap(), NewAttributeMap().Sort()) +} + +func TestAttributeMapWithEmpty(t *testing.T) { + origWithNil := []otlpcommon.KeyValue{ + {}, + { + Key: "test_key", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "test_value"}}, + }, + { + Key: "test_key2", + Value: otlpcommon.AnyValue{Value: nil}, + }, + } + sm := AttributeMap{ + orig: &origWithNil, + } + val, exist := sm.Get("test_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "test_value", val.StringVal()) + + val, exist = sm.Get("test_key2") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueNULL, val.Type()) + assert.EqualValues(t, "", val.StringVal()) + + sm.Insert("other_key", NewAttributeValueString("other_value")) + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "other_value", val.StringVal()) + + sm.InsertString("other_key_string", "other_value") + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "other_value", val.StringVal()) + + sm.InsertInt("other_key_int", 123) + val, exist = sm.Get("other_key_int") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, 123, val.IntVal()) + + sm.InsertDouble("other_key_double", 1.23) + val, exist = sm.Get("other_key_double") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, 1.23, val.DoubleVal()) + + sm.InsertBool("other_key_bool", true) + val, exist = sm.Get("other_key_bool") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.True(t, val.BoolVal()) + + sm.Update("other_key", NewAttributeValueString("yet_another_value")) + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "yet_another_value", val.StringVal()) + + sm.UpdateString("other_key_string", "yet_another_value") + val, exist = sm.Get("other_key_string") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "yet_another_value", val.StringVal()) + + sm.UpdateInt("other_key_int", 456) + val, exist = sm.Get("other_key_int") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, 456, val.IntVal()) + + sm.UpdateDouble("other_key_double", 4.56) + val, exist = sm.Get("other_key_double") + 
assert.True(t, exist) + assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, 4.56, val.DoubleVal()) + + sm.UpdateBool("other_key_bool", false) + val, exist = sm.Get("other_key_bool") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.False(t, val.BoolVal()) + + sm.Upsert("other_key", NewAttributeValueString("other_value")) + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "other_value", val.StringVal()) + + sm.UpsertString("other_key_string", "other_value") + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "other_value", val.StringVal()) + + sm.UpsertInt("other_key_int", 123) + val, exist = sm.Get("other_key_int") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, 123, val.IntVal()) + + sm.UpsertDouble("other_key_double", 1.23) + val, exist = sm.Get("other_key_double") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, 1.23, val.DoubleVal()) + + sm.UpsertBool("other_key_bool", true) + val, exist = sm.Get("other_key_bool") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.True(t, val.BoolVal()) + + sm.Upsert("yet_another_key", NewAttributeValueString("yet_another_value")) + val, exist = sm.Get("yet_another_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "yet_another_value", val.StringVal()) + + sm.UpsertString("yet_another_key_string", "yet_another_value") + val, exist = sm.Get("yet_another_key_string") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "yet_another_value", val.StringVal()) + + sm.UpsertInt("yet_another_key_int", 456) + val, exist = sm.Get("yet_another_key_int") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, 456, val.IntVal()) + + sm.UpsertDouble("yet_another_key_double", 4.56) + val, exist = sm.Get("yet_another_key_double") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, 4.56, val.DoubleVal()) + + sm.UpsertBool("yet_another_key_bool", false) + val, exist = sm.Get("yet_another_key_bool") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.False(t, val.BoolVal()) + + assert.True(t, sm.Delete("other_key")) + assert.True(t, sm.Delete("other_key_string")) + assert.True(t, sm.Delete("other_key_int")) + assert.True(t, sm.Delete("other_key_double")) + assert.True(t, sm.Delete("other_key_bool")) + assert.True(t, sm.Delete("yet_another_key")) + assert.True(t, sm.Delete("yet_another_key_string")) + assert.True(t, sm.Delete("yet_another_key_int")) + assert.True(t, sm.Delete("yet_another_key_double")) + assert.True(t, sm.Delete("yet_another_key_bool")) + assert.False(t, sm.Delete("other_key")) + assert.False(t, sm.Delete("yet_another_key")) + + // Test that the initial key is still there. 
+ val, exist = sm.Get("test_key") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, "test_value", val.StringVal()) + + val, exist = sm.Get("test_key2") + assert.True(t, exist) + assert.EqualValues(t, AttributeValueNULL, val.Type()) + assert.EqualValues(t, "", val.StringVal()) + + _, exist = sm.Get("test_key3") + assert.False(t, exist) + + // Test Sort + assert.EqualValues(t, AttributeMap{orig: &origWithNil}, sm.Sort()) +} + +func TestAttributeMapIterationNil(t *testing.T) { + NewAttributeMap().ForEach(func(k string, v AttributeValue) { + // Fail if any element is returned + t.Fail() + }) +} + +func TestAttributeMap_ForEach(t *testing.T) { + rawMap := map[string]AttributeValue{ + "k_string": NewAttributeValueString("123"), + "k_int": NewAttributeValueInt(123), + "k_double": NewAttributeValueDouble(1.23), + "k_bool": NewAttributeValueBool(true), + "k_null": NewAttributeValueNull(), + } + am := NewAttributeMap().InitFromMap(rawMap) + assert.EqualValues(t, 5, am.Len()) + + am.ForEach(func(k string, v AttributeValue) { + assert.True(t, v.Equal(rawMap[k])) + delete(rawMap, k) + }) + assert.EqualValues(t, 0, len(rawMap)) +} + +func TestAttributeMap_InitFromMap(t *testing.T) { + am := NewAttributeMap().InitFromMap(map[string]AttributeValue(nil)) + assert.EqualValues(t, NewAttributeMap(), am) + + rawMap := map[string]AttributeValue{ + "k_string": NewAttributeValueString("123"), + "k_int": NewAttributeValueInt(123), + "k_double": NewAttributeValueDouble(1.23), + "k_bool": NewAttributeValueBool(true), + "k_null": NewAttributeValueNull(), + } + rawOrig := []otlpcommon.KeyValue{ + newAttributeKeyValueString("k_string", "123"), + newAttributeKeyValueInt("k_int", 123), + newAttributeKeyValueDouble("k_double", 1.23), + newAttributeKeyValueBool("k_bool", true), + newAttributeKeyValueNull("k_null"), + } + am = NewAttributeMap().InitFromMap(rawMap) + assert.EqualValues(t, AttributeMap{orig: &rawOrig}.Sort(), am.Sort()) +} + +func TestAttributeValue_CopyTo(t *testing.T) { + // Test nil KvlistValue case for MapVal() func. + dest := NewAttributeValueNull() + orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: nil}} + AttributeValue{orig: orig}.CopyTo(dest) + assert.Nil(t, dest.orig.Value.(*otlpcommon.AnyValue_KvlistValue).KvlistValue) + + // Test nil ArrayValue case for ArrayVal() func. + dest = NewAttributeValueNull() + orig = &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: nil}} + AttributeValue{orig: orig}.CopyTo(dest) + assert.Nil(t, dest.orig.Value.(*otlpcommon.AnyValue_ArrayValue).ArrayValue) + + // Test copy empty value. 
+ AttributeValue{orig: &otlpcommon.AnyValue{}}.CopyTo(dest) + assert.Nil(t, dest.orig.Value) +} + +func TestAttributeMap_CopyTo(t *testing.T) { + dest := NewAttributeMap() + // Test CopyTo to empty + NewAttributeMap().CopyTo(dest) + assert.EqualValues(t, 0, dest.Len()) + + // Test CopyTo larger slice + generateTestAttributeMap().CopyTo(dest) + assert.EqualValues(t, generateTestAttributeMap(), dest) + + // Test CopyTo same size slice + generateTestAttributeMap().CopyTo(dest) + assert.EqualValues(t, generateTestAttributeMap(), dest) + + // Test CopyTo with an empty Value in the destination + (*dest.orig)[0].Value = otlpcommon.AnyValue{} + generateTestAttributeMap().CopyTo(dest) + assert.EqualValues(t, generateTestAttributeMap(), dest) +} + +func TestAttributeValue_copyTo(t *testing.T) { + av := NewAttributeValueNull() + destVal := otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{}} + av.copyTo(&destVal) + assert.EqualValues(t, nil, destVal.Value) +} + +func TestAttributeMap_Update(t *testing.T) { + origWithNil := []otlpcommon.KeyValue{ + { + Key: "test_key", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "test_value"}}, + }, + { + Key: "test_key2", + Value: otlpcommon.AnyValue{Value: nil}, + }, + } + sm := AttributeMap{ + orig: &origWithNil, + } + + av, exists := sm.Get("test_key") + assert.True(t, exists) + assert.EqualValues(t, AttributeValueSTRING, av.Type()) + assert.EqualValues(t, "test_value", av.StringVal()) + av.SetIntVal(123) + + av2, exists := sm.Get("test_key") + assert.True(t, exists) + assert.EqualValues(t, AttributeValueINT, av2.Type()) + assert.EqualValues(t, 123, av2.IntVal()) + + av, exists = sm.Get("test_key2") + assert.True(t, exists) + assert.EqualValues(t, AttributeValueNULL, av.Type()) + assert.EqualValues(t, "", av.StringVal()) + av.SetIntVal(123) + + av2, exists = sm.Get("test_key2") + assert.True(t, exists) + assert.EqualValues(t, AttributeValueINT, av2.Type()) + assert.EqualValues(t, 123, av2.IntVal()) +} + +func TestAttributeMap_InitEmptyWithCapacity(t *testing.T) { + am := NewAttributeMap() + am.InitEmptyWithCapacity(0) + assert.Equal(t, NewAttributeMap(), am) + assert.Equal(t, 0, am.Len()) +} + +func TestNilStringMap(t *testing.T) { + assert.EqualValues(t, 0, NewStringMap().Len()) + + val, exist := NewStringMap().Get("test_key") + assert.False(t, exist) + assert.EqualValues(t, "", val) + + insertMap := NewStringMap() + insertMap.Insert("k", "v") + assert.EqualValues(t, generateTestStringMap(), insertMap) + + updateMap := NewStringMap() + updateMap.Update("k", "v") + assert.EqualValues(t, NewStringMap(), updateMap) + + upsertMap := NewStringMap() + upsertMap.Upsert("k", "v") + assert.EqualValues(t, generateTestStringMap(), upsertMap) + + deleteMap := NewStringMap() + assert.False(t, deleteMap.Delete("k")) + assert.EqualValues(t, NewStringMap(), deleteMap) + + // Test Sort + assert.EqualValues(t, NewStringMap(), NewStringMap().Sort()) +} + +func TestStringMapWithEmpty(t *testing.T) { + origWithNil := []otlpcommon.StringKeyValue{ + {}, + { + Key: "test_key", + Value: "test_value", + }, + } + sm := StringMap{ + orig: &origWithNil, + } + val, exist := sm.Get("test_key") + assert.True(t, exist) + assert.EqualValues(t, "test_value", val) + + sm.Insert("other_key", "other_value") + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, "other_value", val) + + sm.Update("other_key", "yet_another_value") + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, 
"yet_another_value", val) + + sm.Upsert("other_key", "other_value") + val, exist = sm.Get("other_key") + assert.True(t, exist) + assert.EqualValues(t, "other_value", val) + + sm.Upsert("yet_another_key", "yet_another_value") + val, exist = sm.Get("yet_another_key") + assert.True(t, exist) + assert.EqualValues(t, "yet_another_value", val) + + assert.True(t, sm.Delete("other_key")) + assert.True(t, sm.Delete("yet_another_key")) + assert.False(t, sm.Delete("other_key")) + assert.False(t, sm.Delete("yet_another_key")) + + // Test that the initial key is still there. + val, exist = sm.Get("test_key") + assert.True(t, exist) + assert.EqualValues(t, "test_value", val) + + // Test Sort + assert.EqualValues(t, StringMap{orig: &origWithNil}, sm.Sort()) +} + +func TestStringMap(t *testing.T) { + origRawMap := map[string]string{"k0": "v0", "k1": "v1", "k2": "v2"} + origMap := NewStringMap().InitFromMap(origRawMap) + sm := NewStringMap().InitFromMap(origRawMap) + assert.EqualValues(t, 3, sm.Len()) + + val, exist := sm.Get("k2") + assert.True(t, exist) + assert.EqualValues(t, "v2", val) + + val, exist = sm.Get("k3") + assert.False(t, exist) + assert.EqualValues(t, "", val) + + sm.Insert("k1", "v1") + assert.EqualValues(t, origMap.Sort(), sm.Sort()) + sm.Insert("k3", "v3") + assert.EqualValues(t, 4, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k0": "v0", "k1": "v1", "k2": "v2", "k3": "v3"}).Sort(), sm.Sort()) + assert.True(t, sm.Delete("k3")) + assert.EqualValues(t, 3, sm.Len()) + assert.EqualValues(t, origMap.Sort(), sm.Sort()) + + sm.Update("k3", "v3") + assert.EqualValues(t, 3, sm.Len()) + assert.EqualValues(t, origMap.Sort(), sm.Sort()) + sm.Update("k2", "v3") + assert.EqualValues(t, 3, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k0": "v0", "k1": "v1", "k2": "v3"}).Sort(), sm.Sort()) + sm.Update("k2", "v2") + assert.EqualValues(t, 3, sm.Len()) + assert.EqualValues(t, origMap.Sort(), sm.Sort()) + + sm.Upsert("k3", "v3") + assert.EqualValues(t, 4, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k0": "v0", "k1": "v1", "k2": "v2", "k3": "v3"}).Sort(), sm.Sort()) + sm.Upsert("k1", "v5") + assert.EqualValues(t, 4, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k0": "v0", "k1": "v5", "k2": "v2", "k3": "v3"}).Sort(), sm.Sort()) + sm.Upsert("k1", "v1") + assert.EqualValues(t, 4, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k0": "v0", "k1": "v1", "k2": "v2", "k3": "v3"}).Sort(), sm.Sort()) + assert.True(t, sm.Delete("k3")) + assert.EqualValues(t, 3, sm.Len()) + assert.EqualValues(t, origMap.Sort(), sm.Sort()) + + assert.False(t, sm.Delete("k3")) + assert.EqualValues(t, 3, sm.Len()) + assert.EqualValues(t, origMap.Sort(), sm.Sort()) + + assert.True(t, sm.Delete("k0")) + assert.EqualValues(t, 2, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k1": "v1", "k2": "v2"}).Sort(), sm.Sort()) + assert.True(t, sm.Delete("k2")) + assert.EqualValues(t, 1, sm.Len()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"k1": "v1"}).Sort(), sm.Sort()) + assert.True(t, sm.Delete("k1")) + assert.EqualValues(t, 0, sm.Len()) +} + +func TestStringMapIterationNil(t *testing.T) { + NewStringMap().ForEach(func(k string, v string) { + // Fail if any element is returned + t.Fail() + }) +} + +func TestStringMap_ForEach(t *testing.T) { + rawMap := map[string]string{"k0": "v0", "k1": "v1", "k2": "v2"} + sm := 
NewStringMap().InitFromMap(rawMap) + assert.EqualValues(t, 3, sm.Len()) + + sm.ForEach(func(k string, v string) { + assert.EqualValues(t, rawMap[k], v) + delete(rawMap, k) + }) + assert.EqualValues(t, 0, len(rawMap)) +} + +func TestStringMap_CopyTo(t *testing.T) { + dest := NewStringMap() + // Test CopyTo to empty + NewStringMap().CopyTo(dest) + assert.EqualValues(t, 0, dest.Len()) + + // Test CopyTo larger slice + generateTestStringMap().CopyTo(dest) + assert.EqualValues(t, generateTestStringMap(), dest) + + // Test CopyTo same size slice + generateTestStringMap().CopyTo(dest) + assert.EqualValues(t, generateTestStringMap(), dest) +} + +func TestStringMap_InitEmptyWithCapacity(t *testing.T) { + sm := NewStringMap() + sm.InitEmptyWithCapacity(0) + assert.Equal(t, NewStringMap(), sm) + assert.Equal(t, 0, sm.Len()) +} + +func TestStringMap_InitFromMap(t *testing.T) { + sm := NewStringMap().InitFromMap(map[string]string(nil)) + assert.EqualValues(t, NewStringMap(), sm) + + rawMap := map[string]string{"k0": "v0", "k1": "v1", "k2": "v2"} + rawOrig := []otlpcommon.StringKeyValue{ + { + Key: "k0", + Value: "v0", + }, + { + Key: "k1", + Value: "v1", + }, + { + Key: "k2", + Value: "v2", + }, + } + sm = NewStringMap().InitFromMap(rawMap) + assert.EqualValues(t, StringMap{orig: &rawOrig}.Sort(), sm.Sort()) +} + +func BenchmarkAttributeValue_CopyTo(b *testing.B) { + av := NewAttributeValueString("k") + c := NewAttributeValueInt(123) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + c.copyTo(av.orig) + } + if av.IntVal() != 123 { + b.Fail() + } +} + +func BenchmarkAttributeValue_SetIntVal(b *testing.B) { + av := NewAttributeValueString("k") + + b.ResetTimer() + for n := 0; n < b.N; n++ { + av.SetIntVal(int64(n)) + } + if av.IntVal() != int64(b.N-1) { + b.Fail() + } +} + +func BenchmarkAttributeMap_ForEach(b *testing.B) { + const numElements = 20 + rawOrig := make([]otlpcommon.KeyValue, numElements) + for i := 0; i < numElements; i++ { + rawOrig[i] = otlpcommon.KeyValue{ + Key: "k" + strconv.Itoa(i), + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "v" + strconv.Itoa(i)}}, + } + } + am := AttributeMap{ + orig: &rawOrig, + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + numEls := 0 + am.ForEach(func(k string, v AttributeValue) { + numEls++ + }) + if numEls != numElements { + b.Fail() + } + } +} + +func BenchmarkAttributeMap_RangeOverMap(b *testing.B) { + const numElements = 20 + rawOrig := make(map[string]AttributeValue, numElements) + for i := 0; i < numElements; i++ { + key := "k" + strconv.Itoa(i) + rawOrig[key] = NewAttributeValueString("v" + strconv.Itoa(i)) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + numEls := 0 + for _, v := range rawOrig { + if v.orig == nil { + continue + } + numEls++ + } + if numEls != numElements { + b.Fail() + } + } +} + +func BenchmarkStringMap_ForEach(b *testing.B) { + const numElements = 20 + rawOrig := make([]otlpcommon.StringKeyValue, numElements) + for i := 0; i < numElements; i++ { + rawOrig[i] = otlpcommon.StringKeyValue{ + Key: "k" + strconv.Itoa(i), + Value: "v" + strconv.Itoa(i), + } + } + sm := StringMap{ + orig: &rawOrig, + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + numEls := 0 + sm.ForEach(func(s string, value string) { + numEls++ + }) + if numEls != numElements { + b.Fail() + } + } +} + +func BenchmarkStringMap_RangeOverMap(b *testing.B) { + const numElements = 20 + rawOrig := make(map[string]string, numElements) + for i := 0; i < numElements; i++ { + key := "k" + strconv.Itoa(i) + rawOrig[key] = "v" + 
strconv.Itoa(i) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + numEls := 0 + for _, v := range rawOrig { + if v == "" { + continue + } + numEls++ + } + if numEls != numElements { + b.Fail() + } + } +} + +func fillTestAttributeValue(dest AttributeValue) { + dest.SetStringVal("v") +} + +func generateTestAttributeValue() AttributeValue { + av := NewAttributeValueNull() + fillTestAttributeValue(av) + return av +} + +func generateTestStringMap() StringMap { + sm := NewStringMap() + fillTestStringMap(sm) + return sm +} + +func fillTestStringMap(dest StringMap) { + dest.InitFromMap(map[string]string{ + "k": "v", + }) +} + +func generateTestAttributeMap() AttributeMap { + am := NewAttributeMap() + fillTestAttributeMap(am) + return am +} + +func fillTestAttributeMap(dest AttributeMap) { + dest.InitFromMap(map[string]AttributeValue{ + "k": NewAttributeValueString("v"), + }) +} + +func generateTestNullAttributeMap() AttributeMap { + am := NewAttributeMap() + am.InitFromMap(map[string]AttributeValue{ + "k": NewAttributeValueNull(), + }) + return am +} +func generateTestIntAttributeMap() AttributeMap { + am := NewAttributeMap() + am.InitFromMap(map[string]AttributeValue{ + "k": NewAttributeValueInt(123), + }) + return am +} + +func generateTestDoubleAttributeMap() AttributeMap { + am := NewAttributeMap() + am.InitFromMap(map[string]AttributeValue{ + "k": NewAttributeValueDouble(12.3), + }) + return am +} + +func generateTestBoolAttributeMap() AttributeMap { + am := NewAttributeMap() + am.InitFromMap(map[string]AttributeValue{ + "k": NewAttributeValueBool(true), + }) + return am +} + +func fromArray(v []interface{}) AttributeValue { + av := NewAttributeValueArray() + arr := av.ArrayVal() + for _, v := range v { + arr.Append(fromVal(v)) + } + return av +} + +func fromJSONArray(jsonStr string) AttributeValue { + var src []interface{} + err := json.Unmarshal([]byte(jsonStr), &src) + if err != nil { + panic("Invalid input jsonStr:" + jsonStr) + } + return fromArray(src) +} + +func assertArrayJSON(t *testing.T, expectedJSON string, actualArray AttributeValue) { + assert.EqualValues(t, fromJSONArray(expectedJSON).ArrayVal(), actualArray.ArrayVal()) +} + +func TestAttributeValueArray(t *testing.T) { + a1 := NewAttributeValueArray() + assert.EqualValues(t, fromJSONArray(`[]`), a1) + assert.EqualValues(t, AttributeValueARRAY, a1.Type()) + assert.EqualValues(t, NewAnyValueArray(), a1.ArrayVal()) + assert.EqualValues(t, 0, a1.ArrayVal().Len()) + + a1.ArrayVal().Resize(1) + v := a1.ArrayVal().At(0) + v.SetDoubleVal(123) + assertArrayJSON(t, `[123]`, a1) + assert.EqualValues(t, 1, a1.ArrayVal().Len()) + assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, 123, v.DoubleVal()) + + // Create a second array. + a2 := NewAttributeValueArray() + assertArrayJSON(t, `[]`, a2) + assert.EqualValues(t, 0, a2.ArrayVal().Len()) + + a2.ArrayVal().Resize(1) + a2.ArrayVal().At(0).SetStringVal("somestr") + assertArrayJSON(t, `["somestr"]`, a2) + assert.EqualValues(t, 1, a2.ArrayVal().Len()) + + // Insert the second array as a child. + a1.ArrayVal().Append(a2) + assertArrayJSON(t, `[123, ["somestr"]]`, a1) + assert.EqualValues(t, 2, a1.ArrayVal().Len()) + + // Check that the array was correctly inserted. 
+	childArray := a1.ArrayVal().At(1)
+	assert.EqualValues(t, AttributeValueARRAY, childArray.Type())
+	assert.EqualValues(t, 1, childArray.ArrayVal().Len())
+
+	v = childArray.ArrayVal().At(0)
+	assert.EqualValues(t, AttributeValueSTRING, v.Type())
+	assert.EqualValues(t, "somestr", v.StringVal())
+
+	// Test nil values case for ArrayVal() func.
+	a1 = AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: nil}}}
+	assert.EqualValues(t, NewAnyValueArray(), a1.ArrayVal())
+}
+
+func TestAnyValueArrayWithNilValues(t *testing.T) {
+	origWithNil := []otlpcommon.AnyValue{
+		{},
+		{Value: &otlpcommon.AnyValue_StringValue{StringValue: "test_value"}},
+	}
+	sm := AnyValueArray{
+		orig: &origWithNil,
+	}
+
+	val := sm.At(0)
+	assert.EqualValues(t, AttributeValueNULL, val.Type())
+	assert.EqualValues(t, "", val.StringVal())
+
+	val = sm.At(1)
+	assert.EqualValues(t, AttributeValueSTRING, val.Type())
+	assert.EqualValues(t, "test_value", val.StringVal())
+
+	sm.Append(NewAttributeValueString("other_value"))
+	val = sm.At(2)
+	assert.EqualValues(t, AttributeValueSTRING, val.Type())
+	assert.EqualValues(t, "other_value", val.StringVal())
+}
diff --git a/internal/otel_collector/consumer/pdata/doc.go b/internal/otel_collector/consumer/pdata/doc.go
new file mode 100644
index 00000000000..4b4ca0b041b
--- /dev/null
+++ b/internal/otel_collector/consumer/pdata/doc.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pdata (pipeline data) implements data structures that represent telemetry data in-memory.
+// All received data is converted into this format, travels through the pipeline
+// in this format, and is converted from this format by exporters when sending.
+//
+// The current implementation primarily uses OTLP ProtoBuf structs as the underlying data
+// structures for many of the declared structs. We keep a pointer to the OTLP protobuf
+// in the "orig" member field. This allows efficient translation to/from the OTLP wire
+// protocol. Note that the underlying data structure is kept private so that in the
+// future we are free to change it to something more optimal.
+//
+// Most internal data structures must be created via New* functions. Zero-initialized
+// structures are in most cases not valid (read the comments for each struct to know
+// whether that is the case). This is a slight deviation from idiomatic Go, made to avoid
+// unnecessary pointer checks in dozens of functions which assume the invariant that the
+// "orig" member is non-nil. Several structures also provide New*Slice functions that
+// allow creating more than one instance of a struct more efficiently than calling New*
+// repeatedly. Use them where appropriate.
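The package comment above pins down the two invariants everything in this vendored package relies on: construct through New*, and remember that every wrapper is a thin handle around an OTLP struct. A minimal sketch of the StringMap semantics exercised by the tests earlier in this diff, assuming `go.opentelemetry.io/collector` resolves to this vendored copy:

```go
package main

import (
	"fmt"

	// Assumes the module path resolves to internal/otel_collector in this repo.
	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	// Zero-initialized pdata structs are invalid; always go through New*.
	sm := pdata.NewStringMap().InitFromMap(map[string]string{"k1": "v1"})

	sm.Insert("k2", "v2") // inserts only if the key is absent
	sm.Insert("k1", "xx") // no-op: "k1" already exists
	sm.Update("k3", "v3") // no-op: updates only existing keys
	sm.Upsert("k3", "v3") // inserts or updates unconditionally

	v, ok := sm.Get("k1")
	fmt.Println(v, ok, sm.Len()) // v1 true 3
}
```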
+package pdata diff --git a/internal/otel_collector/consumer/pdata/generated_common.go b/internal/otel_collector/consumer/pdata/generated_common.go new file mode 100644 index 00000000000..9c5e7f73dd5 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_common.go @@ -0,0 +1,187 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run cmd/pdatagen/main.go". + +package pdata + +import ( + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" +) + +// InstrumentationLibrary is a message representing the instrumentation library information. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewInstrumentationLibrary function to create new instances. +// Important: zero-initialized instance is not valid for use. +type InstrumentationLibrary struct { + orig *otlpcommon.InstrumentationLibrary +} + +func newInstrumentationLibrary(orig *otlpcommon.InstrumentationLibrary) InstrumentationLibrary { + return InstrumentationLibrary{orig: orig} +} + +// NewInstrumentationLibrary creates a new empty InstrumentationLibrary. +// +// This must be used only in testing code since no "Set" method available. +func NewInstrumentationLibrary() InstrumentationLibrary { + return newInstrumentationLibrary(&otlpcommon.InstrumentationLibrary{}) +} + +// Name returns the name associated with this InstrumentationLibrary. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibrary) Name() string { + return (*ms.orig).Name +} + +// SetName replaces the name associated with this InstrumentationLibrary. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibrary) SetName(v string) { + (*ms.orig).Name = v +} + +// Version returns the version associated with this InstrumentationLibrary. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibrary) Version() string { + return (*ms.orig).Version +} + +// SetVersion replaces the version associated with this InstrumentationLibrary. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibrary) SetVersion(v string) { + (*ms.orig).Version = v +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms InstrumentationLibrary) CopyTo(dest InstrumentationLibrary) { + dest.SetName(ms.Name()) + dest.SetVersion(ms.Version()) +} + +// AnyValueArray logically represents a slice of AttributeValue. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewAnyValueArray function to create new instances. +// Important: zero-initialized instance is not valid for use. 
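The "reference type" wording that recurs in these doc comments has a concrete consequence: passing a wrapper by value does not isolate the caller. A small sketch, using only the generated InstrumentationLibrary accessors above (import path as in the previous sketch):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// rename receives il by value, yet mutates the caller's data: the copy
// shares the same underlying OTLP struct through the private orig pointer.
func rename(il pdata.InstrumentationLibrary) {
	il.SetName("renamed")
}

func main() {
	il := pdata.NewInstrumentationLibrary()
	il.SetName("original")
	rename(il)
	fmt.Println(il.Name()) // renamed
}
```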
+type AnyValueArray struct { + // orig points to the slice otlpcommon.AnyValue field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]otlpcommon.AnyValue +} + +func newAnyValueArray(orig *[]otlpcommon.AnyValue) AnyValueArray { + return AnyValueArray{orig} +} + +// NewAnyValueArray creates a AnyValueArray with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewAnyValueArray() AnyValueArray { + orig := []otlpcommon.AnyValue(nil) + return AnyValueArray{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewAnyValueArray()". +func (es AnyValueArray) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es AnyValueArray) At(ix int) AttributeValue { + return newAttributeValue(&(*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es AnyValueArray) MoveAndAppendTo(dest AnyValueArray) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es AnyValueArray) CopyTo(dest AnyValueArray) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + } else { + (*dest.orig) = make([]otlpcommon.AnyValue, srcLen) + } + + for i := range *es.orig { + newAttributeValue(&(*es.orig)[i]).CopyTo(newAttributeValue(&(*dest.orig)[i])) + } +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new AnyValueArray can be initialized: +// es := NewAnyValueArray() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es AnyValueArray) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]otlpcommon.AnyValue, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + empty := otlpcommon.AnyValue{} + for i := oldLen; i < newLen; i++ { + *es.orig = append(*es.orig, empty) + } +} + +// Append will increase the length of the AnyValueArray by one and set the +// given AttributeValue at that new position. The original AttributeValue +// could still be referenced so do not reuse it after passing it to this +// method. 
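Resize and MoveAndAppendTo above are the two bulk operations on AnyValueArray: Resize appends empty elements so they can be filled in place via At, while MoveAndAppendTo hands the backing slice to the destination without copying when the destination is empty. A hedged usage sketch, under the same import assumption as the earlier examples:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	arr := pdata.NewAnyValueArray()
	arr.Resize(3) // appends three empty values; fill them in place via At
	for i := 0; i < arr.Len(); i++ {
		arr.At(i).SetStringVal(fmt.Sprintf("v%d", i))
	}

	dest := pdata.NewAnyValueArray()
	arr.MoveAndAppendTo(dest) // dest adopts the backing slice; no per-element copy
	fmt.Println(arr.Len(), dest.Len()) // 0 3
}
```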
+func (es AnyValueArray) Append(e AttributeValue) { + *es.orig = append(*es.orig, *e.orig) +} diff --git a/internal/otel_collector/consumer/pdata/generated_common_test.go b/internal/otel_collector/consumer/pdata/generated_common_test.go new file mode 100644 index 00000000000..a7765894034 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_common_test.go @@ -0,0 +1,185 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run cmd/pdatagen/main.go". + +package pdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" +) + +func TestInstrumentationLibrary_CopyTo(t *testing.T) { + ms := NewInstrumentationLibrary() + generateTestInstrumentationLibrary().CopyTo(ms) + assert.EqualValues(t, generateTestInstrumentationLibrary(), ms) +} + +func TestInstrumentationLibrary_Name(t *testing.T) { + ms := NewInstrumentationLibrary() + assert.EqualValues(t, "", ms.Name()) + testValName := "test_name" + ms.SetName(testValName) + assert.EqualValues(t, testValName, ms.Name()) +} + +func TestInstrumentationLibrary_Version(t *testing.T) { + ms := NewInstrumentationLibrary() + assert.EqualValues(t, "", ms.Version()) + testValVersion := "test_version" + ms.SetVersion(testValVersion) + assert.EqualValues(t, testValVersion, ms.Version()) +} + +func TestAnyValueArray(t *testing.T) { + es := NewAnyValueArray() + assert.EqualValues(t, 0, es.Len()) + es = newAnyValueArray(&[]otlpcommon.AnyValue{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewAttributeValue() + testVal := generateTestAttributeValue() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestAttributeValue(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestAnyValueArray_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestAnyValueArray() + dest := NewAnyValueArray() + src := generateTestAnyValueArray() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestAnyValueArray(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestAnyValueArray(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestAnyValueArray().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestAnyValueArray_CopyTo(t *testing.T) { + dest := NewAnyValueArray() + // Test CopyTo to empty + 
NewAnyValueArray().CopyTo(dest) + assert.EqualValues(t, NewAnyValueArray(), dest) + + // Test CopyTo larger slice + generateTestAnyValueArray().CopyTo(dest) + assert.EqualValues(t, generateTestAnyValueArray(), dest) + + // Test CopyTo same size slice + generateTestAnyValueArray().CopyTo(dest) + assert.EqualValues(t, generateTestAnyValueArray(), dest) +} + +func TestAnyValueArray_Resize(t *testing.T) { + es := generateTestAnyValueArray() + emptyVal := NewAttributeValue() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlpcommon.AnyValue]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlpcommon.AnyValue]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlpcommon.AnyValue]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlpcommon.AnyValue]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestAnyValueArray_Append(t *testing.T) { + es := generateTestAnyValueArray() + + emptyVal := NewAttributeValue() + es.Append(emptyVal) + assert.EqualValues(t, *(es.At(7)).orig, *emptyVal.orig) + + emptyVal2 := NewAttributeValue() + es.Append(emptyVal2) + assert.EqualValues(t, *(es.At(8)).orig, *emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func generateTestInstrumentationLibrary() InstrumentationLibrary { + tv := NewInstrumentationLibrary() + fillTestInstrumentationLibrary(tv) + return tv +} + +func fillTestInstrumentationLibrary(tv InstrumentationLibrary) { + tv.SetName("test_name") + tv.SetVersion("test_version") +} + +func generateTestAnyValueArray() AnyValueArray { + tv := NewAnyValueArray() + fillTestAnyValueArray(tv) + return tv +} + +func fillTestAnyValueArray(tv AnyValueArray) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestAttributeValue(tv.At(i)) + } +} diff --git a/internal/otel_collector/consumer/pdata/generated_log.go b/internal/otel_collector/consumer/pdata/generated_log.go new file mode 100644 index 00000000000..82110449f8d --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_log.go @@ -0,0 +1,612 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. 
+// To regenerate this file run "go run cmd/pdatagen/main.go". + +package pdata + +import ( + "go.opentelemetry.io/collector/internal/data" + otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" +) + +// ResourceLogsSlice logically represents a slice of ResourceLogs. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceLogsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceLogsSlice struct { + // orig points to the slice otlplogs.ResourceLogs field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlplogs.ResourceLogs +} + +func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs) ResourceLogsSlice { + return ResourceLogsSlice{orig} +} + +// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewResourceLogsSlice() ResourceLogsSlice { + orig := []*otlplogs.ResourceLogs(nil) + return ResourceLogsSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceLogsSlice()". +func (es ResourceLogsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceLogsSlice) At(ix int) ResourceLogs { + return newResourceLogs((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceLogs((*es.orig)[i]).CopyTo(newResourceLogs((*dest.orig)[i])) + } + return + } + origs := make([]otlplogs.ResourceLogs, srcLen) + wrappers := make([]*otlplogs.ResourceLogs, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceLogs((*es.orig)[i]).CopyTo(newResourceLogs(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new ResourceLogsSlice can be initialized: +// es := NewResourceLogsSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es ResourceLogsSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlplogs.ResourceLogs, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. 
+ extraOrigs := make([]otlplogs.ResourceLogs, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the ResourceLogsSlice by one and set the +// given ResourceLogs at that new position. The original ResourceLogs +// could still be referenced so do not reuse it after passing it to this +// method. +func (es ResourceLogsSlice) Append(e ResourceLogs) { + *es.orig = append(*es.orig, e.orig) +} + +// ResourceLogs is a collection of logs from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceLogs function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceLogs struct { + orig *otlplogs.ResourceLogs +} + +func newResourceLogs(orig *otlplogs.ResourceLogs) ResourceLogs { + return ResourceLogs{orig: orig} +} + +// NewResourceLogs creates a new empty ResourceLogs. +// +// This must be used only in testing code since no "Set" method available. +func NewResourceLogs() ResourceLogs { + return newResourceLogs(&otlplogs.ResourceLogs{}) +} + +// Resource returns the resource associated with this ResourceLogs. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ResourceLogs) Resource() Resource { + return newResource(&(*ms.orig).Resource) +} + +// InstrumentationLibraryLogs returns the InstrumentationLibraryLogs associated with this ResourceLogs. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms ResourceLogs) InstrumentationLibraryLogs() InstrumentationLibraryLogsSlice { + return newInstrumentationLibraryLogsSlice(&(*ms.orig).InstrumentationLibraryLogs) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms ResourceLogs) CopyTo(dest ResourceLogs) { + ms.Resource().CopyTo(dest.Resource()) + ms.InstrumentationLibraryLogs().CopyTo(dest.InstrumentationLibraryLogs()) +} + +// InstrumentationLibraryLogsSlice logically represents a slice of InstrumentationLibraryLogs. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewInstrumentationLibraryLogsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type InstrumentationLibraryLogsSlice struct { + // orig points to the slice otlplogs.InstrumentationLibraryLogs field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlplogs.InstrumentationLibraryLogs +} + +func newInstrumentationLibraryLogsSlice(orig *[]*otlplogs.InstrumentationLibraryLogs) InstrumentationLibraryLogsSlice { + return InstrumentationLibraryLogsSlice{orig} +} + +// NewInstrumentationLibraryLogsSlice creates a InstrumentationLibraryLogsSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewInstrumentationLibraryLogsSlice() InstrumentationLibraryLogsSlice { + orig := []*otlplogs.InstrumentationLibraryLogs(nil) + return InstrumentationLibraryLogsSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewInstrumentationLibraryLogsSlice()". +func (es InstrumentationLibraryLogsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. 
+// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es InstrumentationLibraryLogsSlice) At(ix int) InstrumentationLibraryLogs { + return newInstrumentationLibraryLogs((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es InstrumentationLibraryLogsSlice) MoveAndAppendTo(dest InstrumentationLibraryLogsSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es InstrumentationLibraryLogsSlice) CopyTo(dest InstrumentationLibraryLogsSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newInstrumentationLibraryLogs((*es.orig)[i]).CopyTo(newInstrumentationLibraryLogs((*dest.orig)[i])) + } + return + } + origs := make([]otlplogs.InstrumentationLibraryLogs, srcLen) + wrappers := make([]*otlplogs.InstrumentationLibraryLogs, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newInstrumentationLibraryLogs((*es.orig)[i]).CopyTo(newInstrumentationLibraryLogs(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new InstrumentationLibraryLogsSlice can be initialized: +// es := NewInstrumentationLibraryLogsSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es InstrumentationLibraryLogsSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlplogs.InstrumentationLibraryLogs, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlplogs.InstrumentationLibraryLogs, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the InstrumentationLibraryLogsSlice by one and set the +// given InstrumentationLibraryLogs at that new position. The original InstrumentationLibraryLogs +// could still be referenced so do not reuse it after passing it to this +// method. +func (es InstrumentationLibraryLogsSlice) Append(e InstrumentationLibraryLogs) { + *es.orig = append(*es.orig, e.orig) +} + +// InstrumentationLibraryLogs is a collection of logs from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewInstrumentationLibraryLogs function to create new instances. +// Important: zero-initialized instance is not valid for use. 
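The Len/At loop shown in these doc comments is the intended traversal idiom for the whole generated hierarchy. A sketch that walks a ResourceLogsSlice down to the individual log records, using only accessors defined in this file:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// countLogRecords walks the generated hierarchy with the Len/At idiom:
// ResourceLogsSlice -> InstrumentationLibraryLogsSlice -> LogSlice.
func countLogRecords(rls pdata.ResourceLogsSlice) int {
	n := 0
	for i := 0; i < rls.Len(); i++ {
		ills := rls.At(i).InstrumentationLibraryLogs()
		for j := 0; j < ills.Len(); j++ {
			n += ills.At(j).Logs().Len()
		}
	}
	return n
}

func main() {
	rls := pdata.NewResourceLogsSlice()
	rls.Resize(2)
	fmt.Println(countLogRecords(rls)) // 0: resources exist but hold no records yet
}
```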
+type InstrumentationLibraryLogs struct { + orig *otlplogs.InstrumentationLibraryLogs +} + +func newInstrumentationLibraryLogs(orig *otlplogs.InstrumentationLibraryLogs) InstrumentationLibraryLogs { + return InstrumentationLibraryLogs{orig: orig} +} + +// NewInstrumentationLibraryLogs creates a new empty InstrumentationLibraryLogs. +// +// This must be used only in testing code since no "Set" method available. +func NewInstrumentationLibraryLogs() InstrumentationLibraryLogs { + return newInstrumentationLibraryLogs(&otlplogs.InstrumentationLibraryLogs{}) +} + +// InstrumentationLibrary returns the instrumentationlibrary associated with this InstrumentationLibraryLogs. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibraryLogs) InstrumentationLibrary() InstrumentationLibrary { + return newInstrumentationLibrary(&(*ms.orig).InstrumentationLibrary) +} + +// Logs returns the Logs associated with this InstrumentationLibraryLogs. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibraryLogs) Logs() LogSlice { + return newLogSlice(&(*ms.orig).Logs) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms InstrumentationLibraryLogs) CopyTo(dest InstrumentationLibraryLogs) { + ms.InstrumentationLibrary().CopyTo(dest.InstrumentationLibrary()) + ms.Logs().CopyTo(dest.Logs()) +} + +// LogSlice logically represents a slice of LogRecord. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLogSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LogSlice struct { + // orig points to the slice otlplogs.LogRecord field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlplogs.LogRecord +} + +func newLogSlice(orig *[]*otlplogs.LogRecord) LogSlice { + return LogSlice{orig} +} + +// NewLogSlice creates a LogSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewLogSlice() LogSlice { + orig := []*otlplogs.LogRecord(nil) + return LogSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLogSlice()". +func (es LogSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LogSlice) At(ix int) LogRecord { + return newLogRecord((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LogSlice) MoveAndAppendTo(dest LogSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. 
+func (es LogSlice) CopyTo(dest LogSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newLogRecord((*es.orig)[i]).CopyTo(newLogRecord((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlplogs.LogRecord, srcLen)
+	wrappers := make([]*otlplogs.LogRecord, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newLogRecord((*es.orig)[i]).CopyTo(newLogRecord(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+//   1. If the newLen <= len then equivalent with slice[0:newLen:cap].
+//   2. If the newLen > len then (newLen - len) empty elements will be appended to the slice.
+//
+// Here is how a new LogSlice can be initialized:
+//   es := NewLogSlice()
+//   es.Resize(4)
+//   for i := 0; i < es.Len(); i++ {
+//       e := es.At(i)
+//       // Here should set all the values for e.
+//   }
+func (es LogSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlplogs.LogRecord, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlplogs.LogRecord, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the LogSlice by one and set the
+// given LogRecord at that new position. The original LogRecord
+// could still be referenced so do not reuse it after passing it to this
+// method.
+func (es LogSlice) Append(e LogRecord) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewLogRecord function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type LogRecord struct {
+	orig *otlplogs.LogRecord
+}
+
+func newLogRecord(orig *otlplogs.LogRecord) LogRecord {
+	return LogRecord{orig: orig}
+}
+
+// NewLogRecord creates a new empty LogRecord.
+//
+// This must be used only in testing code since no "Set" method available.
+func NewLogRecord() LogRecord {
+	return newLogRecord(&otlplogs.LogRecord{})
+}
+
+// Timestamp returns the timestamp associated with this LogRecord.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms LogRecord) Timestamp() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this LogRecord.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms LogRecord) SetTimestamp(v TimestampUnixNano) {
+	(*ms.orig).TimeUnixNano = uint64(v)
+}
+
+// TraceID returns the traceid associated with this LogRecord.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms LogRecord) TraceID() TraceID {
+	return TraceID((*ms.orig).TraceId)
+}
+
+// SetTraceID replaces the traceid associated with this LogRecord.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms LogRecord) SetTraceID(v TraceID) {
+	(*ms.orig).TraceId = data.TraceID(v)
+}
+
+// SpanID returns the spanid associated with this LogRecord.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
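Taken together, the generated setters make LogRecord construction a straight sequence of Set* calls plus in-place mutation of Body and Attributes. A minimal sketch; the field values are illustrative only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	lr := pdata.NewLogRecord()
	lr.SetTimestamp(pdata.TimestampUnixNano(1234567890)) // illustrative timestamp
	lr.SetSeverityNumber(pdata.SeverityNumberINFO)
	lr.SetSeverityText("INFO")
	lr.SetName("example")
	lr.Body().SetStringVal("hello")
	lr.Attributes().InitFromMap(map[string]pdata.AttributeValue{
		"k": pdata.NewAttributeValueString("v"),
	})
	fmt.Println(lr.SeverityText(), lr.Body().StringVal()) // INFO hello
}
```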
+func (ms LogRecord) SpanID() SpanID { + return SpanID((*ms.orig).SpanId) +} + +// SetSpanID replaces the spanid associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SetSpanID(v SpanID) { + (*ms.orig).SpanId = data.SpanID(v) +} + +// Flags returns the flags associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) Flags() uint32 { + return uint32((*ms.orig).Flags) +} + +// SetFlags replaces the flags associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SetFlags(v uint32) { + (*ms.orig).Flags = uint32(v) +} + +// SeverityText returns the severitytext associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SeverityText() string { + return (*ms.orig).SeverityText +} + +// SetSeverityText replaces the severitytext associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SetSeverityText(v string) { + (*ms.orig).SeverityText = v +} + +// SeverityNumber returns the severitynumber associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SeverityNumber() SeverityNumber { + return SeverityNumber((*ms.orig).SeverityNumber) +} + +// SetSeverityNumber replaces the severitynumber associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SetSeverityNumber(v SeverityNumber) { + (*ms.orig).SeverityNumber = otlplogs.SeverityNumber(v) +} + +// Name returns the name associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) Name() string { + return (*ms.orig).Name +} + +// SetName replaces the name associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SetName(v string) { + (*ms.orig).Name = v +} + +// Body returns the body associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) Body() AttributeValue { + return newAttributeValue(&(*ms.orig).Body) +} + +// Attributes returns the Attributes associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) Attributes() AttributeMap { + return newAttributeMap(&(*ms.orig).Attributes) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) DroppedAttributesCount() uint32 { + return (*ms.orig).DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms LogRecord) SetDroppedAttributesCount(v uint32) { + (*ms.orig).DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct to the dest. 
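CopyTo is the counterpart to the shared-pointer semantics above: it clones every property into the destination's own backing struct, after which the two records diverge independently. A short sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	src := pdata.NewLogRecord()
	src.SetName("original")

	dst := pdata.NewLogRecord()
	src.CopyTo(dst) // deep copy into dst's own backing OTLP struct

	dst.SetName("copy") // does not affect src
	fmt.Println(src.Name(), dst.Name()) // original copy
}
```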
+func (ms LogRecord) CopyTo(dest LogRecord) { + dest.SetTimestamp(ms.Timestamp()) + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) + dest.SetFlags(ms.Flags()) + dest.SetSeverityText(ms.SeverityText()) + dest.SetSeverityNumber(ms.SeverityNumber()) + dest.SetName(ms.Name()) + ms.Body().CopyTo(dest.Body()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/internal/otel_collector/consumer/pdata/generated_log_test.go b/internal/otel_collector/consumer/pdata/generated_log_test.go new file mode 100644 index 00000000000..3c25eed6d14 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_log_test.go @@ -0,0 +1,569 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run cmd/pdatagen/main.go". + +package pdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" +) + +func TestResourceLogsSlice(t *testing.T) { + es := NewResourceLogsSlice() + assert.EqualValues(t, 0, es.Len()) + es = newResourceLogsSlice(&[]*otlplogs.ResourceLogs{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewResourceLogs() + testVal := generateTestResourceLogs() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestResourceLogs(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestResourceLogsSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestResourceLogsSlice() + dest := NewResourceLogsSlice() + src := generateTestResourceLogsSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestResourceLogsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestResourceLogsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestResourceLogsSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestResourceLogsSlice_CopyTo(t *testing.T) { + dest := NewResourceLogsSlice() + // Test CopyTo to empty + NewResourceLogsSlice().CopyTo(dest) + assert.EqualValues(t, NewResourceLogsSlice(), dest) + + // Test CopyTo larger slice + generateTestResourceLogsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestResourceLogsSlice(), dest) + + // Test CopyTo same size slice + generateTestResourceLogsSlice().CopyTo(dest) + assert.EqualValues(t, 
generateTestResourceLogsSlice(), dest) +} + +func TestResourceLogsSlice_Resize(t *testing.T) { + es := generateTestResourceLogsSlice() + emptyVal := NewResourceLogs() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlplogs.ResourceLogs]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlplogs.ResourceLogs]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlplogs.ResourceLogs]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlplogs.ResourceLogs]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestResourceLogsSlice_Append(t *testing.T) { + es := generateTestResourceLogsSlice() + + emptyVal := NewResourceLogs() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewResourceLogs() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestResourceLogs_CopyTo(t *testing.T) { + ms := NewResourceLogs() + generateTestResourceLogs().CopyTo(ms) + assert.EqualValues(t, generateTestResourceLogs(), ms) +} + +func TestResourceLogs_Resource(t *testing.T) { + ms := NewResourceLogs() + fillTestResource(ms.Resource()) + assert.EqualValues(t, generateTestResource(), ms.Resource()) +} + +func TestResourceLogs_InstrumentationLibraryLogs(t *testing.T) { + ms := NewResourceLogs() + assert.EqualValues(t, NewInstrumentationLibraryLogsSlice(), ms.InstrumentationLibraryLogs()) + fillTestInstrumentationLibraryLogsSlice(ms.InstrumentationLibraryLogs()) + testValInstrumentationLibraryLogs := generateTestInstrumentationLibraryLogsSlice() + assert.EqualValues(t, testValInstrumentationLibraryLogs, ms.InstrumentationLibraryLogs()) +} + +func TestInstrumentationLibraryLogsSlice(t *testing.T) { + es := NewInstrumentationLibraryLogsSlice() + assert.EqualValues(t, 0, es.Len()) + es = newInstrumentationLibraryLogsSlice(&[]*otlplogs.InstrumentationLibraryLogs{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewInstrumentationLibraryLogs() + testVal := generateTestInstrumentationLibraryLogs() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestInstrumentationLibraryLogs(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestInstrumentationLibraryLogsSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestInstrumentationLibraryLogsSlice() + dest := NewInstrumentationLibraryLogsSlice() + src := generateTestInstrumentationLibraryLogsSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryLogsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, 
expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryLogsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestInstrumentationLibraryLogsSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestInstrumentationLibraryLogsSlice_CopyTo(t *testing.T) { + dest := NewInstrumentationLibraryLogsSlice() + // Test CopyTo to empty + NewInstrumentationLibraryLogsSlice().CopyTo(dest) + assert.EqualValues(t, NewInstrumentationLibraryLogsSlice(), dest) + + // Test CopyTo larger slice + generateTestInstrumentationLibraryLogsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryLogsSlice(), dest) + + // Test CopyTo same size slice + generateTestInstrumentationLibraryLogsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryLogsSlice(), dest) +} + +func TestInstrumentationLibraryLogsSlice_Resize(t *testing.T) { + es := generateTestInstrumentationLibraryLogsSlice() + emptyVal := NewInstrumentationLibraryLogs() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlplogs.InstrumentationLibraryLogs]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlplogs.InstrumentationLibraryLogs]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlplogs.InstrumentationLibraryLogs]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlplogs.InstrumentationLibraryLogs]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. 
+ es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestInstrumentationLibraryLogsSlice_Append(t *testing.T) { + es := generateTestInstrumentationLibraryLogsSlice() + + emptyVal := NewInstrumentationLibraryLogs() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewInstrumentationLibraryLogs() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestInstrumentationLibraryLogs_CopyTo(t *testing.T) { + ms := NewInstrumentationLibraryLogs() + generateTestInstrumentationLibraryLogs().CopyTo(ms) + assert.EqualValues(t, generateTestInstrumentationLibraryLogs(), ms) +} + +func TestInstrumentationLibraryLogs_InstrumentationLibrary(t *testing.T) { + ms := NewInstrumentationLibraryLogs() + fillTestInstrumentationLibrary(ms.InstrumentationLibrary()) + assert.EqualValues(t, generateTestInstrumentationLibrary(), ms.InstrumentationLibrary()) +} + +func TestInstrumentationLibraryLogs_Logs(t *testing.T) { + ms := NewInstrumentationLibraryLogs() + assert.EqualValues(t, NewLogSlice(), ms.Logs()) + fillTestLogSlice(ms.Logs()) + testValLogs := generateTestLogSlice() + assert.EqualValues(t, testValLogs, ms.Logs()) +} + +func TestLogSlice(t *testing.T) { + es := NewLogSlice() + assert.EqualValues(t, 0, es.Len()) + es = newLogSlice(&[]*otlplogs.LogRecord{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewLogRecord() + testVal := generateTestLogRecord() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestLogRecord(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestLogSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestLogSlice() + dest := NewLogSlice() + src := generateTestLogSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestLogSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestLogSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestLogSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestLogSlice_CopyTo(t *testing.T) { + dest := NewLogSlice() + // Test CopyTo to empty + NewLogSlice().CopyTo(dest) + assert.EqualValues(t, NewLogSlice(), dest) + + // Test CopyTo larger slice + generateTestLogSlice().CopyTo(dest) + assert.EqualValues(t, generateTestLogSlice(), dest) + + // Test CopyTo same size slice + generateTestLogSlice().CopyTo(dest) + assert.EqualValues(t, generateTestLogSlice(), dest) +} + +func TestLogSlice_Resize(t *testing.T) { + es := generateTestLogSlice() + emptyVal := NewLogRecord() + // Test Resize less elements. 
+ const resizeSmallLen = 4 + expectedEs := make(map[*otlplogs.LogRecord]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlplogs.LogRecord]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlplogs.LogRecord]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlplogs.LogRecord]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestLogSlice_Append(t *testing.T) { + es := generateTestLogSlice() + + emptyVal := NewLogRecord() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewLogRecord() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestLogRecord_CopyTo(t *testing.T) { + ms := NewLogRecord() + generateTestLogRecord().CopyTo(ms) + assert.EqualValues(t, generateTestLogRecord(), ms) +} + +func TestLogRecord_Timestamp(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp()) + testValTimestamp := TimestampUnixNano(1234567890) + ms.SetTimestamp(testValTimestamp) + assert.EqualValues(t, testValTimestamp, ms.Timestamp()) +} + +func TestLogRecord_TraceID(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, NewTraceID([16]byte{}), ms.TraceID()) + testValTraceID := NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) + ms.SetTraceID(testValTraceID) + assert.EqualValues(t, testValTraceID, ms.TraceID()) +} + +func TestLogRecord_SpanID(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, NewSpanID([8]byte{}), ms.SpanID()) + testValSpanID := NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + ms.SetSpanID(testValSpanID) + assert.EqualValues(t, testValSpanID, ms.SpanID()) +} + +func TestLogRecord_Flags(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, uint32(0), ms.Flags()) + testValFlags := uint32(0x01) + ms.SetFlags(testValFlags) + assert.EqualValues(t, testValFlags, ms.Flags()) +} + +func TestLogRecord_SeverityText(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, "", ms.SeverityText()) + testValSeverityText := "INFO" + ms.SetSeverityText(testValSeverityText) + assert.EqualValues(t, testValSeverityText, ms.SeverityText()) +} + +func TestLogRecord_SeverityNumber(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, SeverityNumberUNDEFINED, ms.SeverityNumber()) + testValSeverityNumber := SeverityNumberINFO + ms.SetSeverityNumber(testValSeverityNumber) + assert.EqualValues(t, testValSeverityNumber, ms.SeverityNumber()) +} + +func TestLogRecord_Name(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, "", ms.Name()) + testValName := "test_name" + ms.SetName(testValName) + assert.EqualValues(t, testValName, ms.Name()) +} + +func TestLogRecord_Body(t *testing.T) { + ms := 
NewLogRecord() + fillTestAttributeValue(ms.Body()) + assert.EqualValues(t, generateTestAttributeValue(), ms.Body()) +} + +func TestLogRecord_Attributes(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, NewAttributeMap(), ms.Attributes()) + fillTestAttributeMap(ms.Attributes()) + testValAttributes := generateTestAttributeMap() + assert.EqualValues(t, testValAttributes, ms.Attributes()) +} + +func TestLogRecord_DroppedAttributesCount(t *testing.T) { + ms := NewLogRecord() + assert.EqualValues(t, uint32(0), ms.DroppedAttributesCount()) + testValDroppedAttributesCount := uint32(17) + ms.SetDroppedAttributesCount(testValDroppedAttributesCount) + assert.EqualValues(t, testValDroppedAttributesCount, ms.DroppedAttributesCount()) +} + +func generateTestResourceLogsSlice() ResourceLogsSlice { + tv := NewResourceLogsSlice() + fillTestResourceLogsSlice(tv) + return tv +} + +func fillTestResourceLogsSlice(tv ResourceLogsSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestResourceLogs(tv.At(i)) + } +} + +func generateTestResourceLogs() ResourceLogs { + tv := NewResourceLogs() + fillTestResourceLogs(tv) + return tv +} + +func fillTestResourceLogs(tv ResourceLogs) { + fillTestResource(tv.Resource()) + fillTestInstrumentationLibraryLogsSlice(tv.InstrumentationLibraryLogs()) +} + +func generateTestInstrumentationLibraryLogsSlice() InstrumentationLibraryLogsSlice { + tv := NewInstrumentationLibraryLogsSlice() + fillTestInstrumentationLibraryLogsSlice(tv) + return tv +} + +func fillTestInstrumentationLibraryLogsSlice(tv InstrumentationLibraryLogsSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestInstrumentationLibraryLogs(tv.At(i)) + } +} + +func generateTestInstrumentationLibraryLogs() InstrumentationLibraryLogs { + tv := NewInstrumentationLibraryLogs() + fillTestInstrumentationLibraryLogs(tv) + return tv +} + +func fillTestInstrumentationLibraryLogs(tv InstrumentationLibraryLogs) { + fillTestInstrumentationLibrary(tv.InstrumentationLibrary()) + fillTestLogSlice(tv.Logs()) +} + +func generateTestLogSlice() LogSlice { + tv := NewLogSlice() + fillTestLogSlice(tv) + return tv +} + +func fillTestLogSlice(tv LogSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestLogRecord(tv.At(i)) + } +} + +func generateTestLogRecord() LogRecord { + tv := NewLogRecord() + fillTestLogRecord(tv) + return tv +} + +func fillTestLogRecord(tv LogRecord) { + tv.SetTimestamp(TimestampUnixNano(1234567890)) + tv.SetTraceID(NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})) + tv.SetSpanID(NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + tv.SetFlags(uint32(0x01)) + tv.SetSeverityText("INFO") + tv.SetSeverityNumber(SeverityNumberINFO) + tv.SetName("test_name") + fillTestAttributeValue(tv.Body()) + fillTestAttributeMap(tv.Attributes()) + tv.SetDroppedAttributesCount(uint32(17)) +} diff --git a/internal/otel_collector/consumer/pdata/generated_metrics.go b/internal/otel_collector/consumer/pdata/generated_metrics.go new file mode 100644 index 00000000000..5aa528023be --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_metrics.go @@ -0,0 +1,2491 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "go run cmd/pdatagen/main.go".
+
+package pdata
+
+import (
+	otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+)
+
+// ResourceMetricsSlice logically represents a slice of ResourceMetrics.
+//
+// This is a reference type; if passed by value and the callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewResourceMetricsSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ResourceMetricsSlice struct {
+	// orig points to the slice otlpmetrics.ResourceMetrics field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlpmetrics.ResourceMetrics
+}
+
+func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics) ResourceMetricsSlice {
+	return ResourceMetricsSlice{orig}
+}
+
+// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
+func NewResourceMetricsSlice() ResourceMetricsSlice {
+	orig := []*otlpmetrics.ResourceMetrics(nil)
+	return ResourceMetricsSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly created instance from "NewResourceMetricsSlice()".
+func (es ResourceMetricsSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//   for i := 0; i < es.Len(); i++ {
+//       e := es.At(i)
+//       ... // Do something with the element
+//   }
+func (es ResourceMetricsSlice) At(ix int) ResourceMetrics {
+	return newResourceMetrics((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es ResourceMetricsSlice) MoveAndAppendTo(dest ResourceMetricsSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newResourceMetrics((*es.orig)[i]).CopyTo(newResourceMetrics((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlpmetrics.ResourceMetrics, srcLen)
+	wrappers := make([]*otlpmetrics.ResourceMetrics, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newResourceMetrics((*es.orig)[i]).CopyTo(newResourceMetrics(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+//   1. If newLen <= len, it is equivalent to slice[0:newLen:cap].
+//   2. If newLen > len, (newLen - len) empty elements are appended to the slice.
+//
+// Here is how a new ResourceMetricsSlice can be initialized:
+//   es := NewResourceMetricsSlice()
+//   es.Resize(4)
+//   for i := 0; i < es.Len(); i++ {
+//       e := es.At(i)
+//       // Here should set all the values for e.
+//   }
+func (es ResourceMetricsSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlpmetrics.ResourceMetrics, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlpmetrics.ResourceMetrics, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the ResourceMetricsSlice by one and set the
+// given ResourceMetrics at that new position. The original ResourceMetrics
+// could still be referenced, so do not reuse it after passing it to this
+// method.
+func (es ResourceMetricsSlice) Append(e ResourceMetrics) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// ResourceMetrics is a collection of metrics from a Resource.
+//
+// This is a reference type; if passed by value and the callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewResourceMetrics function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ResourceMetrics struct {
+	orig *otlpmetrics.ResourceMetrics
+}
+
+func newResourceMetrics(orig *otlpmetrics.ResourceMetrics) ResourceMetrics {
+	return ResourceMetrics{orig: orig}
+}
+
+// NewResourceMetrics creates a new empty ResourceMetrics.
+//
+// This must be used only in testing code, since no "Set" method is available.
+func NewResourceMetrics() ResourceMetrics {
+	return newResourceMetrics(&otlpmetrics.ResourceMetrics{})
+}
+
+// Resource returns the resource associated with this ResourceMetrics.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ResourceMetrics) Resource() Resource {
+	return newResource(&(*ms.orig).Resource)
+}
+
+// InstrumentationLibraryMetrics returns the InstrumentationLibraryMetrics associated with this ResourceMetrics.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ResourceMetrics) InstrumentationLibraryMetrics() InstrumentationLibraryMetricsSlice {
+	return newInstrumentationLibraryMetricsSlice(&(*ms.orig).InstrumentationLibraryMetrics)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) {
+	ms.Resource().CopyTo(dest.Resource())
+	ms.InstrumentationLibraryMetrics().CopyTo(dest.InstrumentationLibraryMetrics())
+}
+
+// InstrumentationLibraryMetricsSlice logically represents a slice of InstrumentationLibraryMetrics.
+//
+// This is a reference type; if passed by value and the callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewInstrumentationLibraryMetricsSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type InstrumentationLibraryMetricsSlice struct {
+	// orig points to the slice otlpmetrics.InstrumentationLibraryMetrics field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
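
Editor's note: Resize plus At is the canonical way to build one of these slices in place; Append is the alternative when an element is constructed separately. A minimal sketch under the same import assumption as above (NewResourceMetrics is documented as test-only, so it appears here purely for illustration):

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/consumer/pdata"
    )

    func main() {
    	rms := pdata.NewResourceMetricsSlice()
    	rms.Resize(2) // two empty, usable elements backed by a single allocation
    	for i := 0; i < rms.Len(); i++ {
    		rm := rms.At(i) // reference type: edits through rm are visible via rms
    		rm.InstrumentationLibraryMetrics().Resize(1)
    	}
    	rms.Append(pdata.NewResourceMetrics())
    	fmt.Println(rms.Len()) // 3
    }
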
+ orig *[]*otlpmetrics.InstrumentationLibraryMetrics +} + +func newInstrumentationLibraryMetricsSlice(orig *[]*otlpmetrics.InstrumentationLibraryMetrics) InstrumentationLibraryMetricsSlice { + return InstrumentationLibraryMetricsSlice{orig} +} + +// NewInstrumentationLibraryMetricsSlice creates a InstrumentationLibraryMetricsSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewInstrumentationLibraryMetricsSlice() InstrumentationLibraryMetricsSlice { + orig := []*otlpmetrics.InstrumentationLibraryMetrics(nil) + return InstrumentationLibraryMetricsSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewInstrumentationLibraryMetricsSlice()". +func (es InstrumentationLibraryMetricsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es InstrumentationLibraryMetricsSlice) At(ix int) InstrumentationLibraryMetrics { + return newInstrumentationLibraryMetrics((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es InstrumentationLibraryMetricsSlice) MoveAndAppendTo(dest InstrumentationLibraryMetricsSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es InstrumentationLibraryMetricsSlice) CopyTo(dest InstrumentationLibraryMetricsSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newInstrumentationLibraryMetrics((*es.orig)[i]).CopyTo(newInstrumentationLibraryMetrics((*dest.orig)[i])) + } + return + } + origs := make([]otlpmetrics.InstrumentationLibraryMetrics, srcLen) + wrappers := make([]*otlpmetrics.InstrumentationLibraryMetrics, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newInstrumentationLibraryMetrics((*es.orig)[i]).CopyTo(newInstrumentationLibraryMetrics(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new InstrumentationLibraryMetricsSlice can be initialized: +// es := NewInstrumentationLibraryMetricsSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es InstrumentationLibraryMetricsSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlpmetrics.InstrumentationLibraryMetrics, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. 
+ extraOrigs := make([]otlpmetrics.InstrumentationLibraryMetrics, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the InstrumentationLibraryMetricsSlice by one and set the +// given InstrumentationLibraryMetrics at that new position. The original InstrumentationLibraryMetrics +// could still be referenced so do not reuse it after passing it to this +// method. +func (es InstrumentationLibraryMetricsSlice) Append(e InstrumentationLibraryMetrics) { + *es.orig = append(*es.orig, e.orig) +} + +// InstrumentationLibraryMetrics is a collection of metrics from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewInstrumentationLibraryMetrics function to create new instances. +// Important: zero-initialized instance is not valid for use. +type InstrumentationLibraryMetrics struct { + orig *otlpmetrics.InstrumentationLibraryMetrics +} + +func newInstrumentationLibraryMetrics(orig *otlpmetrics.InstrumentationLibraryMetrics) InstrumentationLibraryMetrics { + return InstrumentationLibraryMetrics{orig: orig} +} + +// NewInstrumentationLibraryMetrics creates a new empty InstrumentationLibraryMetrics. +// +// This must be used only in testing code since no "Set" method available. +func NewInstrumentationLibraryMetrics() InstrumentationLibraryMetrics { + return newInstrumentationLibraryMetrics(&otlpmetrics.InstrumentationLibraryMetrics{}) +} + +// InstrumentationLibrary returns the instrumentationlibrary associated with this InstrumentationLibraryMetrics. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibraryMetrics) InstrumentationLibrary() InstrumentationLibrary { + return newInstrumentationLibrary(&(*ms.orig).InstrumentationLibrary) +} + +// Metrics returns the Metrics associated with this InstrumentationLibraryMetrics. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms InstrumentationLibraryMetrics) Metrics() MetricSlice { + return newMetricSlice(&(*ms.orig).Metrics) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms InstrumentationLibraryMetrics) CopyTo(dest InstrumentationLibraryMetrics) { + ms.InstrumentationLibrary().CopyTo(dest.InstrumentationLibrary()) + ms.Metrics().CopyTo(dest.Metrics()) +} + +// MetricSlice logically represents a slice of Metric. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewMetricSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type MetricSlice struct { + // orig points to the slice otlpmetrics.Metric field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlpmetrics.Metric +} + +func newMetricSlice(orig *[]*otlpmetrics.Metric) MetricSlice { + return MetricSlice{orig} +} + +// NewMetricSlice creates a MetricSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewMetricSlice() MetricSlice { + orig := []*otlpmetrics.Metric(nil) + return MetricSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewMetricSlice()". +func (es MetricSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. 
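
Editor's note: the nesting mirrors OTLP itself: each ResourceMetrics carries an InstrumentationLibraryMetricsSlice, and each InstrumentationLibraryMetrics carries a MetricSlice. A short traversal sketch (package name and import path assumed, as before):

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // countMetrics walks resource -> instrumentation library -> metric and
    // returns the total number of metrics in the tree.
    func countMetrics(rms pdata.ResourceMetricsSlice) int {
    	n := 0
    	for i := 0; i < rms.Len(); i++ {
    		ilms := rms.At(i).InstrumentationLibraryMetrics()
    		for j := 0; j < ilms.Len(); j++ {
    			n += ilms.At(j).Metrics().Len()
    		}
    	}
    	return n
    }
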
+// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es MetricSlice) At(ix int) Metric { + return newMetric((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es MetricSlice) MoveAndAppendTo(dest MetricSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es MetricSlice) CopyTo(dest MetricSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newMetric((*es.orig)[i]).CopyTo(newMetric((*dest.orig)[i])) + } + return + } + origs := make([]otlpmetrics.Metric, srcLen) + wrappers := make([]*otlpmetrics.Metric, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newMetric((*es.orig)[i]).CopyTo(newMetric(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new MetricSlice can be initialized: +// es := NewMetricSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es MetricSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlpmetrics.Metric, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlpmetrics.Metric, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the MetricSlice by one and set the +// given Metric at that new position. The original Metric +// could still be referenced so do not reuse it after passing it to this +// method. +func (es MetricSlice) Append(e Metric) { + *es.orig = append(*es.orig, e.orig) +} + +// Metric represents one metric as a collection of datapoints. +// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/master/opentelemetry/proto/metrics/v1/metrics.proto +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewMetric function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Metric struct { + orig *otlpmetrics.Metric +} + +func newMetric(orig *otlpmetrics.Metric) Metric { + return Metric{orig: orig} +} + +// NewMetric creates a new empty Metric. +// +// This must be used only in testing code since no "Set" method available. +func NewMetric() Metric { + return newMetric(&otlpmetrics.Metric{}) +} + +// Name returns the name associated with this Metric. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Metric) Name() string { + return (*ms.orig).Name +} + +// SetName replaces the name associated with this Metric. 
+// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Metric) SetName(v string) { + (*ms.orig).Name = v +} + +// Description returns the description associated with this Metric. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Metric) Description() string { + return (*ms.orig).Description +} + +// SetDescription replaces the description associated with this Metric. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Metric) SetDescription(v string) { + (*ms.orig).Description = v +} + +// Unit returns the unit associated with this Metric. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Metric) Unit() string { + return (*ms.orig).Unit +} + +// SetUnit replaces the unit associated with this Metric. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Metric) SetUnit(v string) { + (*ms.orig).Unit = v +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms Metric) CopyTo(dest Metric) { + dest.SetName(ms.Name()) + dest.SetDescription(ms.Description()) + dest.SetUnit(ms.Unit()) + copyData(ms.orig, dest.orig) +} + +// IntGauge represents the type of a int scalar metric that always exports the "current value" for every data point. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntGauge function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntGauge struct { + orig *otlpmetrics.IntGauge +} + +func newIntGauge(orig *otlpmetrics.IntGauge) IntGauge { + return IntGauge{orig: orig} +} + +// NewIntGauge creates a new empty IntGauge. +// +// This must be used only in testing code since no "Set" method available. +func NewIntGauge() IntGauge { + return newIntGauge(&otlpmetrics.IntGauge{}) +} + +// DataPoints returns the DataPoints associated with this IntGauge. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntGauge) DataPoints() IntDataPointSlice { + return newIntDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms IntGauge) CopyTo(dest IntGauge) { + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// DoubleGauge represents the type of a double scalar metric that always exports the "current value" for every data point. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleGauge function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleGauge struct { + orig *otlpmetrics.DoubleGauge +} + +func newDoubleGauge(orig *otlpmetrics.DoubleGauge) DoubleGauge { + return DoubleGauge{orig: orig} +} + +// NewDoubleGauge creates a new empty DoubleGauge. +// +// This must be used only in testing code since no "Set" method available. +func NewDoubleGauge() DoubleGauge { + return newDoubleGauge(&otlpmetrics.DoubleGauge{}) +} + +// DataPoints returns the DataPoints associated with this DoubleGauge. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleGauge) DataPoints() DoubleDataPointSlice { + return newDoubleDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. 
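
Editor's note: Name, Description, and Unit are the only scalar fields on Metric itself; the data points live on the typed variants (IntGauge, DoubleGauge, and the sums, histograms, and summaries that follow). A sketch of the descriptive part, with a made-up metric name and unit:

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // describeMetric fills in the three descriptive fields every Metric carries.
    func describeMetric(m pdata.Metric) {
    	m.SetName("http.server.duration") // illustrative name
    	m.SetDescription("Duration of inbound HTTP requests")
    	m.SetUnit("ms")
    }
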
+func (ms DoubleGauge) CopyTo(dest DoubleGauge) { + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// IntSum represents the type of a numeric int scalar metric that is calculated as a sum of all reported measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntSum function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntSum struct { + orig *otlpmetrics.IntSum +} + +func newIntSum(orig *otlpmetrics.IntSum) IntSum { + return IntSum{orig: orig} +} + +// NewIntSum creates a new empty IntSum. +// +// This must be used only in testing code since no "Set" method available. +func NewIntSum() IntSum { + return newIntSum(&otlpmetrics.IntSum{}) +} + +// AggregationTemporality returns the aggregationtemporality associated with this IntSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntSum) AggregationTemporality() AggregationTemporality { + return AggregationTemporality((*ms.orig).AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this IntSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntSum) SetAggregationTemporality(v AggregationTemporality) { + (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// IsMonotonic returns the ismonotonic associated with this IntSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntSum) IsMonotonic() bool { + return (*ms.orig).IsMonotonic +} + +// SetIsMonotonic replaces the ismonotonic associated with this IntSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntSum) SetIsMonotonic(v bool) { + (*ms.orig).IsMonotonic = v +} + +// DataPoints returns the DataPoints associated with this IntSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntSum) DataPoints() IntDataPointSlice { + return newIntDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms IntSum) CopyTo(dest IntSum) { + dest.SetAggregationTemporality(ms.AggregationTemporality()) + dest.SetIsMonotonic(ms.IsMonotonic()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// DoubleSum represents the type of a numeric double scalar metric that is calculated as a sum of all reported measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleSum function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleSum struct { + orig *otlpmetrics.DoubleSum +} + +func newDoubleSum(orig *otlpmetrics.DoubleSum) DoubleSum { + return DoubleSum{orig: orig} +} + +// NewDoubleSum creates a new empty DoubleSum. +// +// This must be used only in testing code since no "Set" method available. +func NewDoubleSum() DoubleSum { + return newDoubleSum(&otlpmetrics.DoubleSum{}) +} + +// AggregationTemporality returns the aggregationtemporality associated with this DoubleSum. +// +// Important: This causes a runtime error if IsNil() returns "true". 
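
Editor's note: IntSum's two extra knobs encode the metric's semantics: IsMonotonic distinguishes counters from up-down counters, and AggregationTemporality says whether points are deltas or running totals. A sketch of a classic counter; the AggregationTemporalityCumulative constant comes from the non-generated part of pdata and is an assumption here:

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // newCounterSum configures an IntSum as a monotonic, cumulative counter.
    func newCounterSum() pdata.IntSum {
    	s := pdata.NewIntSum() // test-only constructor, used for illustration
    	s.SetIsMonotonic(true)
    	s.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) // assumed constant
    	s.DataPoints().Resize(1)
    	return s
    }
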
+func (ms DoubleSum) AggregationTemporality() AggregationTemporality { + return AggregationTemporality((*ms.orig).AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this DoubleSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleSum) SetAggregationTemporality(v AggregationTemporality) { + (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// IsMonotonic returns the ismonotonic associated with this DoubleSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleSum) IsMonotonic() bool { + return (*ms.orig).IsMonotonic +} + +// SetIsMonotonic replaces the ismonotonic associated with this DoubleSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleSum) SetIsMonotonic(v bool) { + (*ms.orig).IsMonotonic = v +} + +// DataPoints returns the DataPoints associated with this DoubleSum. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleSum) DataPoints() DoubleDataPointSlice { + return newDoubleDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms DoubleSum) CopyTo(dest DoubleSum) { + dest.SetAggregationTemporality(ms.AggregationTemporality()) + dest.SetIsMonotonic(ms.IsMonotonic()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// IntHistogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported double measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntHistogram function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntHistogram struct { + orig *otlpmetrics.IntHistogram +} + +func newIntHistogram(orig *otlpmetrics.IntHistogram) IntHistogram { + return IntHistogram{orig: orig} +} + +// NewIntHistogram creates a new empty IntHistogram. +// +// This must be used only in testing code since no "Set" method available. +func NewIntHistogram() IntHistogram { + return newIntHistogram(&otlpmetrics.IntHistogram{}) +} + +// AggregationTemporality returns the aggregationtemporality associated with this IntHistogram. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogram) AggregationTemporality() AggregationTemporality { + return AggregationTemporality((*ms.orig).AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this IntHistogram. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogram) SetAggregationTemporality(v AggregationTemporality) { + (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// DataPoints returns the DataPoints associated with this IntHistogram. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogram) DataPoints() IntHistogramDataPointSlice { + return newIntHistogramDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. 
+func (ms IntHistogram) CopyTo(dest IntHistogram) { + dest.SetAggregationTemporality(ms.AggregationTemporality()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// DoubleHistogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported double measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleHistogram function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleHistogram struct { + orig *otlpmetrics.DoubleHistogram +} + +func newDoubleHistogram(orig *otlpmetrics.DoubleHistogram) DoubleHistogram { + return DoubleHistogram{orig: orig} +} + +// NewDoubleHistogram creates a new empty DoubleHistogram. +// +// This must be used only in testing code since no "Set" method available. +func NewDoubleHistogram() DoubleHistogram { + return newDoubleHistogram(&otlpmetrics.DoubleHistogram{}) +} + +// AggregationTemporality returns the aggregationtemporality associated with this DoubleHistogram. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleHistogram) AggregationTemporality() AggregationTemporality { + return AggregationTemporality((*ms.orig).AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this DoubleHistogram. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleHistogram) SetAggregationTemporality(v AggregationTemporality) { + (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// DataPoints returns the DataPoints associated with this DoubleHistogram. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleHistogram) DataPoints() DoubleHistogramDataPointSlice { + return newDoubleHistogramDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms DoubleHistogram) CopyTo(dest DoubleHistogram) { + dest.SetAggregationTemporality(ms.AggregationTemporality()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// DoubleSummary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleSummary function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleSummary struct { + orig *otlpmetrics.DoubleSummary +} + +func newDoubleSummary(orig *otlpmetrics.DoubleSummary) DoubleSummary { + return DoubleSummary{orig: orig} +} + +// NewDoubleSummary creates a new empty DoubleSummary. +// +// This must be used only in testing code since no "Set" method available. +func NewDoubleSummary() DoubleSummary { + return newDoubleSummary(&otlpmetrics.DoubleSummary{}) +} + +// DataPoints returns the DataPoints associated with this DoubleSummary. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleSummary) DataPoints() DoubleSummaryDataPointSlice { + return newDoubleSummaryDataPointSlice(&(*ms.orig).DataPoints) +} + +// CopyTo copies all properties from the current struct to the dest. 
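
Editor's note: all of these CopyTo methods perform deep copies: after the call, dest shares no state with the source, which is what the generated _test files verify with their generateTest*/CopyTo round trips. A sketch (the delta constant is assumed from the non-generated API, as above):

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // deepCopy returns a DoubleHistogram detached from src: mutating src
    // afterwards does not affect the copy.
    func deepCopy(src pdata.DoubleHistogram) pdata.DoubleHistogram {
    	dst := pdata.NewDoubleHistogram()
    	src.CopyTo(dst)
    	src.SetAggregationTemporality(pdata.AggregationTemporalityDelta) // dst keeps its own value
    	return dst
    }
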
+func (ms DoubleSummary) CopyTo(dest DoubleSummary) { + ms.DataPoints().CopyTo(dest.DataPoints()) +} + +// IntDataPointSlice logically represents a slice of IntDataPoint. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntDataPointSlice struct { + // orig points to the slice otlpmetrics.IntDataPoint field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlpmetrics.IntDataPoint +} + +func newIntDataPointSlice(orig *[]*otlpmetrics.IntDataPoint) IntDataPointSlice { + return IntDataPointSlice{orig} +} + +// NewIntDataPointSlice creates a IntDataPointSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewIntDataPointSlice() IntDataPointSlice { + orig := []*otlpmetrics.IntDataPoint(nil) + return IntDataPointSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewIntDataPointSlice()". +func (es IntDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es IntDataPointSlice) At(ix int) IntDataPoint { + return newIntDataPoint((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es IntDataPointSlice) MoveAndAppendTo(dest IntDataPointSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es IntDataPointSlice) CopyTo(dest IntDataPointSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newIntDataPoint((*es.orig)[i]).CopyTo(newIntDataPoint((*dest.orig)[i])) + } + return + } + origs := make([]otlpmetrics.IntDataPoint, srcLen) + wrappers := make([]*otlpmetrics.IntDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newIntDataPoint((*es.orig)[i]).CopyTo(newIntDataPoint(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new IntDataPointSlice can be initialized: +// es := NewIntDataPointSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es IntDataPointSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlpmetrics.IntDataPoint, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. 
+ extraOrigs := make([]otlpmetrics.IntDataPoint, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the IntDataPointSlice by one and set the +// given IntDataPoint at that new position. The original IntDataPoint +// could still be referenced so do not reuse it after passing it to this +// method. +func (es IntDataPointSlice) Append(e IntDataPoint) { + *es.orig = append(*es.orig, e.orig) +} + +// IntDataPoint is a single data point in a timeseries that describes the time-varying values of a scalar int metric. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntDataPoint struct { + orig *otlpmetrics.IntDataPoint +} + +func newIntDataPoint(orig *otlpmetrics.IntDataPoint) IntDataPoint { + return IntDataPoint{orig: orig} +} + +// NewIntDataPoint creates a new empty IntDataPoint. +// +// This must be used only in testing code since no "Set" method available. +func NewIntDataPoint() IntDataPoint { + return newIntDataPoint(&otlpmetrics.IntDataPoint{}) +} + +// LabelsMap returns the Labels associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) LabelsMap() StringMap { + return newStringMap(&(*ms.orig).Labels) +} + +// StartTime returns the starttime associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) StartTime() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).StartTimeUnixNano) +} + +// SetStartTime replaces the starttime associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) SetStartTime(v TimestampUnixNano) { + (*ms.orig).StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) Timestamp() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) SetTimestamp(v TimestampUnixNano) { + (*ms.orig).TimeUnixNano = uint64(v) +} + +// Value returns the value associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) Value() int64 { + return (*ms.orig).Value +} + +// SetValue replaces the value associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) SetValue(v int64) { + (*ms.orig).Value = v +} + +// Exemplars returns the Exemplars associated with this IntDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntDataPoint) Exemplars() IntExemplarSlice { + return newIntExemplarSlice(&(*ms.orig).Exemplars) +} + +// CopyTo copies all properties from the current struct to the dest. 
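
Editor's note: an IntDataPoint pairs a value with its observation window (StartTime, Timestamp) and a label set. A population sketch; Insert on the labels map belongs to the non-generated StringMap API and is assumed here:

    package example

    import (
    	"time"

    	"go.opentelemetry.io/collector/consumer/pdata"
    )

    // fillPoint populates one IntDataPoint for the window [start, now].
    func fillPoint(dp pdata.IntDataPoint, start, now time.Time, v int64) {
    	dp.SetStartTime(pdata.TimestampUnixNano(uint64(start.UnixNano())))
    	dp.SetTimestamp(pdata.TimestampUnixNano(uint64(now.UnixNano())))
    	dp.SetValue(v)
    	dp.LabelsMap().Insert("host.name", "web-1") // assumed StringMap helper
    }
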
+func (ms IntDataPoint) CopyTo(dest IntDataPoint) { + ms.LabelsMap().CopyTo(dest.LabelsMap()) + dest.SetStartTime(ms.StartTime()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetValue(ms.Value()) + ms.Exemplars().CopyTo(dest.Exemplars()) +} + +// DoubleDataPointSlice logically represents a slice of DoubleDataPoint. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleDataPointSlice struct { + // orig points to the slice otlpmetrics.DoubleDataPoint field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlpmetrics.DoubleDataPoint +} + +func newDoubleDataPointSlice(orig *[]*otlpmetrics.DoubleDataPoint) DoubleDataPointSlice { + return DoubleDataPointSlice{orig} +} + +// NewDoubleDataPointSlice creates a DoubleDataPointSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewDoubleDataPointSlice() DoubleDataPointSlice { + orig := []*otlpmetrics.DoubleDataPoint(nil) + return DoubleDataPointSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewDoubleDataPointSlice()". +func (es DoubleDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es DoubleDataPointSlice) At(ix int) DoubleDataPoint { + return newDoubleDataPoint((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es DoubleDataPointSlice) MoveAndAppendTo(dest DoubleDataPointSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es DoubleDataPointSlice) CopyTo(dest DoubleDataPointSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newDoubleDataPoint((*es.orig)[i]).CopyTo(newDoubleDataPoint((*dest.orig)[i])) + } + return + } + origs := make([]otlpmetrics.DoubleDataPoint, srcLen) + wrappers := make([]*otlpmetrics.DoubleDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newDoubleDataPoint((*es.orig)[i]).CopyTo(newDoubleDataPoint(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new DoubleDataPointSlice can be initialized: +// es := NewDoubleDataPointSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. 
+// } +func (es DoubleDataPointSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlpmetrics.DoubleDataPoint, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlpmetrics.DoubleDataPoint, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the DoubleDataPointSlice by one and set the +// given DoubleDataPoint at that new position. The original DoubleDataPoint +// could still be referenced so do not reuse it after passing it to this +// method. +func (es DoubleDataPointSlice) Append(e DoubleDataPoint) { + *es.orig = append(*es.orig, e.orig) +} + +// DoubleDataPoint is a single data point in a timeseries that describes the time-varying value of a double metric. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleDataPoint struct { + orig *otlpmetrics.DoubleDataPoint +} + +func newDoubleDataPoint(orig *otlpmetrics.DoubleDataPoint) DoubleDataPoint { + return DoubleDataPoint{orig: orig} +} + +// NewDoubleDataPoint creates a new empty DoubleDataPoint. +// +// This must be used only in testing code since no "Set" method available. +func NewDoubleDataPoint() DoubleDataPoint { + return newDoubleDataPoint(&otlpmetrics.DoubleDataPoint{}) +} + +// LabelsMap returns the Labels associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) LabelsMap() StringMap { + return newStringMap(&(*ms.orig).Labels) +} + +// StartTime returns the starttime associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) StartTime() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).StartTimeUnixNano) +} + +// SetStartTime replaces the starttime associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) SetStartTime(v TimestampUnixNano) { + (*ms.orig).StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) Timestamp() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) SetTimestamp(v TimestampUnixNano) { + (*ms.orig).TimeUnixNano = uint64(v) +} + +// Value returns the value associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) Value() float64 { + return (*ms.orig).Value +} + +// SetValue replaces the value associated with this DoubleDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) SetValue(v float64) { + (*ms.orig).Value = v +} + +// Exemplars returns the Exemplars associated with this DoubleDataPoint. 
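
Editor's note: MoveAndAppendTo, defined for this slice just above, transfers ownership instead of copying: when dest is empty it steals the whole backing array, otherwise it appends, and in both cases the source ends up cleared. A sketch:

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // drain moves every data point from src into dst; afterwards src is empty.
    func drain(src, dst pdata.DoubleDataPointSlice) {
    	moved := src.Len()
    	src.MoveAndAppendTo(dst)
    	_ = moved // dst grew by this many elements; src.Len() is now 0
    }
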
+// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms DoubleDataPoint) Exemplars() DoubleExemplarSlice { + return newDoubleExemplarSlice(&(*ms.orig).Exemplars) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms DoubleDataPoint) CopyTo(dest DoubleDataPoint) { + ms.LabelsMap().CopyTo(dest.LabelsMap()) + dest.SetStartTime(ms.StartTime()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetValue(ms.Value()) + ms.Exemplars().CopyTo(dest.Exemplars()) +} + +// IntHistogramDataPointSlice logically represents a slice of IntHistogramDataPoint. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntHistogramDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntHistogramDataPointSlice struct { + // orig points to the slice otlpmetrics.IntHistogramDataPoint field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlpmetrics.IntHistogramDataPoint +} + +func newIntHistogramDataPointSlice(orig *[]*otlpmetrics.IntHistogramDataPoint) IntHistogramDataPointSlice { + return IntHistogramDataPointSlice{orig} +} + +// NewIntHistogramDataPointSlice creates a IntHistogramDataPointSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewIntHistogramDataPointSlice() IntHistogramDataPointSlice { + orig := []*otlpmetrics.IntHistogramDataPoint(nil) + return IntHistogramDataPointSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewIntHistogramDataPointSlice()". +func (es IntHistogramDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es IntHistogramDataPointSlice) At(ix int) IntHistogramDataPoint { + return newIntHistogramDataPoint((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es IntHistogramDataPointSlice) MoveAndAppendTo(dest IntHistogramDataPointSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es IntHistogramDataPointSlice) CopyTo(dest IntHistogramDataPointSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newIntHistogramDataPoint((*es.orig)[i]).CopyTo(newIntHistogramDataPoint((*dest.orig)[i])) + } + return + } + origs := make([]otlpmetrics.IntHistogramDataPoint, srcLen) + wrappers := make([]*otlpmetrics.IntHistogramDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newIntHistogramDataPoint((*es.orig)[i]).CopyTo(newIntHistogramDataPoint(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. 
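
Editor's note: one nit on the Resize contract repeated in this generated comment: the implementation appends newLen - len zeroed elements (see the extraOrigs length), not newLen - cap, and shrinking merely reslices, so capacity is retained for reuse. A sketch of the observable behaviour:

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // resizeContract exercises grow, shrink, and empty on a data point slice.
    func resizeContract() {
    	s := pdata.NewIntHistogramDataPointSlice()
    	s.Resize(4) // appends 4 zeroed, immediately usable elements
    	s.Resize(2) // reslices down; backing storage is retained
    	s.Resize(0) // empties the slice without freeing storage
    	_ = s.Len() // 0
    }
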
+// +// Here is how a new IntHistogramDataPointSlice can be initialized: +// es := NewIntHistogramDataPointSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es IntHistogramDataPointSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlpmetrics.IntHistogramDataPoint, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlpmetrics.IntHistogramDataPoint, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the IntHistogramDataPointSlice by one and set the +// given IntHistogramDataPoint at that new position. The original IntHistogramDataPoint +// could still be referenced so do not reuse it after passing it to this +// method. +func (es IntHistogramDataPointSlice) Append(e IntHistogramDataPoint) { + *es.orig = append(*es.orig, e.orig) +} + +// IntHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of int values. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewIntHistogramDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntHistogramDataPoint struct { + orig *otlpmetrics.IntHistogramDataPoint +} + +func newIntHistogramDataPoint(orig *otlpmetrics.IntHistogramDataPoint) IntHistogramDataPoint { + return IntHistogramDataPoint{orig: orig} +} + +// NewIntHistogramDataPoint creates a new empty IntHistogramDataPoint. +// +// This must be used only in testing code since no "Set" method available. +func NewIntHistogramDataPoint() IntHistogramDataPoint { + return newIntHistogramDataPoint(&otlpmetrics.IntHistogramDataPoint{}) +} + +// LabelsMap returns the Labels associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) LabelsMap() StringMap { + return newStringMap(&(*ms.orig).Labels) +} + +// StartTime returns the starttime associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) StartTime() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).StartTimeUnixNano) +} + +// SetStartTime replaces the starttime associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) SetStartTime(v TimestampUnixNano) { + (*ms.orig).StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) Timestamp() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) SetTimestamp(v TimestampUnixNano) { + (*ms.orig).TimeUnixNano = uint64(v) +} + +// Count returns the count associated with this IntHistogramDataPoint. 
+// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) Count() uint64 { + return (*ms.orig).Count +} + +// SetCount replaces the count associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) SetCount(v uint64) { + (*ms.orig).Count = v +} + +// Sum returns the sum associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) Sum() int64 { + return (*ms.orig).Sum +} + +// SetSum replaces the sum associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) SetSum(v int64) { + (*ms.orig).Sum = v +} + +// BucketCounts returns the bucketcounts associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) BucketCounts() []uint64 { + return (*ms.orig).BucketCounts +} + +// SetBucketCounts replaces the bucketcounts associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) SetBucketCounts(v []uint64) { + (*ms.orig).BucketCounts = v +} + +// ExplicitBounds returns the explicitbounds associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) ExplicitBounds() []float64 { + return (*ms.orig).ExplicitBounds +} + +// SetExplicitBounds replaces the explicitbounds associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) SetExplicitBounds(v []float64) { + (*ms.orig).ExplicitBounds = v +} + +// Exemplars returns the Exemplars associated with this IntHistogramDataPoint. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms IntHistogramDataPoint) Exemplars() IntExemplarSlice { + return newIntExemplarSlice(&(*ms.orig).Exemplars) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms IntHistogramDataPoint) CopyTo(dest IntHistogramDataPoint) { + ms.LabelsMap().CopyTo(dest.LabelsMap()) + dest.SetStartTime(ms.StartTime()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetCount(ms.Count()) + dest.SetSum(ms.Sum()) + dest.SetBucketCounts(ms.BucketCounts()) + dest.SetExplicitBounds(ms.ExplicitBounds()) + ms.Exemplars().CopyTo(dest.Exemplars()) +} + +// DoubleHistogramDataPointSlice logically represents a slice of DoubleHistogramDataPoint. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewDoubleHistogramDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type DoubleHistogramDataPointSlice struct { + // orig points to the slice otlpmetrics.DoubleHistogramDataPoint field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlpmetrics.DoubleHistogramDataPoint +} + +func newDoubleHistogramDataPointSlice(orig *[]*otlpmetrics.DoubleHistogramDataPoint) DoubleHistogramDataPointSlice { + return DoubleHistogramDataPointSlice{orig} +} + +// NewDoubleHistogramDataPointSlice creates a DoubleHistogramDataPointSlice with 0 elements. +// Can use "Resize" to initialize with a given length. 
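
Editor's note: BucketCounts and ExplicitBounds follow the OTLP histogram convention: N explicit upper bounds define N+1 buckets, so BucketCounts should hold one more entry than ExplicitBounds and sum to Count; the setters themselves do not enforce this. An illustrative sketch:

    package example

    import "go.opentelemetry.io/collector/consumer/pdata"

    // fillHistogramPoint populates a point with buckets (-inf,10], (10,100], (100,+inf).
    func fillHistogramPoint(dp pdata.IntHistogramDataPoint) {
    	dp.SetCount(7)
    	dp.SetSum(123)
    	dp.SetExplicitBounds([]float64{10, 100})
    	dp.SetBucketCounts([]uint64{4, 2, 1}) // one more count than bounds; sums to Count
    }
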
+func NewDoubleHistogramDataPointSlice() DoubleHistogramDataPointSlice { + orig := []*otlpmetrics.DoubleHistogramDataPoint(nil) + return DoubleHistogramDataPointSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewDoubleHistogramDataPointSlice()". +func (es DoubleHistogramDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es DoubleHistogramDataPointSlice) At(ix int) DoubleHistogramDataPoint { + return newDoubleHistogramDataPoint((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es DoubleHistogramDataPointSlice) MoveAndAppendTo(dest DoubleHistogramDataPointSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es DoubleHistogramDataPointSlice) CopyTo(dest DoubleHistogramDataPointSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newDoubleHistogramDataPoint((*es.orig)[i]).CopyTo(newDoubleHistogramDataPoint((*dest.orig)[i])) + } + return + } + origs := make([]otlpmetrics.DoubleHistogramDataPoint, srcLen) + wrappers := make([]*otlpmetrics.DoubleHistogramDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newDoubleHistogramDataPoint((*es.orig)[i]).CopyTo(newDoubleHistogramDataPoint(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new DoubleHistogramDataPointSlice can be initialized: +// es := NewDoubleHistogramDataPointSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es DoubleHistogramDataPointSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlpmetrics.DoubleHistogramDataPoint, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlpmetrics.DoubleHistogramDataPoint, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the DoubleHistogramDataPointSlice by one and set the +// given DoubleHistogramDataPoint at that new position. The original DoubleHistogramDataPoint +// could still be referenced so do not reuse it after passing it to this +// method. +func (es DoubleHistogramDataPointSlice) Append(e DoubleHistogramDataPoint) { + *es.orig = append(*es.orig, e.orig) +} + +// DoubleHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of double values. 
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewDoubleHistogramDataPoint function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type DoubleHistogramDataPoint struct {
+	orig *otlpmetrics.DoubleHistogramDataPoint
+}
+
+func newDoubleHistogramDataPoint(orig *otlpmetrics.DoubleHistogramDataPoint) DoubleHistogramDataPoint {
+	return DoubleHistogramDataPoint{orig: orig}
+}
+
+// NewDoubleHistogramDataPoint creates a new empty DoubleHistogramDataPoint.
+//
+// This must be used only in testing code, since no "Set" method is available.
+func NewDoubleHistogramDataPoint() DoubleHistogramDataPoint {
+	return newDoubleHistogramDataPoint(&otlpmetrics.DoubleHistogramDataPoint{})
+}
+
+// LabelsMap returns the Labels associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) LabelsMap() StringMap {
+	return newStringMap(&(*ms.orig).Labels)
+}
+
+// StartTime returns the start time associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) StartTime() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).StartTimeUnixNano)
+}
+
+// SetStartTime replaces the start time associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) SetStartTime(v TimestampUnixNano) {
+	(*ms.orig).StartTimeUnixNano = uint64(v)
+}
+
+// Timestamp returns the timestamp associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) Timestamp() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) SetTimestamp(v TimestampUnixNano) {
+	(*ms.orig).TimeUnixNano = uint64(v)
+}
+
+// Count returns the count associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) Count() uint64 {
+	return (*ms.orig).Count
+}
+
+// SetCount replaces the count associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) SetCount(v uint64) {
+	(*ms.orig).Count = v
+}
+
+// Sum returns the sum associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) Sum() float64 {
+	return (*ms.orig).Sum
+}
+
+// SetSum replaces the sum associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) SetSum(v float64) {
+	(*ms.orig).Sum = v
+}
+
+// BucketCounts returns the bucket counts associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) BucketCounts() []uint64 {
+	return (*ms.orig).BucketCounts
+}
+
+// SetBucketCounts replaces the bucket counts associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
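+//
+// Note: in OTLP, a histogram with explicit bounds is expected to keep
+// len(BucketCounts()) == len(ExplicitBounds())+1; an illustrative sketch:
+//   dp.SetExplicitBounds([]float64{0, 10})  // buckets (-inf, 0], (0, 10], (10, +inf)
+//   dp.SetBucketCounts([]uint64{1, 4, 2})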
+func (ms DoubleHistogramDataPoint) SetBucketCounts(v []uint64) {
+	(*ms.orig).BucketCounts = v
+}
+
+// ExplicitBounds returns the explicit bounds associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) ExplicitBounds() []float64 {
+	return (*ms.orig).ExplicitBounds
+}
+
+// SetExplicitBounds replaces the explicit bounds associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) SetExplicitBounds(v []float64) {
+	(*ms.orig).ExplicitBounds = v
+}
+
+// Exemplars returns the Exemplars associated with this DoubleHistogramDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleHistogramDataPoint) Exemplars() DoubleExemplarSlice {
+	return newDoubleExemplarSlice(&(*ms.orig).Exemplars)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms DoubleHistogramDataPoint) CopyTo(dest DoubleHistogramDataPoint) {
+	ms.LabelsMap().CopyTo(dest.LabelsMap())
+	dest.SetStartTime(ms.StartTime())
+	dest.SetTimestamp(ms.Timestamp())
+	dest.SetCount(ms.Count())
+	dest.SetSum(ms.Sum())
+	dest.SetBucketCounts(ms.BucketCounts())
+	dest.SetExplicitBounds(ms.ExplicitBounds())
+	ms.Exemplars().CopyTo(dest.Exemplars())
+}
+
+// DoubleSummaryDataPointSlice logically represents a slice of DoubleSummaryDataPoint.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewDoubleSummaryDataPointSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type DoubleSummaryDataPointSlice struct {
+	// orig points to the slice otlpmetrics.DoubleSummaryDataPoint field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlpmetrics.DoubleSummaryDataPoint
+}
+
+func newDoubleSummaryDataPointSlice(orig *[]*otlpmetrics.DoubleSummaryDataPoint) DoubleSummaryDataPointSlice {
+	return DoubleSummaryDataPointSlice{orig}
+}
+
+// NewDoubleSummaryDataPointSlice creates a DoubleSummaryDataPointSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
+func NewDoubleSummaryDataPointSlice() DoubleSummaryDataPointSlice {
+	orig := []*otlpmetrics.DoubleSummaryDataPoint(nil)
+	return DoubleSummaryDataPointSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewDoubleSummaryDataPointSlice()".
+func (es DoubleSummaryDataPointSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     ... // Do something with the element
+// }
+func (es DoubleSummaryDataPointSlice) At(ix int) DoubleSummaryDataPoint {
+	return newDoubleSummaryDataPoint((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es DoubleSummaryDataPointSlice) MoveAndAppendTo(dest DoubleSummaryDataPointSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es DoubleSummaryDataPointSlice) CopyTo(dest DoubleSummaryDataPointSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newDoubleSummaryDataPoint((*es.orig)[i]).CopyTo(newDoubleSummaryDataPoint((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlpmetrics.DoubleSummaryDataPoint, srcLen)
+	wrappers := make([]*otlpmetrics.DoubleSummaryDataPoint, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newDoubleSummaryDataPoint((*es.orig)[i]).CopyTo(newDoubleSummaryDataPoint(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+// 1. If newLen <= len, this is equivalent to slice[0:newLen:cap].
+// 2. If newLen > len, then (newLen - len) empty elements are appended to the slice.
+//
+// Here is how a new DoubleSummaryDataPointSlice can be initialized:
+// es := NewDoubleSummaryDataPointSlice()
+// es.Resize(4)
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     // Here should set all the values for e.
+// }
+func (es DoubleSummaryDataPointSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlpmetrics.DoubleSummaryDataPoint, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlpmetrics.DoubleSummaryDataPoint, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the DoubleSummaryDataPointSlice by one and set the
+// given DoubleSummaryDataPoint at that new position. The original DoubleSummaryDataPoint
+// could still be referenced, so do not reuse it after passing it to this
+// method.
+func (es DoubleSummaryDataPointSlice) Append(e DoubleSummaryDataPoint) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// DoubleSummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewDoubleSummaryDataPoint function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type DoubleSummaryDataPoint struct {
+	orig *otlpmetrics.DoubleSummaryDataPoint
+}
+
+func newDoubleSummaryDataPoint(orig *otlpmetrics.DoubleSummaryDataPoint) DoubleSummaryDataPoint {
+	return DoubleSummaryDataPoint{orig: orig}
+}
+
+// NewDoubleSummaryDataPoint creates a new empty DoubleSummaryDataPoint.
+//
+// This must be used only in testing code, since no "Set" method is available.
+func NewDoubleSummaryDataPoint() DoubleSummaryDataPoint {
+	return newDoubleSummaryDataPoint(&otlpmetrics.DoubleSummaryDataPoint{})
+}
+
+// LabelsMap returns the Labels associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) LabelsMap() StringMap {
+	return newStringMap(&(*ms.orig).Labels)
+}
+
+// StartTime returns the start time associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
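+//
+// The value is UNIX epoch time in nanoseconds; for example (an illustrative
+// sketch using the standard library):
+//   dp.SetStartTime(TimestampUnixNano(uint64(time.Now().UnixNano())))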
+func (ms DoubleSummaryDataPoint) StartTime() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).StartTimeUnixNano)
+}
+
+// SetStartTime replaces the start time associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) SetStartTime(v TimestampUnixNano) {
+	(*ms.orig).StartTimeUnixNano = uint64(v)
+}
+
+// Timestamp returns the timestamp associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) Timestamp() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) SetTimestamp(v TimestampUnixNano) {
+	(*ms.orig).TimeUnixNano = uint64(v)
+}
+
+// Count returns the count associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) Count() uint64 {
+	return (*ms.orig).Count
+}
+
+// SetCount replaces the count associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) SetCount(v uint64) {
+	(*ms.orig).Count = v
+}
+
+// Sum returns the sum associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) Sum() float64 {
+	return (*ms.orig).Sum
+}
+
+// SetSum replaces the sum associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) SetSum(v float64) {
+	(*ms.orig).Sum = v
+}
+
+// QuantileValues returns the QuantileValues associated with this DoubleSummaryDataPoint.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleSummaryDataPoint) QuantileValues() ValueAtQuantileSlice {
+	return newValueAtQuantileSlice(&(*ms.orig).QuantileValues)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms DoubleSummaryDataPoint) CopyTo(dest DoubleSummaryDataPoint) {
+	ms.LabelsMap().CopyTo(dest.LabelsMap())
+	dest.SetStartTime(ms.StartTime())
+	dest.SetTimestamp(ms.Timestamp())
+	dest.SetCount(ms.Count())
+	dest.SetSum(ms.Sum())
+	ms.QuantileValues().CopyTo(dest.QuantileValues())
+}
+
+// ValueAtQuantileSlice logically represents a slice of ValueAtQuantile.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewValueAtQuantileSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ValueAtQuantileSlice struct {
+	// orig points to the slice otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile
+}
+
+func newValueAtQuantileSlice(orig *[]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile) ValueAtQuantileSlice {
+	return ValueAtQuantileSlice{orig}
+}
+
+// NewValueAtQuantileSlice creates a ValueAtQuantileSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
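+//
+// For example (illustrative), a summary with p50 and p99 entries could be
+// initialized as:
+//   qvs := NewValueAtQuantileSlice()
+//   qvs.Resize(2)
+//   qvs.At(0).SetQuantile(0.5)
+//   qvs.At(1).SetQuantile(0.99)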
+func NewValueAtQuantileSlice() ValueAtQuantileSlice {
+	orig := []*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile(nil)
+	return ValueAtQuantileSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewValueAtQuantileSlice()".
+func (es ValueAtQuantileSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     ... // Do something with the element
+// }
+func (es ValueAtQuantileSlice) At(ix int) ValueAtQuantile {
+	return newValueAtQuantile((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es ValueAtQuantileSlice) MoveAndAppendTo(dest ValueAtQuantileSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es ValueAtQuantileSlice) CopyTo(dest ValueAtQuantileSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newValueAtQuantile((*es.orig)[i]).CopyTo(newValueAtQuantile((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, srcLen)
+	wrappers := make([]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newValueAtQuantile((*es.orig)[i]).CopyTo(newValueAtQuantile(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+// 1. If newLen <= len, this is equivalent to slice[0:newLen:cap].
+// 2. If newLen > len, then (newLen - len) empty elements are appended to the slice.
+//
+// Here is how a new ValueAtQuantileSlice can be initialized:
+// es := NewValueAtQuantileSlice()
+// es.Resize(4)
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     // Here should set all the values for e.
+// }
+func (es ValueAtQuantileSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the ValueAtQuantileSlice by one and set the
+// given ValueAtQuantile at that new position. The original ValueAtQuantile
+// could still be referenced, so do not reuse it after passing it to this
+// method.
+func (es ValueAtQuantileSlice) Append(e ValueAtQuantile) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// ValueAtQuantile is a quantile value within a Summary data point.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewValueAtQuantile function to create new instances.
+// Important: zero-initialized instance is not valid for use.
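+//
+// An illustrative sketch, assuming "dp" is an existing DoubleSummaryDataPoint:
+//   qv := NewValueAtQuantile()
+//   qv.SetQuantile(0.99)
+//   qv.SetValue(123.4)
+//   dp.QuantileValues().Append(qv)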
+type ValueAtQuantile struct {
+	orig *otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile
+}
+
+func newValueAtQuantile(orig *otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile) ValueAtQuantile {
+	return ValueAtQuantile{orig: orig}
+}
+
+// NewValueAtQuantile creates a new empty ValueAtQuantile.
+//
+// This must be used only in testing code, since no "Set" method is available.
+func NewValueAtQuantile() ValueAtQuantile {
+	return newValueAtQuantile(&otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile{})
+}
+
+// Quantile returns the quantile associated with this ValueAtQuantile.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ValueAtQuantile) Quantile() float64 {
+	return (*ms.orig).Quantile
+}
+
+// SetQuantile replaces the quantile associated with this ValueAtQuantile.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ValueAtQuantile) SetQuantile(v float64) {
+	(*ms.orig).Quantile = v
+}
+
+// Value returns the value associated with this ValueAtQuantile.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ValueAtQuantile) Value() float64 {
+	return (*ms.orig).Value
+}
+
+// SetValue replaces the value associated with this ValueAtQuantile.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ValueAtQuantile) SetValue(v float64) {
+	(*ms.orig).Value = v
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms ValueAtQuantile) CopyTo(dest ValueAtQuantile) {
+	dest.SetQuantile(ms.Quantile())
+	dest.SetValue(ms.Value())
+}
+
+// IntExemplarSlice logically represents a slice of IntExemplar.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewIntExemplarSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type IntExemplarSlice struct {
+	// orig points to the slice otlpmetrics.IntExemplar field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlpmetrics.IntExemplar
+}
+
+func newIntExemplarSlice(orig *[]*otlpmetrics.IntExemplar) IntExemplarSlice {
+	return IntExemplarSlice{orig}
+}
+
+// NewIntExemplarSlice creates an IntExemplarSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
+func NewIntExemplarSlice() IntExemplarSlice {
+	orig := []*otlpmetrics.IntExemplar(nil)
+	return IntExemplarSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewIntExemplarSlice()".
+func (es IntExemplarSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     ... // Do something with the element
+// }
+func (es IntExemplarSlice) At(ix int) IntExemplar {
+	return newIntExemplar((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es IntExemplarSlice) MoveAndAppendTo(dest IntExemplarSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
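+// Unlike MoveAndAppendTo, which transfers the elements and clears the source,
+// CopyTo leaves the source intact and gives dest an independent deep copy;
+// e.g. (illustrative) mutating src.At(0) after src.CopyTo(dest) does not
+// change dest.At(0).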
+func (es IntExemplarSlice) CopyTo(dest IntExemplarSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newIntExemplar((*es.orig)[i]).CopyTo(newIntExemplar((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlpmetrics.IntExemplar, srcLen)
+	wrappers := make([]*otlpmetrics.IntExemplar, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newIntExemplar((*es.orig)[i]).CopyTo(newIntExemplar(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+// 1. If newLen <= len, this is equivalent to slice[0:newLen:cap].
+// 2. If newLen > len, then (newLen - len) empty elements are appended to the slice.
+//
+// Here is how a new IntExemplarSlice can be initialized:
+// es := NewIntExemplarSlice()
+// es.Resize(4)
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     // Here should set all the values for e.
+// }
+func (es IntExemplarSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlpmetrics.IntExemplar, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlpmetrics.IntExemplar, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the IntExemplarSlice by one and set the
+// given IntExemplar at that new position. The original IntExemplar
+// could still be referenced, so do not reuse it after passing it to this
+// method.
+func (es IntExemplarSlice) Append(e IntExemplar) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// IntExemplar is a sample input int measurement.
+//
+// Exemplars also hold information about the environment when the measurement was recorded,
+// for example the span and trace ID of the active span when the exemplar was recorded.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewIntExemplar function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type IntExemplar struct {
+	orig *otlpmetrics.IntExemplar
+}
+
+func newIntExemplar(orig *otlpmetrics.IntExemplar) IntExemplar {
+	return IntExemplar{orig: orig}
+}
+
+// NewIntExemplar creates a new empty IntExemplar.
+//
+// This must be used only in testing code, since no "Set" method is available.
+func NewIntExemplar() IntExemplar {
+	return newIntExemplar(&otlpmetrics.IntExemplar{})
+}
+
+// Timestamp returns the timestamp associated with this IntExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms IntExemplar) Timestamp() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this IntExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms IntExemplar) SetTimestamp(v TimestampUnixNano) {
+	(*ms.orig).TimeUnixNano = uint64(v)
+}
+
+// Value returns the value associated with this IntExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms IntExemplar) Value() int64 {
+	return (*ms.orig).Value
+}
+
+// SetValue replaces the value associated with this IntExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms IntExemplar) SetValue(v int64) {
+	(*ms.orig).Value = v
+}
+
+// FilteredLabels returns the FilteredLabels associated with this IntExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms IntExemplar) FilteredLabels() StringMap {
+	return newStringMap(&(*ms.orig).FilteredLabels)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms IntExemplar) CopyTo(dest IntExemplar) {
+	dest.SetTimestamp(ms.Timestamp())
+	dest.SetValue(ms.Value())
+	ms.FilteredLabels().CopyTo(dest.FilteredLabels())
+}
+
+// DoubleExemplarSlice logically represents a slice of DoubleExemplar.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewDoubleExemplarSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type DoubleExemplarSlice struct {
+	// orig points to the slice otlpmetrics.DoubleExemplar field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlpmetrics.DoubleExemplar
+}
+
+func newDoubleExemplarSlice(orig *[]*otlpmetrics.DoubleExemplar) DoubleExemplarSlice {
+	return DoubleExemplarSlice{orig}
+}
+
+// NewDoubleExemplarSlice creates a DoubleExemplarSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
+func NewDoubleExemplarSlice() DoubleExemplarSlice {
+	orig := []*otlpmetrics.DoubleExemplar(nil)
+	return DoubleExemplarSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewDoubleExemplarSlice()".
+func (es DoubleExemplarSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     ... // Do something with the element
+// }
+func (es DoubleExemplarSlice) At(ix int) DoubleExemplar {
+	return newDoubleExemplar((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es DoubleExemplarSlice) MoveAndAppendTo(dest DoubleExemplarSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es DoubleExemplarSlice) CopyTo(dest DoubleExemplarSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newDoubleExemplar((*es.orig)[i]).CopyTo(newDoubleExemplar((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlpmetrics.DoubleExemplar, srcLen)
+	wrappers := make([]*otlpmetrics.DoubleExemplar, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newDoubleExemplar((*es.orig)[i]).CopyTo(newDoubleExemplar(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+// 1. If newLen <= len, this is equivalent to slice[0:newLen:cap].
+// 2. If newLen > len, then (newLen - len) empty elements are appended to the slice.
+//
+// Here is how a new DoubleExemplarSlice can be initialized:
+// es := NewDoubleExemplarSlice()
+// es.Resize(4)
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     // Here should set all the values for e.
+// }
+func (es DoubleExemplarSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlpmetrics.DoubleExemplar, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlpmetrics.DoubleExemplar, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the DoubleExemplarSlice by one and set the
+// given DoubleExemplar at that new position. The original DoubleExemplar
+// could still be referenced, so do not reuse it after passing it to this
+// method.
+func (es DoubleExemplarSlice) Append(e DoubleExemplar) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// DoubleExemplar is a sample input double measurement.
+//
+// Exemplars also hold information about the environment when the measurement was recorded,
+// for example the span and trace ID of the active span when the exemplar was recorded.
+//
+// This is a reference type: if it is passed by value and the callee modifies it,
+// the caller will see the modification.
+//
+// Must use NewDoubleExemplar function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type DoubleExemplar struct {
+	orig *otlpmetrics.DoubleExemplar
+}
+
+func newDoubleExemplar(orig *otlpmetrics.DoubleExemplar) DoubleExemplar {
+	return DoubleExemplar{orig: orig}
+}
+
+// NewDoubleExemplar creates a new empty DoubleExemplar.
+//
+// This must be used only in testing code, since no "Set" method is available.
+func NewDoubleExemplar() DoubleExemplar {
+	return newDoubleExemplar(&otlpmetrics.DoubleExemplar{})
+}
+
+// Timestamp returns the timestamp associated with this DoubleExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleExemplar) Timestamp() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this DoubleExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleExemplar) SetTimestamp(v TimestampUnixNano) {
+	(*ms.orig).TimeUnixNano = uint64(v)
+}
+
+// Value returns the value associated with this DoubleExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleExemplar) Value() float64 {
+	return (*ms.orig).Value
+}
+
+// SetValue replaces the value associated with this DoubleExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleExemplar) SetValue(v float64) {
+	(*ms.orig).Value = v
+}
+
+// FilteredLabels returns the FilteredLabels associated with this DoubleExemplar.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms DoubleExemplar) FilteredLabels() StringMap {
+	return newStringMap(&(*ms.orig).FilteredLabels)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
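+// For example (an illustrative sketch; it assumes StringMap's Insert(k, v)):
+//   e := NewDoubleExemplar()
+//   e.SetTimestamp(TimestampUnixNano(1606401000000000000))
+//   e.SetValue(0.42)
+//   e.FilteredLabels().Insert("pod", "api-1")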
+func (ms DoubleExemplar) CopyTo(dest DoubleExemplar) { + dest.SetTimestamp(ms.Timestamp()) + dest.SetValue(ms.Value()) + ms.FilteredLabels().CopyTo(dest.FilteredLabels()) +} diff --git a/internal/otel_collector/consumer/pdata/generated_metrics_test.go b/internal/otel_collector/consumer/pdata/generated_metrics_test.go new file mode 100644 index 00000000000..c99bce13092 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_metrics_test.go @@ -0,0 +1,2220 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run cmd/pdatagen/main.go". + +package pdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" +) + +func TestResourceMetricsSlice(t *testing.T) { + es := NewResourceMetricsSlice() + assert.EqualValues(t, 0, es.Len()) + es = newResourceMetricsSlice(&[]*otlpmetrics.ResourceMetrics{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewResourceMetrics() + testVal := generateTestResourceMetrics() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestResourceMetrics(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestResourceMetricsSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestResourceMetricsSlice() + dest := NewResourceMetricsSlice() + src := generateTestResourceMetricsSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestResourceMetricsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestResourceMetricsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestResourceMetricsSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestResourceMetricsSlice_CopyTo(t *testing.T) { + dest := NewResourceMetricsSlice() + // Test CopyTo to empty + NewResourceMetricsSlice().CopyTo(dest) + assert.EqualValues(t, NewResourceMetricsSlice(), dest) + + // Test CopyTo larger slice + generateTestResourceMetricsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestResourceMetricsSlice(), dest) + + // Test CopyTo same size slice + generateTestResourceMetricsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestResourceMetricsSlice(), dest) +} + +func TestResourceMetricsSlice_Resize(t *testing.T) { + es := generateTestResourceMetricsSlice() + emptyVal := 
NewResourceMetrics() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlpmetrics.ResourceMetrics]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlpmetrics.ResourceMetrics]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlpmetrics.ResourceMetrics]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlpmetrics.ResourceMetrics]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestResourceMetricsSlice_Append(t *testing.T) { + es := generateTestResourceMetricsSlice() + + emptyVal := NewResourceMetrics() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewResourceMetrics() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestResourceMetrics_CopyTo(t *testing.T) { + ms := NewResourceMetrics() + generateTestResourceMetrics().CopyTo(ms) + assert.EqualValues(t, generateTestResourceMetrics(), ms) +} + +func TestResourceMetrics_Resource(t *testing.T) { + ms := NewResourceMetrics() + fillTestResource(ms.Resource()) + assert.EqualValues(t, generateTestResource(), ms.Resource()) +} + +func TestResourceMetrics_InstrumentationLibraryMetrics(t *testing.T) { + ms := NewResourceMetrics() + assert.EqualValues(t, NewInstrumentationLibraryMetricsSlice(), ms.InstrumentationLibraryMetrics()) + fillTestInstrumentationLibraryMetricsSlice(ms.InstrumentationLibraryMetrics()) + testValInstrumentationLibraryMetrics := generateTestInstrumentationLibraryMetricsSlice() + assert.EqualValues(t, testValInstrumentationLibraryMetrics, ms.InstrumentationLibraryMetrics()) +} + +func TestInstrumentationLibraryMetricsSlice(t *testing.T) { + es := NewInstrumentationLibraryMetricsSlice() + assert.EqualValues(t, 0, es.Len()) + es = newInstrumentationLibraryMetricsSlice(&[]*otlpmetrics.InstrumentationLibraryMetrics{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewInstrumentationLibraryMetrics() + testVal := generateTestInstrumentationLibraryMetrics() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestInstrumentationLibraryMetrics(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestInstrumentationLibraryMetricsSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestInstrumentationLibraryMetricsSlice() + dest := NewInstrumentationLibraryMetricsSlice() + src := generateTestInstrumentationLibraryMetricsSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryMetricsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + 
// Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryMetricsSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestInstrumentationLibraryMetricsSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestInstrumentationLibraryMetricsSlice_CopyTo(t *testing.T) { + dest := NewInstrumentationLibraryMetricsSlice() + // Test CopyTo to empty + NewInstrumentationLibraryMetricsSlice().CopyTo(dest) + assert.EqualValues(t, NewInstrumentationLibraryMetricsSlice(), dest) + + // Test CopyTo larger slice + generateTestInstrumentationLibraryMetricsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryMetricsSlice(), dest) + + // Test CopyTo same size slice + generateTestInstrumentationLibraryMetricsSlice().CopyTo(dest) + assert.EqualValues(t, generateTestInstrumentationLibraryMetricsSlice(), dest) +} + +func TestInstrumentationLibraryMetricsSlice_Resize(t *testing.T) { + es := generateTestInstrumentationLibraryMetricsSlice() + emptyVal := NewInstrumentationLibraryMetrics() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlpmetrics.InstrumentationLibraryMetrics]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlpmetrics.InstrumentationLibraryMetrics]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlpmetrics.InstrumentationLibraryMetrics]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlpmetrics.InstrumentationLibraryMetrics]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. 
+ es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestInstrumentationLibraryMetricsSlice_Append(t *testing.T) { + es := generateTestInstrumentationLibraryMetricsSlice() + + emptyVal := NewInstrumentationLibraryMetrics() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewInstrumentationLibraryMetrics() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestInstrumentationLibraryMetrics_CopyTo(t *testing.T) { + ms := NewInstrumentationLibraryMetrics() + generateTestInstrumentationLibraryMetrics().CopyTo(ms) + assert.EqualValues(t, generateTestInstrumentationLibraryMetrics(), ms) +} + +func TestInstrumentationLibraryMetrics_InstrumentationLibrary(t *testing.T) { + ms := NewInstrumentationLibraryMetrics() + fillTestInstrumentationLibrary(ms.InstrumentationLibrary()) + assert.EqualValues(t, generateTestInstrumentationLibrary(), ms.InstrumentationLibrary()) +} + +func TestInstrumentationLibraryMetrics_Metrics(t *testing.T) { + ms := NewInstrumentationLibraryMetrics() + assert.EqualValues(t, NewMetricSlice(), ms.Metrics()) + fillTestMetricSlice(ms.Metrics()) + testValMetrics := generateTestMetricSlice() + assert.EqualValues(t, testValMetrics, ms.Metrics()) +} + +func TestMetricSlice(t *testing.T) { + es := NewMetricSlice() + assert.EqualValues(t, 0, es.Len()) + es = newMetricSlice(&[]*otlpmetrics.Metric{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewMetric() + testVal := generateTestMetric() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestMetric(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestMetricSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestMetricSlice() + dest := NewMetricSlice() + src := generateTestMetricSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestMetricSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestMetricSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestMetricSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestMetricSlice_CopyTo(t *testing.T) { + dest := NewMetricSlice() + // Test CopyTo to empty + NewMetricSlice().CopyTo(dest) + assert.EqualValues(t, NewMetricSlice(), dest) + + // Test CopyTo larger slice + generateTestMetricSlice().CopyTo(dest) + assert.EqualValues(t, generateTestMetricSlice(), dest) + + // Test CopyTo same size slice + generateTestMetricSlice().CopyTo(dest) + assert.EqualValues(t, generateTestMetricSlice(), dest) +} + +func TestMetricSlice_Resize(t *testing.T) { + es := generateTestMetricSlice() + emptyVal := NewMetric() + // Test Resize less elements. 
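+	// (Shrinking should keep the identity of the first resizeSmallLen elements;
+	// the pointer maps below verify this.)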
+ const resizeSmallLen = 4 + expectedEs := make(map[*otlpmetrics.Metric]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlpmetrics.Metric]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlpmetrics.Metric]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlpmetrics.Metric]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestMetricSlice_Append(t *testing.T) { + es := generateTestMetricSlice() + + emptyVal := NewMetric() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewMetric() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestMetric_CopyTo(t *testing.T) { + ms := NewMetric() + generateTestMetric().CopyTo(ms) + assert.EqualValues(t, generateTestMetric(), ms) +} + +func TestMetric_Name(t *testing.T) { + ms := NewMetric() + assert.EqualValues(t, "", ms.Name()) + testValName := "test_name" + ms.SetName(testValName) + assert.EqualValues(t, testValName, ms.Name()) +} + +func TestMetric_Description(t *testing.T) { + ms := NewMetric() + assert.EqualValues(t, "", ms.Description()) + testValDescription := "test_description" + ms.SetDescription(testValDescription) + assert.EqualValues(t, testValDescription, ms.Description()) +} + +func TestMetric_Unit(t *testing.T) { + ms := NewMetric() + assert.EqualValues(t, "", ms.Unit()) + testValUnit := "1" + ms.SetUnit(testValUnit) + assert.EqualValues(t, testValUnit, ms.Unit()) +} + +func TestIntGauge_CopyTo(t *testing.T) { + ms := NewIntGauge() + generateTestIntGauge().CopyTo(ms) + assert.EqualValues(t, generateTestIntGauge(), ms) +} + +func TestIntGauge_DataPoints(t *testing.T) { + ms := NewIntGauge() + assert.EqualValues(t, NewIntDataPointSlice(), ms.DataPoints()) + fillTestIntDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestIntDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestDoubleGauge_CopyTo(t *testing.T) { + ms := NewDoubleGauge() + generateTestDoubleGauge().CopyTo(ms) + assert.EqualValues(t, generateTestDoubleGauge(), ms) +} + +func TestDoubleGauge_DataPoints(t *testing.T) { + ms := NewDoubleGauge() + assert.EqualValues(t, NewDoubleDataPointSlice(), ms.DataPoints()) + fillTestDoubleDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestDoubleDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestIntSum_CopyTo(t *testing.T) { + ms := NewIntSum() + generateTestIntSum().CopyTo(ms) + assert.EqualValues(t, generateTestIntSum(), ms) +} + +func TestIntSum_AggregationTemporality(t *testing.T) { + ms := NewIntSum() + assert.EqualValues(t, AggregationTemporalityUnspecified, ms.AggregationTemporality()) + 
testValAggregationTemporality := AggregationTemporalityCumulative + ms.SetAggregationTemporality(testValAggregationTemporality) + assert.EqualValues(t, testValAggregationTemporality, ms.AggregationTemporality()) +} + +func TestIntSum_IsMonotonic(t *testing.T) { + ms := NewIntSum() + assert.EqualValues(t, false, ms.IsMonotonic()) + testValIsMonotonic := true + ms.SetIsMonotonic(testValIsMonotonic) + assert.EqualValues(t, testValIsMonotonic, ms.IsMonotonic()) +} + +func TestIntSum_DataPoints(t *testing.T) { + ms := NewIntSum() + assert.EqualValues(t, NewIntDataPointSlice(), ms.DataPoints()) + fillTestIntDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestIntDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestDoubleSum_CopyTo(t *testing.T) { + ms := NewDoubleSum() + generateTestDoubleSum().CopyTo(ms) + assert.EqualValues(t, generateTestDoubleSum(), ms) +} + +func TestDoubleSum_AggregationTemporality(t *testing.T) { + ms := NewDoubleSum() + assert.EqualValues(t, AggregationTemporalityUnspecified, ms.AggregationTemporality()) + testValAggregationTemporality := AggregationTemporalityCumulative + ms.SetAggregationTemporality(testValAggregationTemporality) + assert.EqualValues(t, testValAggregationTemporality, ms.AggregationTemporality()) +} + +func TestDoubleSum_IsMonotonic(t *testing.T) { + ms := NewDoubleSum() + assert.EqualValues(t, false, ms.IsMonotonic()) + testValIsMonotonic := true + ms.SetIsMonotonic(testValIsMonotonic) + assert.EqualValues(t, testValIsMonotonic, ms.IsMonotonic()) +} + +func TestDoubleSum_DataPoints(t *testing.T) { + ms := NewDoubleSum() + assert.EqualValues(t, NewDoubleDataPointSlice(), ms.DataPoints()) + fillTestDoubleDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestDoubleDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestIntHistogram_CopyTo(t *testing.T) { + ms := NewIntHistogram() + generateTestIntHistogram().CopyTo(ms) + assert.EqualValues(t, generateTestIntHistogram(), ms) +} + +func TestIntHistogram_AggregationTemporality(t *testing.T) { + ms := NewIntHistogram() + assert.EqualValues(t, AggregationTemporalityUnspecified, ms.AggregationTemporality()) + testValAggregationTemporality := AggregationTemporalityCumulative + ms.SetAggregationTemporality(testValAggregationTemporality) + assert.EqualValues(t, testValAggregationTemporality, ms.AggregationTemporality()) +} + +func TestIntHistogram_DataPoints(t *testing.T) { + ms := NewIntHistogram() + assert.EqualValues(t, NewIntHistogramDataPointSlice(), ms.DataPoints()) + fillTestIntHistogramDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestIntHistogramDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestDoubleHistogram_CopyTo(t *testing.T) { + ms := NewDoubleHistogram() + generateTestDoubleHistogram().CopyTo(ms) + assert.EqualValues(t, generateTestDoubleHistogram(), ms) +} + +func TestDoubleHistogram_AggregationTemporality(t *testing.T) { + ms := NewDoubleHistogram() + assert.EqualValues(t, AggregationTemporalityUnspecified, ms.AggregationTemporality()) + testValAggregationTemporality := AggregationTemporalityCumulative + ms.SetAggregationTemporality(testValAggregationTemporality) + assert.EqualValues(t, testValAggregationTemporality, ms.AggregationTemporality()) +} + +func TestDoubleHistogram_DataPoints(t *testing.T) { + ms := NewDoubleHistogram() + assert.EqualValues(t, NewDoubleHistogramDataPointSlice(), ms.DataPoints()) + 
fillTestDoubleHistogramDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestDoubleHistogramDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestDoubleSummary_CopyTo(t *testing.T) { + ms := NewDoubleSummary() + generateTestDoubleSummary().CopyTo(ms) + assert.EqualValues(t, generateTestDoubleSummary(), ms) +} + +func TestDoubleSummary_DataPoints(t *testing.T) { + ms := NewDoubleSummary() + assert.EqualValues(t, NewDoubleSummaryDataPointSlice(), ms.DataPoints()) + fillTestDoubleSummaryDataPointSlice(ms.DataPoints()) + testValDataPoints := generateTestDoubleSummaryDataPointSlice() + assert.EqualValues(t, testValDataPoints, ms.DataPoints()) +} + +func TestIntDataPointSlice(t *testing.T) { + es := NewIntDataPointSlice() + assert.EqualValues(t, 0, es.Len()) + es = newIntDataPointSlice(&[]*otlpmetrics.IntDataPoint{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewIntDataPoint() + testVal := generateTestIntDataPoint() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestIntDataPoint(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestIntDataPointSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestIntDataPointSlice() + dest := NewIntDataPointSlice() + src := generateTestIntDataPointSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestIntDataPointSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestIntDataPointSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestIntDataPointSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestIntDataPointSlice_CopyTo(t *testing.T) { + dest := NewIntDataPointSlice() + // Test CopyTo to empty + NewIntDataPointSlice().CopyTo(dest) + assert.EqualValues(t, NewIntDataPointSlice(), dest) + + // Test CopyTo larger slice + generateTestIntDataPointSlice().CopyTo(dest) + assert.EqualValues(t, generateTestIntDataPointSlice(), dest) + + // Test CopyTo same size slice + generateTestIntDataPointSlice().CopyTo(dest) + assert.EqualValues(t, generateTestIntDataPointSlice(), dest) +} + +func TestIntDataPointSlice_Resize(t *testing.T) { + es := generateTestIntDataPointSlice() + emptyVal := NewIntDataPoint() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlpmetrics.IntDataPoint]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlpmetrics.IntDataPoint]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. 
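+	// (Growing should preserve existing elements and append empty ones; the
+	// pointer map and the emptyVal comparisons below verify this.)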
+ const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlpmetrics.IntDataPoint]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlpmetrics.IntDataPoint]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestIntDataPointSlice_Append(t *testing.T) { + es := generateTestIntDataPointSlice() + + emptyVal := NewIntDataPoint() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewIntDataPoint() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestIntDataPoint_CopyTo(t *testing.T) { + ms := NewIntDataPoint() + generateTestIntDataPoint().CopyTo(ms) + assert.EqualValues(t, generateTestIntDataPoint(), ms) +} + +func TestIntDataPoint_LabelsMap(t *testing.T) { + ms := NewIntDataPoint() + assert.EqualValues(t, NewStringMap(), ms.LabelsMap()) + fillTestStringMap(ms.LabelsMap()) + testValLabelsMap := generateTestStringMap() + assert.EqualValues(t, testValLabelsMap, ms.LabelsMap()) +} + +func TestIntDataPoint_StartTime(t *testing.T) { + ms := NewIntDataPoint() + assert.EqualValues(t, TimestampUnixNano(0), ms.StartTime()) + testValStartTime := TimestampUnixNano(1234567890) + ms.SetStartTime(testValStartTime) + assert.EqualValues(t, testValStartTime, ms.StartTime()) +} + +func TestIntDataPoint_Timestamp(t *testing.T) { + ms := NewIntDataPoint() + assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp()) + testValTimestamp := TimestampUnixNano(1234567890) + ms.SetTimestamp(testValTimestamp) + assert.EqualValues(t, testValTimestamp, ms.Timestamp()) +} + +func TestIntDataPoint_Value(t *testing.T) { + ms := NewIntDataPoint() + assert.EqualValues(t, int64(0), ms.Value()) + testValValue := int64(-17) + ms.SetValue(testValValue) + assert.EqualValues(t, testValValue, ms.Value()) +} + +func TestIntDataPoint_Exemplars(t *testing.T) { + ms := NewIntDataPoint() + assert.EqualValues(t, NewIntExemplarSlice(), ms.Exemplars()) + fillTestIntExemplarSlice(ms.Exemplars()) + testValExemplars := generateTestIntExemplarSlice() + assert.EqualValues(t, testValExemplars, ms.Exemplars()) +} + +func TestDoubleDataPointSlice(t *testing.T) { + es := NewDoubleDataPointSlice() + assert.EqualValues(t, 0, es.Len()) + es = newDoubleDataPointSlice(&[]*otlpmetrics.DoubleDataPoint{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewDoubleDataPoint() + testVal := generateTestDoubleDataPoint() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestDoubleDataPoint(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestDoubleDataPointSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestDoubleDataPointSlice() + dest := NewDoubleDataPointSlice() + src := generateTestDoubleDataPointSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestDoubleDataPointSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + 
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestDoubleDataPointSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestDoubleDataPointSlice_CopyTo(t *testing.T) {
+	dest := NewDoubleDataPointSlice()
+	// Test CopyTo to empty
+	NewDoubleDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, NewDoubleDataPointSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestDoubleDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleDataPointSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestDoubleDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleDataPointSlice(), dest)
+}
+
+func TestDoubleDataPointSlice_Resize(t *testing.T) {
+	es := generateTestDoubleDataPointSlice()
+	emptyVal := NewDoubleDataPoint()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.DoubleDataPoint]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.DoubleDataPoint]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.DoubleDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.DoubleDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
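+	// Resize(0) reslices to zero length but keeps the backing array, so the
+	// slice retains its capacity for reuse.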
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestDoubleDataPointSlice_Append(t *testing.T) {
+	es := generateTestDoubleDataPointSlice()
+
+	emptyVal := NewDoubleDataPoint()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewDoubleDataPoint()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestDoubleDataPoint_CopyTo(t *testing.T) {
+	ms := NewDoubleDataPoint()
+	generateTestDoubleDataPoint().CopyTo(ms)
+	assert.EqualValues(t, generateTestDoubleDataPoint(), ms)
+}
+
+func TestDoubleDataPoint_LabelsMap(t *testing.T) {
+	ms := NewDoubleDataPoint()
+	assert.EqualValues(t, NewStringMap(), ms.LabelsMap())
+	fillTestStringMap(ms.LabelsMap())
+	testValLabelsMap := generateTestStringMap()
+	assert.EqualValues(t, testValLabelsMap, ms.LabelsMap())
+}
+
+func TestDoubleDataPoint_StartTime(t *testing.T) {
+	ms := NewDoubleDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.StartTime())
+	testValStartTime := TimestampUnixNano(1234567890)
+	ms.SetStartTime(testValStartTime)
+	assert.EqualValues(t, testValStartTime, ms.StartTime())
+}
+
+func TestDoubleDataPoint_Timestamp(t *testing.T) {
+	ms := NewDoubleDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp())
+	testValTimestamp := TimestampUnixNano(1234567890)
+	ms.SetTimestamp(testValTimestamp)
+	assert.EqualValues(t, testValTimestamp, ms.Timestamp())
+}
+
+func TestDoubleDataPoint_Value(t *testing.T) {
+	ms := NewDoubleDataPoint()
+	assert.EqualValues(t, float64(0.0), ms.Value())
+	testValValue := float64(17.13)
+	ms.SetValue(testValValue)
+	assert.EqualValues(t, testValValue, ms.Value())
+}
+
+func TestDoubleDataPoint_Exemplars(t *testing.T) {
+	ms := NewDoubleDataPoint()
+	assert.EqualValues(t, NewDoubleExemplarSlice(), ms.Exemplars())
+	fillTestDoubleExemplarSlice(ms.Exemplars())
+	testValExemplars := generateTestDoubleExemplarSlice()
+	assert.EqualValues(t, testValExemplars, ms.Exemplars())
+}
+
+func TestIntHistogramDataPointSlice(t *testing.T) {
+	es := NewIntHistogramDataPointSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newIntHistogramDataPointSlice(&[]*otlpmetrics.IntHistogramDataPoint{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewIntHistogramDataPoint()
+	testVal := generateTestIntHistogramDataPoint()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestIntHistogramDataPoint(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestIntHistogramDataPointSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestIntHistogramDataPointSlice()
+	dest := NewIntHistogramDataPointSlice()
+	src := generateTestIntHistogramDataPointSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestIntHistogramDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestIntHistogramDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestIntHistogramDataPointSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestIntHistogramDataPointSlice_CopyTo(t *testing.T) {
+	dest := NewIntHistogramDataPointSlice()
+	// Test CopyTo to empty
+	NewIntHistogramDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, NewIntHistogramDataPointSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestIntHistogramDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestIntHistogramDataPointSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestIntHistogramDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestIntHistogramDataPointSlice(), dest)
+}
+
+func TestIntHistogramDataPointSlice_Resize(t *testing.T) {
+	es := generateTestIntHistogramDataPointSlice()
+	emptyVal := NewIntHistogramDataPoint()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.IntHistogramDataPoint]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.IntHistogramDataPoint]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.IntHistogramDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.IntHistogramDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestIntHistogramDataPointSlice_Append(t *testing.T) {
+	es := generateTestIntHistogramDataPointSlice()
+
+	emptyVal := NewIntHistogramDataPoint()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewIntHistogramDataPoint()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestIntHistogramDataPoint_CopyTo(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	generateTestIntHistogramDataPoint().CopyTo(ms)
+	assert.EqualValues(t, generateTestIntHistogramDataPoint(), ms)
+}
+
+func TestIntHistogramDataPoint_LabelsMap(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, NewStringMap(), ms.LabelsMap())
+	fillTestStringMap(ms.LabelsMap())
+	testValLabelsMap := generateTestStringMap()
+	assert.EqualValues(t, testValLabelsMap, ms.LabelsMap())
+}
+
+func TestIntHistogramDataPoint_StartTime(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.StartTime())
+	testValStartTime := TimestampUnixNano(1234567890)
+	ms.SetStartTime(testValStartTime)
+	assert.EqualValues(t, testValStartTime, ms.StartTime())
+}
+
+func TestIntHistogramDataPoint_Timestamp(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp())
+	testValTimestamp := TimestampUnixNano(1234567890)
+	ms.SetTimestamp(testValTimestamp)
+	assert.EqualValues(t, testValTimestamp, ms.Timestamp())
+}
+
+func TestIntHistogramDataPoint_Count(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, uint64(0), ms.Count())
+	testValCount := uint64(17)
+	ms.SetCount(testValCount)
+	assert.EqualValues(t, testValCount, ms.Count())
+}
+
+func TestIntHistogramDataPoint_Sum(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, int64(0.0), ms.Sum())
+	testValSum := int64(1713)
+	ms.SetSum(testValSum)
+	assert.EqualValues(t, testValSum, ms.Sum())
+}
+
+func TestIntHistogramDataPoint_BucketCounts(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, []uint64(nil), ms.BucketCounts())
+	testValBucketCounts := []uint64{1, 2, 3}
+	ms.SetBucketCounts(testValBucketCounts)
+	assert.EqualValues(t, testValBucketCounts, ms.BucketCounts())
+}
+
+func TestIntHistogramDataPoint_ExplicitBounds(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, []float64(nil), ms.ExplicitBounds())
+	testValExplicitBounds := []float64{1, 2, 3}
+	ms.SetExplicitBounds(testValExplicitBounds)
+	assert.EqualValues(t, testValExplicitBounds, ms.ExplicitBounds())
+}
+
+func TestIntHistogramDataPoint_Exemplars(t *testing.T) {
+	ms := NewIntHistogramDataPoint()
+	assert.EqualValues(t, NewIntExemplarSlice(), ms.Exemplars())
+	fillTestIntExemplarSlice(ms.Exemplars())
+	testValExemplars := generateTestIntExemplarSlice()
+	assert.EqualValues(t, testValExemplars, ms.Exemplars())
+}
+
+func TestDoubleHistogramDataPointSlice(t *testing.T) {
+	es := NewDoubleHistogramDataPointSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newDoubleHistogramDataPointSlice(&[]*otlpmetrics.DoubleHistogramDataPoint{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewDoubleHistogramDataPoint()
+	testVal := generateTestDoubleHistogramDataPoint()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestDoubleHistogramDataPoint(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestDoubleHistogramDataPointSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestDoubleHistogramDataPointSlice()
+	dest := NewDoubleHistogramDataPointSlice()
+	src := generateTestDoubleHistogramDataPointSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleHistogramDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleHistogramDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestDoubleHistogramDataPointSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestDoubleHistogramDataPointSlice_CopyTo(t *testing.T) {
+	dest := NewDoubleHistogramDataPointSlice()
+	// Test CopyTo to empty
+	NewDoubleHistogramDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, NewDoubleHistogramDataPointSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestDoubleHistogramDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleHistogramDataPointSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestDoubleHistogramDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleHistogramDataPointSlice(), dest)
+}
+
+func TestDoubleHistogramDataPointSlice_Resize(t *testing.T) {
+	es := generateTestDoubleHistogramDataPointSlice()
+	emptyVal := NewDoubleHistogramDataPoint()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.DoubleHistogramDataPoint]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.DoubleHistogramDataPoint]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.DoubleHistogramDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.DoubleHistogramDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestDoubleHistogramDataPointSlice_Append(t *testing.T) {
+	es := generateTestDoubleHistogramDataPointSlice()
+
+	emptyVal := NewDoubleHistogramDataPoint()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewDoubleHistogramDataPoint()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestDoubleHistogramDataPoint_CopyTo(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	generateTestDoubleHistogramDataPoint().CopyTo(ms)
+	assert.EqualValues(t, generateTestDoubleHistogramDataPoint(), ms)
+}
+
+func TestDoubleHistogramDataPoint_LabelsMap(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, NewStringMap(), ms.LabelsMap())
+	fillTestStringMap(ms.LabelsMap())
+	testValLabelsMap := generateTestStringMap()
+	assert.EqualValues(t, testValLabelsMap, ms.LabelsMap())
+}
+
+func TestDoubleHistogramDataPoint_StartTime(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.StartTime())
+	testValStartTime := TimestampUnixNano(1234567890)
+	ms.SetStartTime(testValStartTime)
+	assert.EqualValues(t, testValStartTime, ms.StartTime())
+}
+
+func TestDoubleHistogramDataPoint_Timestamp(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp())
+	testValTimestamp := TimestampUnixNano(1234567890)
+	ms.SetTimestamp(testValTimestamp)
+	assert.EqualValues(t, testValTimestamp, ms.Timestamp())
+}
+
+func TestDoubleHistogramDataPoint_Count(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, uint64(0), ms.Count())
+	testValCount := uint64(17)
+	ms.SetCount(testValCount)
+	assert.EqualValues(t, testValCount, ms.Count())
+}
+
+func TestDoubleHistogramDataPoint_Sum(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, float64(0.0), ms.Sum())
+	testValSum := float64(17.13)
+	ms.SetSum(testValSum)
+	assert.EqualValues(t, testValSum, ms.Sum())
+}
+
+func TestDoubleHistogramDataPoint_BucketCounts(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, []uint64(nil), ms.BucketCounts())
+	testValBucketCounts := []uint64{1, 2, 3}
+	ms.SetBucketCounts(testValBucketCounts)
+	assert.EqualValues(t, testValBucketCounts, ms.BucketCounts())
+}
+
+func TestDoubleHistogramDataPoint_ExplicitBounds(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, []float64(nil), ms.ExplicitBounds())
+	testValExplicitBounds := []float64{1, 2, 3}
+	ms.SetExplicitBounds(testValExplicitBounds)
+	assert.EqualValues(t, testValExplicitBounds, ms.ExplicitBounds())
+}
+
+func TestDoubleHistogramDataPoint_Exemplars(t *testing.T) {
+	ms := NewDoubleHistogramDataPoint()
+	assert.EqualValues(t, NewDoubleExemplarSlice(), ms.Exemplars())
+	fillTestDoubleExemplarSlice(ms.Exemplars())
+	testValExemplars := generateTestDoubleExemplarSlice()
+	assert.EqualValues(t, testValExemplars, ms.Exemplars())
+}
+
+func TestDoubleSummaryDataPointSlice(t *testing.T) {
+	es := NewDoubleSummaryDataPointSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newDoubleSummaryDataPointSlice(&[]*otlpmetrics.DoubleSummaryDataPoint{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewDoubleSummaryDataPoint()
+	testVal := generateTestDoubleSummaryDataPoint()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestDoubleSummaryDataPoint(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestDoubleSummaryDataPointSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestDoubleSummaryDataPointSlice()
+	dest := NewDoubleSummaryDataPointSlice()
+	src := generateTestDoubleSummaryDataPointSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleSummaryDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleSummaryDataPointSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestDoubleSummaryDataPointSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestDoubleSummaryDataPointSlice_CopyTo(t *testing.T) {
+	dest := NewDoubleSummaryDataPointSlice()
+	// Test CopyTo to empty
+	NewDoubleSummaryDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, NewDoubleSummaryDataPointSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestDoubleSummaryDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleSummaryDataPointSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestDoubleSummaryDataPointSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleSummaryDataPointSlice(), dest)
+}
+
+func TestDoubleSummaryDataPointSlice_Resize(t *testing.T) {
+	es := generateTestDoubleSummaryDataPointSlice()
+	emptyVal := NewDoubleSummaryDataPoint()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.DoubleSummaryDataPoint]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.DoubleSummaryDataPoint]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.DoubleSummaryDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.DoubleSummaryDataPoint]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestDoubleSummaryDataPointSlice_Append(t *testing.T) {
+	es := generateTestDoubleSummaryDataPointSlice()
+
+	emptyVal := NewDoubleSummaryDataPoint()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewDoubleSummaryDataPoint()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestDoubleSummaryDataPoint_CopyTo(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	generateTestDoubleSummaryDataPoint().CopyTo(ms)
+	assert.EqualValues(t, generateTestDoubleSummaryDataPoint(), ms)
+}
+
+func TestDoubleSummaryDataPoint_LabelsMap(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	assert.EqualValues(t, NewStringMap(), ms.LabelsMap())
+	fillTestStringMap(ms.LabelsMap())
+	testValLabelsMap := generateTestStringMap()
+	assert.EqualValues(t, testValLabelsMap, ms.LabelsMap())
+}
+
+func TestDoubleSummaryDataPoint_StartTime(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.StartTime())
+	testValStartTime := TimestampUnixNano(1234567890)
+	ms.SetStartTime(testValStartTime)
+	assert.EqualValues(t, testValStartTime, ms.StartTime())
+}
+
+func TestDoubleSummaryDataPoint_Timestamp(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp())
+	testValTimestamp := TimestampUnixNano(1234567890)
+	ms.SetTimestamp(testValTimestamp)
+	assert.EqualValues(t, testValTimestamp, ms.Timestamp())
+}
+
+func TestDoubleSummaryDataPoint_Count(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	assert.EqualValues(t, uint64(0), ms.Count())
+	testValCount := uint64(17)
+	ms.SetCount(testValCount)
+	assert.EqualValues(t, testValCount, ms.Count())
+}
+
+func TestDoubleSummaryDataPoint_Sum(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	assert.EqualValues(t, float64(0.0), ms.Sum())
+	testValSum := float64(17.13)
+	ms.SetSum(testValSum)
+	assert.EqualValues(t, testValSum, ms.Sum())
+}
+
+func TestDoubleSummaryDataPoint_QuantileValues(t *testing.T) {
+	ms := NewDoubleSummaryDataPoint()
+	assert.EqualValues(t, NewValueAtQuantileSlice(), ms.QuantileValues())
+	fillTestValueAtQuantileSlice(ms.QuantileValues())
+	testValQuantileValues := generateTestValueAtQuantileSlice()
+	assert.EqualValues(t, testValQuantileValues, ms.QuantileValues())
+}
+
+func TestValueAtQuantileSlice(t *testing.T) {
+	es := NewValueAtQuantileSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newValueAtQuantileSlice(&[]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewValueAtQuantile()
+	testVal := generateTestValueAtQuantile()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestValueAtQuantile(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestValueAtQuantileSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestValueAtQuantileSlice()
+	dest := NewValueAtQuantileSlice()
+	src := generateTestValueAtQuantileSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestValueAtQuantileSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestValueAtQuantileSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestValueAtQuantileSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestValueAtQuantileSlice_CopyTo(t *testing.T) {
+	dest := NewValueAtQuantileSlice()
+	// Test CopyTo to empty
+	NewValueAtQuantileSlice().CopyTo(dest)
+	assert.EqualValues(t, NewValueAtQuantileSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestValueAtQuantileSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestValueAtQuantileSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestValueAtQuantileSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestValueAtQuantileSlice(), dest)
+}
+
+func TestValueAtQuantileSlice_Resize(t *testing.T) {
+	es := generateTestValueAtQuantileSlice()
+	emptyVal := NewValueAtQuantile()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestValueAtQuantileSlice_Append(t *testing.T) {
+	es := generateTestValueAtQuantileSlice()
+
+	emptyVal := NewValueAtQuantile()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewValueAtQuantile()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestValueAtQuantile_CopyTo(t *testing.T) {
+	ms := NewValueAtQuantile()
+	generateTestValueAtQuantile().CopyTo(ms)
+	assert.EqualValues(t, generateTestValueAtQuantile(), ms)
+}
+
+func TestValueAtQuantile_Quantile(t *testing.T) {
+	ms := NewValueAtQuantile()
+	assert.EqualValues(t, float64(0.0), ms.Quantile())
+	testValQuantile := float64(17.13)
+	ms.SetQuantile(testValQuantile)
+	assert.EqualValues(t, testValQuantile, ms.Quantile())
+}
+
+func TestValueAtQuantile_Value(t *testing.T) {
+	ms := NewValueAtQuantile()
+	assert.EqualValues(t, float64(0.0), ms.Value())
+	testValValue := float64(17.13)
+	ms.SetValue(testValValue)
+	assert.EqualValues(t, testValValue, ms.Value())
+}
+
+func TestIntExemplarSlice(t *testing.T) {
+	es := NewIntExemplarSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newIntExemplarSlice(&[]*otlpmetrics.IntExemplar{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewIntExemplar()
+	testVal := generateTestIntExemplar()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestIntExemplar(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestIntExemplarSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestIntExemplarSlice()
+	dest := NewIntExemplarSlice()
+	src := generateTestIntExemplarSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestIntExemplarSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestIntExemplarSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestIntExemplarSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestIntExemplarSlice_CopyTo(t *testing.T) {
+	dest := NewIntExemplarSlice()
+	// Test CopyTo to empty
+	NewIntExemplarSlice().CopyTo(dest)
+	assert.EqualValues(t, NewIntExemplarSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestIntExemplarSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestIntExemplarSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestIntExemplarSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestIntExemplarSlice(), dest)
+}
+
+func TestIntExemplarSlice_Resize(t *testing.T) {
+	es := generateTestIntExemplarSlice()
+	emptyVal := NewIntExemplar()
+	// Test Resize less elements.
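+	// Shrinking must preserve the identity (orig pointers) of the surviving
+	// elements; the expected/found maps below assert exactly that.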
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.IntExemplar]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.IntExemplar]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.IntExemplar]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.IntExemplar]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestIntExemplarSlice_Append(t *testing.T) {
+	es := generateTestIntExemplarSlice()
+
+	emptyVal := NewIntExemplar()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewIntExemplar()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestIntExemplar_CopyTo(t *testing.T) {
+	ms := NewIntExemplar()
+	generateTestIntExemplar().CopyTo(ms)
+	assert.EqualValues(t, generateTestIntExemplar(), ms)
+}
+
+func TestIntExemplar_Timestamp(t *testing.T) {
+	ms := NewIntExemplar()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp())
+	testValTimestamp := TimestampUnixNano(1234567890)
+	ms.SetTimestamp(testValTimestamp)
+	assert.EqualValues(t, testValTimestamp, ms.Timestamp())
+}
+
+func TestIntExemplar_Value(t *testing.T) {
+	ms := NewIntExemplar()
+	assert.EqualValues(t, int64(0), ms.Value())
+	testValValue := int64(-17)
+	ms.SetValue(testValValue)
+	assert.EqualValues(t, testValValue, ms.Value())
+}
+
+func TestIntExemplar_FilteredLabels(t *testing.T) {
+	ms := NewIntExemplar()
+	assert.EqualValues(t, NewStringMap(), ms.FilteredLabels())
+	fillTestStringMap(ms.FilteredLabels())
+	testValFilteredLabels := generateTestStringMap()
+	assert.EqualValues(t, testValFilteredLabels, ms.FilteredLabels())
+}
+
+func TestDoubleExemplarSlice(t *testing.T) {
+	es := NewDoubleExemplarSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newDoubleExemplarSlice(&[]*otlpmetrics.DoubleExemplar{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewDoubleExemplar()
+	testVal := generateTestDoubleExemplar()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestDoubleExemplar(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestDoubleExemplarSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestDoubleExemplarSlice()
+	dest := NewDoubleExemplarSlice()
+	src := generateTestDoubleExemplarSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleExemplarSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestDoubleExemplarSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestDoubleExemplarSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestDoubleExemplarSlice_CopyTo(t *testing.T) {
+	dest := NewDoubleExemplarSlice()
+	// Test CopyTo to empty
+	NewDoubleExemplarSlice().CopyTo(dest)
+	assert.EqualValues(t, NewDoubleExemplarSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestDoubleExemplarSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleExemplarSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestDoubleExemplarSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestDoubleExemplarSlice(), dest)
+}
+
+func TestDoubleExemplarSlice_Resize(t *testing.T) {
+	es := generateTestDoubleExemplarSlice()
+	emptyVal := NewDoubleExemplar()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlpmetrics.DoubleExemplar]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlpmetrics.DoubleExemplar]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlpmetrics.DoubleExemplar]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlpmetrics.DoubleExemplar]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestDoubleExemplarSlice_Append(t *testing.T) {
+	es := generateTestDoubleExemplarSlice()
+
+	emptyVal := NewDoubleExemplar()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewDoubleExemplar()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestDoubleExemplar_CopyTo(t *testing.T) {
+	ms := NewDoubleExemplar()
+	generateTestDoubleExemplar().CopyTo(ms)
+	assert.EqualValues(t, generateTestDoubleExemplar(), ms)
+}
+
+func TestDoubleExemplar_Timestamp(t *testing.T) {
+	ms := NewDoubleExemplar()
+	assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp())
+	testValTimestamp := TimestampUnixNano(1234567890)
+	ms.SetTimestamp(testValTimestamp)
+	assert.EqualValues(t, testValTimestamp, ms.Timestamp())
+}
+
+func TestDoubleExemplar_Value(t *testing.T) {
+	ms := NewDoubleExemplar()
+	assert.EqualValues(t, float64(0.0), ms.Value())
+	testValValue := float64(17.13)
+	ms.SetValue(testValValue)
+	assert.EqualValues(t, testValValue, ms.Value())
+}
+
+func TestDoubleExemplar_FilteredLabels(t *testing.T) {
+	ms := NewDoubleExemplar()
+	assert.EqualValues(t, NewStringMap(), ms.FilteredLabels())
+	fillTestStringMap(ms.FilteredLabels())
+	testValFilteredLabels := generateTestStringMap()
+	assert.EqualValues(t, testValFilteredLabels, ms.FilteredLabels())
+}
+
+func generateTestResourceMetricsSlice() ResourceMetricsSlice {
+	tv := NewResourceMetricsSlice()
+	fillTestResourceMetricsSlice(tv)
+	return tv
+}
+
+func fillTestResourceMetricsSlice(tv ResourceMetricsSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestResourceMetrics(tv.At(i))
+	}
+}
+
+func generateTestResourceMetrics() ResourceMetrics {
+	tv := NewResourceMetrics()
+	fillTestResourceMetrics(tv)
+	return tv
+}
+
+func fillTestResourceMetrics(tv ResourceMetrics) {
+	fillTestResource(tv.Resource())
+	fillTestInstrumentationLibraryMetricsSlice(tv.InstrumentationLibraryMetrics())
+}
+
+func generateTestInstrumentationLibraryMetricsSlice() InstrumentationLibraryMetricsSlice {
+	tv := NewInstrumentationLibraryMetricsSlice()
+	fillTestInstrumentationLibraryMetricsSlice(tv)
+	return tv
+}
+
+func fillTestInstrumentationLibraryMetricsSlice(tv InstrumentationLibraryMetricsSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestInstrumentationLibraryMetrics(tv.At(i))
+	}
+}
+
+func generateTestInstrumentationLibraryMetrics() InstrumentationLibraryMetrics {
+	tv := NewInstrumentationLibraryMetrics()
+	fillTestInstrumentationLibraryMetrics(tv)
+	return tv
+}
+
+func fillTestInstrumentationLibraryMetrics(tv InstrumentationLibraryMetrics) {
+	fillTestInstrumentationLibrary(tv.InstrumentationLibrary())
+	fillTestMetricSlice(tv.Metrics())
+}
+
+func generateTestMetricSlice() MetricSlice {
+	tv := NewMetricSlice()
+	fillTestMetricSlice(tv)
+	return tv
+}
+
+func fillTestMetricSlice(tv MetricSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestMetric(tv.At(i))
+	}
+}
+
+func generateTestMetric() Metric {
+	tv := NewMetric()
+	fillTestMetric(tv)
+	return tv
+}
+
+func fillTestMetric(tv Metric) {
+	tv.SetName("test_name")
+	tv.SetDescription("test_description")
+	tv.SetUnit("1")
+	(*tv.orig).Data = &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}}
+	fillTestIntGauge(tv.IntGauge())
+}
+
+func generateTestIntGauge() IntGauge {
+	tv := NewIntGauge()
+	fillTestIntGauge(tv)
+	return tv
+}
+
+func fillTestIntGauge(tv IntGauge) {
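+	// Each fillTest* helper writes the same fixed fixture values into every
+	// nested field, so the generated tests can compare whole structs for
+	// deep equality.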
+	fillTestIntDataPointSlice(tv.DataPoints())
+}
+
+func generateTestDoubleGauge() DoubleGauge {
+	tv := NewDoubleGauge()
+	fillTestDoubleGauge(tv)
+	return tv
+}
+
+func fillTestDoubleGauge(tv DoubleGauge) {
+	fillTestDoubleDataPointSlice(tv.DataPoints())
+}
+
+func generateTestIntSum() IntSum {
+	tv := NewIntSum()
+	fillTestIntSum(tv)
+	return tv
+}
+
+func fillTestIntSum(tv IntSum) {
+	tv.SetAggregationTemporality(AggregationTemporalityCumulative)
+	tv.SetIsMonotonic(true)
+	fillTestIntDataPointSlice(tv.DataPoints())
+}
+
+func generateTestDoubleSum() DoubleSum {
+	tv := NewDoubleSum()
+	fillTestDoubleSum(tv)
+	return tv
+}
+
+func fillTestDoubleSum(tv DoubleSum) {
+	tv.SetAggregationTemporality(AggregationTemporalityCumulative)
+	tv.SetIsMonotonic(true)
+	fillTestDoubleDataPointSlice(tv.DataPoints())
+}
+
+func generateTestIntHistogram() IntHistogram {
+	tv := NewIntHistogram()
+	fillTestIntHistogram(tv)
+	return tv
+}
+
+func fillTestIntHistogram(tv IntHistogram) {
+	tv.SetAggregationTemporality(AggregationTemporalityCumulative)
+	fillTestIntHistogramDataPointSlice(tv.DataPoints())
+}
+
+func generateTestDoubleHistogram() DoubleHistogram {
+	tv := NewDoubleHistogram()
+	fillTestDoubleHistogram(tv)
+	return tv
+}
+
+func fillTestDoubleHistogram(tv DoubleHistogram) {
+	tv.SetAggregationTemporality(AggregationTemporalityCumulative)
+	fillTestDoubleHistogramDataPointSlice(tv.DataPoints())
+}
+
+func generateTestDoubleSummary() DoubleSummary {
+	tv := NewDoubleSummary()
+	fillTestDoubleSummary(tv)
+	return tv
+}
+
+func fillTestDoubleSummary(tv DoubleSummary) {
+	fillTestDoubleSummaryDataPointSlice(tv.DataPoints())
+}
+
+func generateTestIntDataPointSlice() IntDataPointSlice {
+	tv := NewIntDataPointSlice()
+	fillTestIntDataPointSlice(tv)
+	return tv
+}
+
+func fillTestIntDataPointSlice(tv IntDataPointSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestIntDataPoint(tv.At(i))
+	}
+}
+
+func generateTestIntDataPoint() IntDataPoint {
+	tv := NewIntDataPoint()
+	fillTestIntDataPoint(tv)
+	return tv
+}
+
+func fillTestIntDataPoint(tv IntDataPoint) {
+	fillTestStringMap(tv.LabelsMap())
+	tv.SetStartTime(TimestampUnixNano(1234567890))
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetValue(int64(-17))
+	fillTestIntExemplarSlice(tv.Exemplars())
+}
+
+func generateTestDoubleDataPointSlice() DoubleDataPointSlice {
+	tv := NewDoubleDataPointSlice()
+	fillTestDoubleDataPointSlice(tv)
+	return tv
+}
+
+func fillTestDoubleDataPointSlice(tv DoubleDataPointSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestDoubleDataPoint(tv.At(i))
+	}
+}
+
+func generateTestDoubleDataPoint() DoubleDataPoint {
+	tv := NewDoubleDataPoint()
+	fillTestDoubleDataPoint(tv)
+	return tv
+}
+
+func fillTestDoubleDataPoint(tv DoubleDataPoint) {
+	fillTestStringMap(tv.LabelsMap())
+	tv.SetStartTime(TimestampUnixNano(1234567890))
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetValue(float64(17.13))
+	fillTestDoubleExemplarSlice(tv.Exemplars())
+}
+
+func generateTestIntHistogramDataPointSlice() IntHistogramDataPointSlice {
+	tv := NewIntHistogramDataPointSlice()
+	fillTestIntHistogramDataPointSlice(tv)
+	return tv
+}
+
+func fillTestIntHistogramDataPointSlice(tv IntHistogramDataPointSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestIntHistogramDataPoint(tv.At(i))
+	}
+}
+
+func generateTestIntHistogramDataPoint() IntHistogramDataPoint {
+	tv := NewIntHistogramDataPoint()
+	fillTestIntHistogramDataPoint(tv)
+	return tv
+}
+
+func fillTestIntHistogramDataPoint(tv IntHistogramDataPoint) {
+	fillTestStringMap(tv.LabelsMap())
+	tv.SetStartTime(TimestampUnixNano(1234567890))
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetCount(uint64(17))
+	tv.SetSum(int64(1713))
+	tv.SetBucketCounts([]uint64{1, 2, 3})
+	tv.SetExplicitBounds([]float64{1, 2, 3})
+	fillTestIntExemplarSlice(tv.Exemplars())
+}
+
+func generateTestDoubleHistogramDataPointSlice() DoubleHistogramDataPointSlice {
+	tv := NewDoubleHistogramDataPointSlice()
+	fillTestDoubleHistogramDataPointSlice(tv)
+	return tv
+}
+
+func fillTestDoubleHistogramDataPointSlice(tv DoubleHistogramDataPointSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestDoubleHistogramDataPoint(tv.At(i))
+	}
+}
+
+func generateTestDoubleHistogramDataPoint() DoubleHistogramDataPoint {
+	tv := NewDoubleHistogramDataPoint()
+	fillTestDoubleHistogramDataPoint(tv)
+	return tv
+}
+
+func fillTestDoubleHistogramDataPoint(tv DoubleHistogramDataPoint) {
+	fillTestStringMap(tv.LabelsMap())
+	tv.SetStartTime(TimestampUnixNano(1234567890))
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetCount(uint64(17))
+	tv.SetSum(float64(17.13))
+	tv.SetBucketCounts([]uint64{1, 2, 3})
+	tv.SetExplicitBounds([]float64{1, 2, 3})
+	fillTestDoubleExemplarSlice(tv.Exemplars())
+}
+
+func generateTestDoubleSummaryDataPointSlice() DoubleSummaryDataPointSlice {
+	tv := NewDoubleSummaryDataPointSlice()
+	fillTestDoubleSummaryDataPointSlice(tv)
+	return tv
+}
+
+func fillTestDoubleSummaryDataPointSlice(tv DoubleSummaryDataPointSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestDoubleSummaryDataPoint(tv.At(i))
+	}
+}
+
+func generateTestDoubleSummaryDataPoint() DoubleSummaryDataPoint {
+	tv := NewDoubleSummaryDataPoint()
+	fillTestDoubleSummaryDataPoint(tv)
+	return tv
+}
+
+func fillTestDoubleSummaryDataPoint(tv DoubleSummaryDataPoint) {
+	fillTestStringMap(tv.LabelsMap())
+	tv.SetStartTime(TimestampUnixNano(1234567890))
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetCount(uint64(17))
+	tv.SetSum(float64(17.13))
+	fillTestValueAtQuantileSlice(tv.QuantileValues())
+}
+
+func generateTestValueAtQuantileSlice() ValueAtQuantileSlice {
+	tv := NewValueAtQuantileSlice()
+	fillTestValueAtQuantileSlice(tv)
+	return tv
+}
+
+func fillTestValueAtQuantileSlice(tv ValueAtQuantileSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestValueAtQuantile(tv.At(i))
+	}
+}
+
+func generateTestValueAtQuantile() ValueAtQuantile {
+	tv := NewValueAtQuantile()
+	fillTestValueAtQuantile(tv)
+	return tv
+}
+
+func fillTestValueAtQuantile(tv ValueAtQuantile) {
+	tv.SetQuantile(float64(17.13))
+	tv.SetValue(float64(17.13))
+}
+
+func generateTestIntExemplarSlice() IntExemplarSlice {
+	tv := NewIntExemplarSlice()
+	fillTestIntExemplarSlice(tv)
+	return tv
+}
+
+func fillTestIntExemplarSlice(tv IntExemplarSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestIntExemplar(tv.At(i))
+	}
+}
+
+func generateTestIntExemplar() IntExemplar {
+	tv := NewIntExemplar()
+	fillTestIntExemplar(tv)
+	return tv
+}
+
+func fillTestIntExemplar(tv IntExemplar) {
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetValue(int64(-17))
+	fillTestStringMap(tv.FilteredLabels())
+}
+
+func generateTestDoubleExemplarSlice() DoubleExemplarSlice {
+	tv := NewDoubleExemplarSlice()
+	fillTestDoubleExemplarSlice(tv)
+	return tv
+}
+
+func fillTestDoubleExemplarSlice(tv DoubleExemplarSlice) {
+	tv.Resize(7)
+	for i := 0; i < tv.Len(); i++ {
+		fillTestDoubleExemplar(tv.At(i))
+	}
+}
+
+func generateTestDoubleExemplar() DoubleExemplar {
+	tv := NewDoubleExemplar()
+	fillTestDoubleExemplar(tv)
+	return tv
+}
+
+func fillTestDoubleExemplar(tv DoubleExemplar) {
+	tv.SetTimestamp(TimestampUnixNano(1234567890))
+	tv.SetValue(float64(17.13))
+	fillTestStringMap(tv.FilteredLabels())
+}
diff --git a/internal/otel_collector/consumer/pdata/generated_resource.go b/internal/otel_collector/consumer/pdata/generated_resource.go
new file mode 100644
index 00000000000..d50abb781e6
--- /dev/null
+++ b/internal/otel_collector/consumer/pdata/generated_resource.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "go run cmd/pdatagen/main.go".
+
+package pdata
+
+import (
+	otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1"
+)
+
+// Resource information.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewResource function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Resource struct {
+	orig *otlpresource.Resource
+}
+
+func newResource(orig *otlpresource.Resource) Resource {
+	return Resource{orig: orig}
+}
+
+// NewResource creates a new empty Resource.
+//
+// This must be used only in testing code since no "Set" method available.
+func NewResource() Resource {
+	return newResource(&otlpresource.Resource{})
+}
+
+// Attributes returns the Attributes associated with this Resource.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Resource) Attributes() AttributeMap {
+	return newAttributeMap(&(*ms.orig).Attributes)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms Resource) CopyTo(dest Resource) {
+	ms.Attributes().CopyTo(dest.Attributes())
+}
diff --git a/internal/otel_collector/consumer/pdata/generated_resource_test.go b/internal/otel_collector/consumer/pdata/generated_resource_test.go
new file mode 100644
index 00000000000..cff3992e35f
--- /dev/null
+++ b/internal/otel_collector/consumer/pdata/generated_resource_test.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "go run cmd/pdatagen/main.go".
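+// A minimal usage sketch for the Resource wrapper defined in
+// generated_resource.go above (InsertString is assumed from this package's
+// AttributeMap API; it does not appear in this diff):
+//
+//	r := NewResource()                                 // testing-only constructor
+//	r.Attributes().InsertString("service.name", "apm") // assumed AttributeMap method
+//	dest := NewResource()
+//	r.CopyTo(dest)                                     // deep-copies the attributes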
+
+package pdata
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestResource_CopyTo(t *testing.T) {
+	ms := NewResource()
+	generateTestResource().CopyTo(ms)
+	assert.EqualValues(t, generateTestResource(), ms)
+}
+
+func TestResource_Attributes(t *testing.T) {
+	ms := NewResource()
+	assert.EqualValues(t, NewAttributeMap(), ms.Attributes())
+	fillTestAttributeMap(ms.Attributes())
+	testValAttributes := generateTestAttributeMap()
+	assert.EqualValues(t, testValAttributes, ms.Attributes())
+}
+
+func generateTestResource() Resource {
+	tv := NewResource()
+	fillTestResource(tv)
+	return tv
+}
+
+func fillTestResource(tv Resource) {
+	fillTestAttributeMap(tv.Attributes())
+}
diff --git a/internal/otel_collector/consumer/pdata/generated_trace.go b/internal/otel_collector/consumer/pdata/generated_trace.go
new file mode 100644
index 00000000000..2158d3ebf79
--- /dev/null
+++ b/internal/otel_collector/consumer/pdata/generated_trace.go
@@ -0,0 +1,1141 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "go run cmd/pdatagen/main.go".
+
+package pdata
+
+import (
+	"go.opentelemetry.io/collector/internal/data"
+	otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1"
+)
+
+// ResourceSpansSlice logically represents a slice of ResourceSpans.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewResourceSpansSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ResourceSpansSlice struct {
+	// orig points to the slice otlptrace.ResourceSpans field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlptrace.ResourceSpans
+}
+
+func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans) ResourceSpansSlice {
+	return ResourceSpansSlice{orig}
+}
+
+// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
+func NewResourceSpansSlice() ResourceSpansSlice {
+	orig := []*otlptrace.ResourceSpans(nil)
+	return ResourceSpansSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewResourceSpansSlice()".
+func (es ResourceSpansSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//   for i := 0; i < es.Len(); i++ {
+//       e := es.At(i)
+//       ... // Do something with the element
+//   }
+func (es ResourceSpansSlice) At(ix int) ResourceSpans {
+	return newResourceSpans((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
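+// For example:
+//   src.MoveAndAppendTo(dest) // dest gains src's elements; src.Len() becomes 0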
+func (es ResourceSpansSlice) MoveAndAppendTo(dest ResourceSpansSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newResourceSpans((*es.orig)[i]).CopyTo(newResourceSpans((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlptrace.ResourceSpans, srcLen)
+	wrappers := make([]*otlptrace.ResourceSpans, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newResourceSpans((*es.orig)[i]).CopyTo(newResourceSpans(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+//   1. If the newLen <= len then equivalent with slice[0:newLen:cap].
+//   2. If the newLen > len then (newLen - len) empty elements will be appended to the slice.
+//
+// Here is how a new ResourceSpansSlice can be initialized:
+//   es := NewResourceSpansSlice()
+//   es.Resize(4)
+//   for i := 0; i < es.Len(); i++ {
+//       e := es.At(i)
+//       // Here should set all the values for e.
+//   }
+func (es ResourceSpansSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlptrace.ResourceSpans, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlptrace.ResourceSpans, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the ResourceSpansSlice by one and set the
+// given ResourceSpans at that new position. The original ResourceSpans
+// could still be referenced so do not reuse it after passing it to this
+// method.
+func (es ResourceSpansSlice) Append(e ResourceSpans) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// ResourceSpans is a collection of spans from a Resource.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewResourceSpans function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ResourceSpans struct {
+	orig *otlptrace.ResourceSpans
+}
+
+func newResourceSpans(orig *otlptrace.ResourceSpans) ResourceSpans {
+	return ResourceSpans{orig: orig}
+}
+
+// NewResourceSpans creates a new empty ResourceSpans.
+//
+// This must be used only in testing code since no "Set" method available.
+func NewResourceSpans() ResourceSpans {
+	return newResourceSpans(&otlptrace.ResourceSpans{})
+}
+
+// Resource returns the resource associated with this ResourceSpans.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms ResourceSpans) Resource() Resource {
+	return newResource(&(*ms.orig).Resource)
+}
+
+// InstrumentationLibrarySpans returns the InstrumentationLibrarySpans associated with this ResourceSpans.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
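+// The returned slice is a view into ms's underlying message: appending to it
+// (for example via Append) is visible through ms, since both share the same
+// otlptrace.ResourceSpans.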
+func (ms ResourceSpans) InstrumentationLibrarySpans() InstrumentationLibrarySpansSlice {
+	return newInstrumentationLibrarySpansSlice(&(*ms.orig).InstrumentationLibrarySpans)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms ResourceSpans) CopyTo(dest ResourceSpans) {
+	ms.Resource().CopyTo(dest.Resource())
+	ms.InstrumentationLibrarySpans().CopyTo(dest.InstrumentationLibrarySpans())
+}
+
+// InstrumentationLibrarySpansSlice logically represents a slice of InstrumentationLibrarySpans.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewInstrumentationLibrarySpansSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type InstrumentationLibrarySpansSlice struct {
+	// orig points to the slice otlptrace.InstrumentationLibrarySpans field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlptrace.InstrumentationLibrarySpans
+}
+
+func newInstrumentationLibrarySpansSlice(orig *[]*otlptrace.InstrumentationLibrarySpans) InstrumentationLibrarySpansSlice {
+	return InstrumentationLibrarySpansSlice{orig}
+}
+
+// NewInstrumentationLibrarySpansSlice creates an InstrumentationLibrarySpansSlice with 0 elements.
+// Can use "Resize" to initialize with a given length.
+func NewInstrumentationLibrarySpansSlice() InstrumentationLibrarySpansSlice {
+	orig := []*otlptrace.InstrumentationLibrarySpans(nil)
+	return InstrumentationLibrarySpansSlice{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for an instance newly created with "NewInstrumentationLibrarySpansSlice()".
+func (es InstrumentationLibrarySpansSlice) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     ... // Do something with the element
+// }
+func (es InstrumentationLibrarySpansSlice) At(ix int) InstrumentationLibrarySpans {
+	return newInstrumentationLibrarySpans((*es.orig)[ix])
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es InstrumentationLibrarySpansSlice) MoveAndAppendTo(dest InstrumentationLibrarySpansSlice) {
+	if *dest.orig == nil {
+		// We can simply move the entire vector and avoid any allocations.
+		*dest.orig = *es.orig
+	} else {
+		*dest.orig = append(*dest.orig, *es.orig...)
+	}
+	*es.orig = nil
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es InstrumentationLibrarySpansSlice) CopyTo(dest InstrumentationLibrarySpansSlice) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			newInstrumentationLibrarySpans((*es.orig)[i]).CopyTo(newInstrumentationLibrarySpans((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]otlptrace.InstrumentationLibrarySpans, srcLen)
+	wrappers := make([]*otlptrace.InstrumentationLibrarySpans, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		newInstrumentationLibrarySpans((*es.orig)[i]).CopyTo(newInstrumentationLibrarySpans(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// Resize is an operation that resizes the slice:
+// 1. If the newLen <= len then equivalent with slice[0:newLen:cap].
+// 2. If the newLen > len then (newLen - len) empty elements will be appended to the slice.
+//
+// Here is how a new InstrumentationLibrarySpansSlice can be initialized:
+// es := NewInstrumentationLibrarySpansSlice()
+// es.Resize(4)
+// for i := 0; i < es.Len(); i++ {
+//     e := es.At(i)
+//     // Here should set all the values for e.
+// }
+func (es InstrumentationLibrarySpansSlice) Resize(newLen int) {
+	oldLen := len(*es.orig)
+	oldCap := cap(*es.orig)
+	if newLen <= oldLen {
+		*es.orig = (*es.orig)[:newLen:oldCap]
+		return
+	}
+
+	if newLen > oldCap {
+		newOrig := make([]*otlptrace.InstrumentationLibrarySpans, oldLen, newLen)
+		copy(newOrig, *es.orig)
+		*es.orig = newOrig
+	}
+
+	// Add extra empty elements to the array.
+	extraOrigs := make([]otlptrace.InstrumentationLibrarySpans, newLen-oldLen)
+	for i := range extraOrigs {
+		*es.orig = append(*es.orig, &extraOrigs[i])
+	}
+}
+
+// Append will increase the length of the InstrumentationLibrarySpansSlice by one and set the
+// given InstrumentationLibrarySpans at that new position. The original InstrumentationLibrarySpans
+// could still be referenced so do not reuse it after passing it to this
+// method.
+func (es InstrumentationLibrarySpansSlice) Append(e InstrumentationLibrarySpans) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// InstrumentationLibrarySpans is a collection of spans from an InstrumentationLibrary.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewInstrumentationLibrarySpans function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type InstrumentationLibrarySpans struct {
+	orig *otlptrace.InstrumentationLibrarySpans
+}
+
+func newInstrumentationLibrarySpans(orig *otlptrace.InstrumentationLibrarySpans) InstrumentationLibrarySpans {
+	return InstrumentationLibrarySpans{orig: orig}
+}
+
+// NewInstrumentationLibrarySpans creates a new empty InstrumentationLibrarySpans.
+//
+// This must be used only in testing code since no "Set" method is available.
+func NewInstrumentationLibrarySpans() InstrumentationLibrarySpans {
+	return newInstrumentationLibrarySpans(&otlptrace.InstrumentationLibrarySpans{})
+}
+
+// InstrumentationLibrary returns the instrumentationlibrary associated with this InstrumentationLibrarySpans.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms InstrumentationLibrarySpans) InstrumentationLibrary() InstrumentationLibrary {
+	return newInstrumentationLibrary(&(*ms.orig).InstrumentationLibrary)
+}
+
+// Spans returns the Spans associated with this InstrumentationLibrarySpans.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms InstrumentationLibrarySpans) Spans() SpanSlice {
+	return newSpanSlice(&(*ms.orig).Spans)
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms InstrumentationLibrarySpans) CopyTo(dest InstrumentationLibrarySpans) {
+	ms.InstrumentationLibrary().CopyTo(dest.InstrumentationLibrary())
+	ms.Spans().CopyTo(dest.Spans())
+}
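+
+// A hedged sketch of the traversal implied by the nesting above
+// (ResourceSpansSlice -> ResourceSpans -> InstrumentationLibrarySpansSlice ->
+// InstrumentationLibrarySpans -> SpanSlice); rss is assumed to be a
+// ResourceSpansSlice already in hand:
+//
+//     for i := 0; i < rss.Len(); i++ {
+//         ilss := rss.At(i).InstrumentationLibrarySpans()
+//         for j := 0; j < ilss.Len(); j++ {
+//             spans := ilss.At(j).Spans()
+//             for k := 0; k < spans.Len(); k++ {
+//                 _ = spans.At(k) // do something with each span
+//             }
+//         }
+//     }
+
+// SpanSlice logically represents a slice of Span.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewSpanSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanSlice struct {
+	// orig points to the slice otlptrace.Span field contained somewhere else.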
+ // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlptrace.Span +} + +func newSpanSlice(orig *[]*otlptrace.Span) SpanSlice { + return SpanSlice{orig} +} + +// NewSpanSlice creates a SpanSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewSpanSlice() SpanSlice { + orig := []*otlptrace.Span(nil) + return SpanSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSpanSlice()". +func (es SpanSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SpanSlice) At(ix int) Span { + return newSpan((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SpanSlice) MoveAndAppendTo(dest SpanSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es SpanSlice) CopyTo(dest SpanSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSpan((*es.orig)[i]).CopyTo(newSpan((*dest.orig)[i])) + } + return + } + origs := make([]otlptrace.Span, srcLen) + wrappers := make([]*otlptrace.Span, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSpan((*es.orig)[i]).CopyTo(newSpan(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new SpanSlice can be initialized: +// es := NewSpanSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es SpanSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlptrace.Span, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlptrace.Span, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the SpanSlice by one and set the +// given Span at that new position. The original Span +// could still be referenced so do not reuse it after passing it to this +// method. +func (es SpanSlice) Append(e Span) { + *es.orig = append(*es.orig, e.orig) +} + +// Span represents a single operation within a trace. +// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/master/opentelemetry/proto/trace/v1/trace.proto#L37 +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSpan function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type Span struct {
+	orig *otlptrace.Span
+}
+
+func newSpan(orig *otlptrace.Span) Span {
+	return Span{orig: orig}
+}
+
+// NewSpan creates a new empty Span.
+//
+// This must be used only in testing code since no "Set" method is available.
+func NewSpan() Span {
+	return newSpan(&otlptrace.Span{})
+}
+
+// TraceID returns the traceid associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) TraceID() TraceID {
+	return TraceID((*ms.orig).TraceId)
+}
+
+// SetTraceID replaces the traceid associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetTraceID(v TraceID) {
+	(*ms.orig).TraceId = data.TraceID(v)
+}
+
+// SpanID returns the spanid associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SpanID() SpanID {
+	return SpanID((*ms.orig).SpanId)
+}
+
+// SetSpanID replaces the spanid associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetSpanID(v SpanID) {
+	(*ms.orig).SpanId = data.SpanID(v)
+}
+
+// TraceState returns the tracestate associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) TraceState() TraceState {
+	return TraceState((*ms.orig).TraceState)
+}
+
+// SetTraceState replaces the tracestate associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetTraceState(v TraceState) {
+	(*ms.orig).TraceState = string(v)
+}
+
+// ParentSpanID returns the parentspanid associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) ParentSpanID() SpanID {
+	return SpanID((*ms.orig).ParentSpanId)
+}
+
+// SetParentSpanID replaces the parentspanid associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetParentSpanID(v SpanID) {
+	(*ms.orig).ParentSpanId = data.SpanID(v)
+}
+
+// Name returns the name associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) Name() string {
+	return (*ms.orig).Name
+}
+
+// SetName replaces the name associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetName(v string) {
+	(*ms.orig).Name = v
+}
+
+// Kind returns the kind associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) Kind() SpanKind {
+	return SpanKind((*ms.orig).Kind)
+}
+
+// SetKind replaces the kind associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetKind(v SpanKind) {
+	(*ms.orig).Kind = otlptrace.Span_SpanKind(v)
+}
+
+// StartTime returns the starttime associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) StartTime() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).StartTimeUnixNano)
+}
+
+// SetStartTime replaces the starttime associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms Span) SetStartTime(v TimestampUnixNano) {
+	(*ms.orig).StartTimeUnixNano = uint64(v)
+}
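+
+// A hedged sketch of populating a Span through these setters; all values are
+// placeholders, and NewSpan itself is intended for tests (production code
+// obtains Span elements from a SpanSlice):
+//
+//     span := NewSpan()
+//     span.SetTraceID(NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}))
+//     span.SetSpanID(NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
+//     span.SetName("GET /users")
+//     span.SetKind(SpanKindSERVER)
+//     span.SetStartTime(TimestampUnixNano(1600000000000000000))
+//     span.SetEndTime(TimestampUnixNano(1600000001000000000))
+
+// EndTime returns the endtime associated with this Span.
+//
+// Important: This causes a runtime error if IsNil() returns "true".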
+func (ms Span) EndTime() TimestampUnixNano { + return TimestampUnixNano((*ms.orig).EndTimeUnixNano) +} + +// SetEndTime replaces the endtime associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) SetEndTime(v TimestampUnixNano) { + (*ms.orig).EndTimeUnixNano = uint64(v) +} + +// Attributes returns the Attributes associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) Attributes() AttributeMap { + return newAttributeMap(&(*ms.orig).Attributes) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) DroppedAttributesCount() uint32 { + return (*ms.orig).DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) SetDroppedAttributesCount(v uint32) { + (*ms.orig).DroppedAttributesCount = v +} + +// Events returns the Events associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) Events() SpanEventSlice { + return newSpanEventSlice(&(*ms.orig).Events) +} + +// DroppedEventsCount returns the droppedeventscount associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) DroppedEventsCount() uint32 { + return (*ms.orig).DroppedEventsCount +} + +// SetDroppedEventsCount replaces the droppedeventscount associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) SetDroppedEventsCount(v uint32) { + (*ms.orig).DroppedEventsCount = v +} + +// Links returns the Links associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) Links() SpanLinkSlice { + return newSpanLinkSlice(&(*ms.orig).Links) +} + +// DroppedLinksCount returns the droppedlinkscount associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) DroppedLinksCount() uint32 { + return (*ms.orig).DroppedLinksCount +} + +// SetDroppedLinksCount replaces the droppedlinkscount associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) SetDroppedLinksCount(v uint32) { + (*ms.orig).DroppedLinksCount = v +} + +// Status returns the status associated with this Span. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms Span) Status() SpanStatus { + return newSpanStatus(&(*ms.orig).Status) +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms Span) CopyTo(dest Span) { + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) + dest.SetTraceState(ms.TraceState()) + dest.SetParentSpanID(ms.ParentSpanID()) + dest.SetName(ms.Name()) + dest.SetKind(ms.Kind()) + dest.SetStartTime(ms.StartTime()) + dest.SetEndTime(ms.EndTime()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) + ms.Events().CopyTo(dest.Events()) + dest.SetDroppedEventsCount(ms.DroppedEventsCount()) + ms.Links().CopyTo(dest.Links()) + dest.SetDroppedLinksCount(ms.DroppedLinksCount()) + ms.Status().CopyTo(dest.Status()) +} + +// SpanEventSlice logically represents a slice of SpanEvent. 
+// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSpanEventSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SpanEventSlice struct { + // orig points to the slice otlptrace.Span_Event field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like Resize. + orig *[]*otlptrace.Span_Event +} + +func newSpanEventSlice(orig *[]*otlptrace.Span_Event) SpanEventSlice { + return SpanEventSlice{orig} +} + +// NewSpanEventSlice creates a SpanEventSlice with 0 elements. +// Can use "Resize" to initialize with a given length. +func NewSpanEventSlice() SpanEventSlice { + orig := []*otlptrace.Span_Event(nil) + return SpanEventSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSpanEventSlice()". +func (es SpanEventSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SpanEventSlice) At(ix int) SpanEvent { + return newSpanEvent((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SpanEventSlice) MoveAndAppendTo(dest SpanEventSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es SpanEventSlice) CopyTo(dest SpanEventSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSpanEvent((*es.orig)[i]).CopyTo(newSpanEvent((*dest.orig)[i])) + } + return + } + origs := make([]otlptrace.Span_Event, srcLen) + wrappers := make([]*otlptrace.Span_Event, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSpanEvent((*es.orig)[i]).CopyTo(newSpanEvent(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new SpanEventSlice can be initialized: +// es := NewSpanEventSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es SpanEventSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlptrace.Span_Event, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlptrace.Span_Event, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the SpanEventSlice by one and set the +// given SpanEvent at that new position. The original SpanEvent +// could still be referenced so do not reuse it after passing it to this +// method. 
+func (es SpanEventSlice) Append(e SpanEvent) {
+	*es.orig = append(*es.orig, e.orig)
+}
+
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs. See OTLP for event definition.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewSpanEvent function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanEvent struct {
+	orig *otlptrace.Span_Event
+}
+
+func newSpanEvent(orig *otlptrace.Span_Event) SpanEvent {
+	return SpanEvent{orig: orig}
+}
+
+// NewSpanEvent creates a new empty SpanEvent.
+//
+// This must be used only in testing code since no "Set" method is available.
+func NewSpanEvent() SpanEvent {
+	return newSpanEvent(&otlptrace.Span_Event{})
+}
+
+// Timestamp returns the timestamp associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) Timestamp() TimestampUnixNano {
+	return TimestampUnixNano((*ms.orig).TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) SetTimestamp(v TimestampUnixNano) {
+	(*ms.orig).TimeUnixNano = uint64(v)
+}
+
+// Name returns the name associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) Name() string {
+	return (*ms.orig).Name
+}
+
+// SetName replaces the name associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) SetName(v string) {
+	(*ms.orig).Name = v
+}
+
+// Attributes returns the Attributes associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) Attributes() AttributeMap {
+	return newAttributeMap(&(*ms.orig).Attributes)
+}
+
+// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) DroppedAttributesCount() uint32 {
+	return (*ms.orig).DroppedAttributesCount
+}
+
+// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanEvent.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanEvent) SetDroppedAttributesCount(v uint32) {
+	(*ms.orig).DroppedAttributesCount = v
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms SpanEvent) CopyTo(dest SpanEvent) {
+	dest.SetTimestamp(ms.Timestamp())
+	dest.SetName(ms.Name())
+	ms.Attributes().CopyTo(dest.Attributes())
+	dest.SetDroppedAttributesCount(ms.DroppedAttributesCount())
+}
+
+// SpanLinkSlice logically represents a slice of SpanLink.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewSpanLinkSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanLinkSlice struct {
+	// orig points to the slice otlptrace.Span_Link field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like Resize.
+	orig *[]*otlptrace.Span_Link
+}
+
+func newSpanLinkSlice(orig *[]*otlptrace.Span_Link) SpanLinkSlice {
+	return SpanLinkSlice{orig}
+}
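+
+// A hedged sketch: appending a timestamped event to a span, using the
+// Resize-then-fill pattern of the slice API above; the event name and
+// attribute key are illustrative only:
+//
+//     events := span.Events()
+//     events.Resize(events.Len() + 1)
+//     ev := events.At(events.Len() - 1)
+//     ev.SetTimestamp(TimestampUnixNano(1600000000500000000))
+//     ev.SetName("exception")
+//     ev.Attributes().InsertString("exception.message", "boom")
+
+// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.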
+// Can use "Resize" to initialize with a given length. +func NewSpanLinkSlice() SpanLinkSlice { + orig := []*otlptrace.Span_Link(nil) + return SpanLinkSlice{&orig} +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSpanLinkSlice()". +func (es SpanLinkSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SpanLinkSlice) At(ix int) SpanLink { + return newSpanLink((*es.orig)[ix]) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SpanLinkSlice) MoveAndAppendTo(dest SpanLinkSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// CopyTo copies all elements from the current slice to the dest. +func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSpanLink((*es.orig)[i]).CopyTo(newSpanLink((*dest.orig)[i])) + } + return + } + origs := make([]otlptrace.Span_Link, srcLen) + wrappers := make([]*otlptrace.Span_Link, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSpanLink((*es.orig)[i]).CopyTo(newSpanLink(wrappers[i])) + } + *dest.orig = wrappers +} + +// Resize is an operation that resizes the slice: +// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. +// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. +// +// Here is how a new SpanLinkSlice can be initialized: +// es := NewSpanLinkSlice() +// es.Resize(4) +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// // Here should set all the values for e. +// } +func (es SpanLinkSlice) Resize(newLen int) { + oldLen := len(*es.orig) + oldCap := cap(*es.orig) + if newLen <= oldLen { + *es.orig = (*es.orig)[:newLen:oldCap] + return + } + + if newLen > oldCap { + newOrig := make([]*otlptrace.Span_Link, oldLen, newLen) + copy(newOrig, *es.orig) + *es.orig = newOrig + } + + // Add extra empty elements to the array. + extraOrigs := make([]otlptrace.Span_Link, newLen-oldLen) + for i := range extraOrigs { + *es.orig = append(*es.orig, &extraOrigs[i]) + } +} + +// Append will increase the length of the SpanLinkSlice by one and set the +// given SpanLink at that new position. The original SpanLink +// could still be referenced so do not reuse it after passing it to this +// method. +func (es SpanLinkSlice) Append(e SpanLink) { + *es.orig = append(*es.orig, e.orig) +} + +// SpanLink is a pointer from the current span to another span in the same trace or in a +// different trace. See OTLP for link definition. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSpanLink function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SpanLink struct { + orig *otlptrace.Span_Link +} + +func newSpanLink(orig *otlptrace.Span_Link) SpanLink { + return SpanLink{orig: orig} +} + +// NewSpanLink creates a new empty SpanLink. 
+//
+// This must be used only in testing code since no "Set" method is available.
+func NewSpanLink() SpanLink {
+	return newSpanLink(&otlptrace.Span_Link{})
+}
+
+// TraceID returns the traceid associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) TraceID() TraceID {
+	return TraceID((*ms.orig).TraceId)
+}
+
+// SetTraceID replaces the traceid associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) SetTraceID(v TraceID) {
+	(*ms.orig).TraceId = data.TraceID(v)
+}
+
+// SpanID returns the spanid associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) SpanID() SpanID {
+	return SpanID((*ms.orig).SpanId)
+}
+
+// SetSpanID replaces the spanid associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) SetSpanID(v SpanID) {
+	(*ms.orig).SpanId = data.SpanID(v)
+}
+
+// TraceState returns the tracestate associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) TraceState() TraceState {
+	return TraceState((*ms.orig).TraceState)
+}
+
+// SetTraceState replaces the tracestate associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) SetTraceState(v TraceState) {
+	(*ms.orig).TraceState = string(v)
+}
+
+// Attributes returns the Attributes associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) Attributes() AttributeMap {
+	return newAttributeMap(&(*ms.orig).Attributes)
+}
+
+// DroppedAttributesCount returns the droppedattributescount associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) DroppedAttributesCount() uint32 {
+	return (*ms.orig).DroppedAttributesCount
+}
+
+// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanLink.
+//
+// Important: This causes a runtime error if IsNil() returns "true".
+func (ms SpanLink) SetDroppedAttributesCount(v uint32) {
+	(*ms.orig).DroppedAttributesCount = v
+}
+
+// CopyTo copies all properties from the current struct to the dest.
+func (ms SpanLink) CopyTo(dest SpanLink) {
+	dest.SetTraceID(ms.TraceID())
+	dest.SetSpanID(ms.SpanID())
+	dest.SetTraceState(ms.TraceState())
+	ms.Attributes().CopyTo(dest.Attributes())
+	dest.SetDroppedAttributesCount(ms.DroppedAttributesCount())
+}
+
+// SpanStatus is an optional final status for this span. Semantically, when Status
+// is not set, it means the span ended without errors and Status.Ok (code = 0) is
+// assumed.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewSpanStatus function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SpanStatus struct {
+	orig *otlptrace.Status
+}
+
+func newSpanStatus(orig *otlptrace.Status) SpanStatus {
+	return SpanStatus{orig: orig}
+}
+
+// NewSpanStatus creates a new empty SpanStatus.
+//
+// This must be used only in testing code since no "Set" method is available.
+func NewSpanStatus() SpanStatus {
+	return newSpanStatus(&otlptrace.Status{})
+}
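+
+// A hedged sketch of marking a span as failed. SetCode is not part of this
+// generated file (CopyTo below relies on it being defined elsewhere in the
+// package), and the numeric value follows the OTLP convention at the time of
+// vendoring, where 2 denotes an error; treat both as illustrative:
+//
+//     status := span.Status()
+//     status.SetCode(StatusCode(2)) // STATUS_CODE_ERROR in OTLP
+//     status.SetMessage("upstream timeout")
+
+// Code returns the code associated with this SpanStatus.
+//
+// Important: This causes a runtime error if IsNil() returns "true".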
+func (ms SpanStatus) Code() StatusCode { + return StatusCode((*ms.orig).Code) +} + +// DeprecatedCode returns the deprecatedcode associated with this SpanStatus. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms SpanStatus) DeprecatedCode() DeprecatedStatusCode { + return DeprecatedStatusCode((*ms.orig).DeprecatedCode) +} + +// SetDeprecatedCode replaces the deprecatedcode associated with this SpanStatus. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms SpanStatus) SetDeprecatedCode(v DeprecatedStatusCode) { + (*ms.orig).DeprecatedCode = otlptrace.Status_DeprecatedStatusCode(v) +} + +// Message returns the message associated with this SpanStatus. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms SpanStatus) Message() string { + return (*ms.orig).Message +} + +// SetMessage replaces the message associated with this SpanStatus. +// +// Important: This causes a runtime error if IsNil() returns "true". +func (ms SpanStatus) SetMessage(v string) { + (*ms.orig).Message = v +} + +// CopyTo copies all properties from the current struct to the dest. +func (ms SpanStatus) CopyTo(dest SpanStatus) { + dest.SetCode(ms.Code()) + dest.SetDeprecatedCode(ms.DeprecatedCode()) + dest.SetMessage(ms.Message()) +} diff --git a/internal/otel_collector/consumer/pdata/generated_trace_test.go b/internal/otel_collector/consumer/pdata/generated_trace_test.go new file mode 100644 index 00000000000..c3188829bdc --- /dev/null +++ b/internal/otel_collector/consumer/pdata/generated_trace_test.go @@ -0,0 +1,1019 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run cmd/pdatagen/main.go". 
+ +package pdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" +) + +func TestResourceSpansSlice(t *testing.T) { + es := NewResourceSpansSlice() + assert.EqualValues(t, 0, es.Len()) + es = newResourceSpansSlice(&[]*otlptrace.ResourceSpans{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewResourceSpans() + testVal := generateTestResourceSpans() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestResourceSpans(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestResourceSpansSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestResourceSpansSlice() + dest := NewResourceSpansSlice() + src := generateTestResourceSpansSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestResourceSpansSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestResourceSpansSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestResourceSpansSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestResourceSpansSlice_CopyTo(t *testing.T) { + dest := NewResourceSpansSlice() + // Test CopyTo to empty + NewResourceSpansSlice().CopyTo(dest) + assert.EqualValues(t, NewResourceSpansSlice(), dest) + + // Test CopyTo larger slice + generateTestResourceSpansSlice().CopyTo(dest) + assert.EqualValues(t, generateTestResourceSpansSlice(), dest) + + // Test CopyTo same size slice + generateTestResourceSpansSlice().CopyTo(dest) + assert.EqualValues(t, generateTestResourceSpansSlice(), dest) +} + +func TestResourceSpansSlice_Resize(t *testing.T) { + es := generateTestResourceSpansSlice() + emptyVal := NewResourceSpans() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlptrace.ResourceSpans]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlptrace.ResourceSpans]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlptrace.ResourceSpans]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlptrace.ResourceSpans]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. 
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestResourceSpansSlice_Append(t *testing.T) {
+	es := generateTestResourceSpansSlice()
+
+	emptyVal := NewResourceSpans()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewResourceSpans()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestResourceSpans_CopyTo(t *testing.T) {
+	ms := NewResourceSpans()
+	generateTestResourceSpans().CopyTo(ms)
+	assert.EqualValues(t, generateTestResourceSpans(), ms)
+}
+
+func TestResourceSpans_Resource(t *testing.T) {
+	ms := NewResourceSpans()
+	fillTestResource(ms.Resource())
+	assert.EqualValues(t, generateTestResource(), ms.Resource())
+}
+
+func TestResourceSpans_InstrumentationLibrarySpans(t *testing.T) {
+	ms := NewResourceSpans()
+	assert.EqualValues(t, NewInstrumentationLibrarySpansSlice(), ms.InstrumentationLibrarySpans())
+	fillTestInstrumentationLibrarySpansSlice(ms.InstrumentationLibrarySpans())
+	testValInstrumentationLibrarySpans := generateTestInstrumentationLibrarySpansSlice()
+	assert.EqualValues(t, testValInstrumentationLibrarySpans, ms.InstrumentationLibrarySpans())
+}
+
+func TestInstrumentationLibrarySpansSlice(t *testing.T) {
+	es := NewInstrumentationLibrarySpansSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newInstrumentationLibrarySpansSlice(&[]*otlptrace.InstrumentationLibrarySpans{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewInstrumentationLibrarySpans()
+	testVal := generateTestInstrumentationLibrarySpans()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestInstrumentationLibrarySpans(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestInstrumentationLibrarySpansSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestInstrumentationLibrarySpansSlice()
+	dest := NewInstrumentationLibrarySpansSlice()
+	src := generateTestInstrumentationLibrarySpansSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestInstrumentationLibrarySpansSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestInstrumentationLibrarySpansSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestInstrumentationLibrarySpansSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestInstrumentationLibrarySpansSlice_CopyTo(t *testing.T) {
+	dest := NewInstrumentationLibrarySpansSlice()
+	// Test CopyTo to empty
+	NewInstrumentationLibrarySpansSlice().CopyTo(dest)
+	assert.EqualValues(t, NewInstrumentationLibrarySpansSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestInstrumentationLibrarySpansSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestInstrumentationLibrarySpansSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestInstrumentationLibrarySpansSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestInstrumentationLibrarySpansSlice(), dest)
+}
+
+func TestInstrumentationLibrarySpansSlice_Resize(t *testing.T) {
+	es := generateTestInstrumentationLibrarySpansSlice()
+	emptyVal := NewInstrumentationLibrarySpans()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlptrace.InstrumentationLibrarySpans]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlptrace.InstrumentationLibrarySpans]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlptrace.InstrumentationLibrarySpans]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlptrace.InstrumentationLibrarySpans]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+	es.Resize(0)
+	assert.Equal(t, 0, es.Len())
+}
+
+func TestInstrumentationLibrarySpansSlice_Append(t *testing.T) {
+	es := generateTestInstrumentationLibrarySpansSlice()
+
+	emptyVal := NewInstrumentationLibrarySpans()
+	es.Append(emptyVal)
+	assert.EqualValues(t, es.At(7).orig, emptyVal.orig)
+
+	emptyVal2 := NewInstrumentationLibrarySpans()
+	es.Append(emptyVal2)
+	assert.EqualValues(t, es.At(8).orig, emptyVal2.orig)
+
+	assert.Equal(t, 9, es.Len())
+}
+
+func TestInstrumentationLibrarySpans_CopyTo(t *testing.T) {
+	ms := NewInstrumentationLibrarySpans()
+	generateTestInstrumentationLibrarySpans().CopyTo(ms)
+	assert.EqualValues(t, generateTestInstrumentationLibrarySpans(), ms)
+}
+
+func TestInstrumentationLibrarySpans_InstrumentationLibrary(t *testing.T) {
+	ms := NewInstrumentationLibrarySpans()
+	fillTestInstrumentationLibrary(ms.InstrumentationLibrary())
+	assert.EqualValues(t, generateTestInstrumentationLibrary(), ms.InstrumentationLibrary())
+}
+
+func TestInstrumentationLibrarySpans_Spans(t *testing.T) {
+	ms := NewInstrumentationLibrarySpans()
+	assert.EqualValues(t, NewSpanSlice(), ms.Spans())
+	fillTestSpanSlice(ms.Spans())
+	testValSpans := generateTestSpanSlice()
+	assert.EqualValues(t, testValSpans, ms.Spans())
+}
+
+func TestSpanSlice(t *testing.T) {
+	es := NewSpanSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newSpanSlice(&[]*otlptrace.Span{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewSpan()
+	testVal := generateTestSpan()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestSpan(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestSpanSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestSpanSlice()
+	dest := NewSpanSlice()
+	src := generateTestSpanSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestSpanSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestSpanSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestSpanSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestSpanSlice_CopyTo(t *testing.T) {
+	dest := NewSpanSlice()
+	// Test CopyTo to empty
+	NewSpanSlice().CopyTo(dest)
+	assert.EqualValues(t, NewSpanSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestSpanSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestSpanSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestSpanSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestSpanSlice(), dest)
+}
+
+func TestSpanSlice_Resize(t *testing.T) {
+	es := generateTestSpanSlice()
+	emptyVal := NewSpan()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlptrace.Span]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlptrace.Span]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+	const resizeLargeLen = 7
+	oldLen := es.Len()
+	expectedEs = make(map[*otlptrace.Span]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.Resize(resizeLargeLen)
+	assert.Equal(t, resizeLargeLen, es.Len())
+	foundEs = make(map[*otlptrace.Span]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+	for i := oldLen; i < resizeLargeLen; i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+	}
+
+	// Test Resize 0 elements.
+ es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestSpanSlice_Append(t *testing.T) { + es := generateTestSpanSlice() + + emptyVal := NewSpan() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewSpan() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestSpan_CopyTo(t *testing.T) { + ms := NewSpan() + generateTestSpan().CopyTo(ms) + assert.EqualValues(t, generateTestSpan(), ms) +} + +func TestSpan_TraceID(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, NewTraceID([16]byte{}), ms.TraceID()) + testValTraceID := NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) + ms.SetTraceID(testValTraceID) + assert.EqualValues(t, testValTraceID, ms.TraceID()) +} + +func TestSpan_SpanID(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, NewSpanID([8]byte{}), ms.SpanID()) + testValSpanID := NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + ms.SetSpanID(testValSpanID) + assert.EqualValues(t, testValSpanID, ms.SpanID()) +} + +func TestSpan_TraceState(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, TraceState(""), ms.TraceState()) + testValTraceState := TraceState("congo=congos") + ms.SetTraceState(testValTraceState) + assert.EqualValues(t, testValTraceState, ms.TraceState()) +} + +func TestSpan_ParentSpanID(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, NewSpanID([8]byte{}), ms.ParentSpanID()) + testValParentSpanID := NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) + ms.SetParentSpanID(testValParentSpanID) + assert.EqualValues(t, testValParentSpanID, ms.ParentSpanID()) +} + +func TestSpan_Name(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, "", ms.Name()) + testValName := "test_name" + ms.SetName(testValName) + assert.EqualValues(t, testValName, ms.Name()) +} + +func TestSpan_Kind(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, SpanKindUNSPECIFIED, ms.Kind()) + testValKind := SpanKindSERVER + ms.SetKind(testValKind) + assert.EqualValues(t, testValKind, ms.Kind()) +} + +func TestSpan_StartTime(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, TimestampUnixNano(0), ms.StartTime()) + testValStartTime := TimestampUnixNano(1234567890) + ms.SetStartTime(testValStartTime) + assert.EqualValues(t, testValStartTime, ms.StartTime()) +} + +func TestSpan_EndTime(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, TimestampUnixNano(0), ms.EndTime()) + testValEndTime := TimestampUnixNano(1234567890) + ms.SetEndTime(testValEndTime) + assert.EqualValues(t, testValEndTime, ms.EndTime()) +} + +func TestSpan_Attributes(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, NewAttributeMap(), ms.Attributes()) + fillTestAttributeMap(ms.Attributes()) + testValAttributes := generateTestAttributeMap() + assert.EqualValues(t, testValAttributes, ms.Attributes()) +} + +func TestSpan_DroppedAttributesCount(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, uint32(0), ms.DroppedAttributesCount()) + testValDroppedAttributesCount := uint32(17) + ms.SetDroppedAttributesCount(testValDroppedAttributesCount) + assert.EqualValues(t, testValDroppedAttributesCount, ms.DroppedAttributesCount()) +} + +func TestSpan_Events(t *testing.T) { + ms := NewSpan() + assert.EqualValues(t, NewSpanEventSlice(), ms.Events()) + fillTestSpanEventSlice(ms.Events()) + testValEvents := generateTestSpanEventSlice() + assert.EqualValues(t, testValEvents, ms.Events()) +} + +func TestSpan_DroppedEventsCount(t *testing.T) { + ms := NewSpan() + 
+	assert.EqualValues(t, uint32(0), ms.DroppedEventsCount())
+	testValDroppedEventsCount := uint32(17)
+	ms.SetDroppedEventsCount(testValDroppedEventsCount)
+	assert.EqualValues(t, testValDroppedEventsCount, ms.DroppedEventsCount())
+}
+
+func TestSpan_Links(t *testing.T) {
+	ms := NewSpan()
+	assert.EqualValues(t, NewSpanLinkSlice(), ms.Links())
+	fillTestSpanLinkSlice(ms.Links())
+	testValLinks := generateTestSpanLinkSlice()
+	assert.EqualValues(t, testValLinks, ms.Links())
+}
+
+func TestSpan_DroppedLinksCount(t *testing.T) {
+	ms := NewSpan()
+	assert.EqualValues(t, uint32(0), ms.DroppedLinksCount())
+	testValDroppedLinksCount := uint32(17)
+	ms.SetDroppedLinksCount(testValDroppedLinksCount)
+	assert.EqualValues(t, testValDroppedLinksCount, ms.DroppedLinksCount())
+}
+
+func TestSpan_Status(t *testing.T) {
+	ms := NewSpan()
+	fillTestSpanStatus(ms.Status())
+	assert.EqualValues(t, generateTestSpanStatus(), ms.Status())
+}
+
+func TestSpanEventSlice(t *testing.T) {
+	es := NewSpanEventSlice()
+	assert.EqualValues(t, 0, es.Len())
+	es = newSpanEventSlice(&[]*otlptrace.Span_Event{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.Resize(7)
+	emptyVal := NewSpanEvent()
+	testVal := generateTestSpanEvent()
+	assert.EqualValues(t, 7, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		assert.EqualValues(t, emptyVal, es.At(i))
+		fillTestSpanEvent(es.At(i))
+		assert.EqualValues(t, testVal, es.At(i))
+	}
+}
+
+func TestSpanEventSlice_MoveAndAppendTo(t *testing.T) {
+	// Test MoveAndAppendTo to empty
+	expectedSlice := generateTestSpanEventSlice()
+	dest := NewSpanEventSlice()
+	src := generateTestSpanEventSlice()
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestSpanEventSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo empty slice
+	src.MoveAndAppendTo(dest)
+	assert.EqualValues(t, generateTestSpanEventSlice(), dest)
+	assert.EqualValues(t, 0, src.Len())
+	assert.EqualValues(t, expectedSlice.Len(), dest.Len())
+
+	// Test MoveAndAppendTo not empty slice
+	generateTestSpanEventSlice().MoveAndAppendTo(dest)
+	assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len())
+	for i := 0; i < expectedSlice.Len(); i++ {
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i))
+		assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len()))
+	}
+}
+
+func TestSpanEventSlice_CopyTo(t *testing.T) {
+	dest := NewSpanEventSlice()
+	// Test CopyTo to empty
+	NewSpanEventSlice().CopyTo(dest)
+	assert.EqualValues(t, NewSpanEventSlice(), dest)
+
+	// Test CopyTo larger slice
+	generateTestSpanEventSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestSpanEventSlice(), dest)
+
+	// Test CopyTo same size slice
+	generateTestSpanEventSlice().CopyTo(dest)
+	assert.EqualValues(t, generateTestSpanEventSlice(), dest)
+}
+
+func TestSpanEventSlice_Resize(t *testing.T) {
+	es := generateTestSpanEventSlice()
+	emptyVal := NewSpanEvent()
+	// Test Resize less elements.
+	const resizeSmallLen = 4
+	expectedEs := make(map[*otlptrace.Span_Event]bool, resizeSmallLen)
+	for i := 0; i < resizeSmallLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, resizeSmallLen, len(expectedEs))
+	es.Resize(resizeSmallLen)
+	assert.Equal(t, resizeSmallLen, es.Len())
+	foundEs := make(map[*otlptrace.Span_Event]bool, resizeSmallLen)
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test Resize more elements.
+ const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlptrace.Span_Event]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlptrace.Span_Event]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. + es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestSpanEventSlice_Append(t *testing.T) { + es := generateTestSpanEventSlice() + + emptyVal := NewSpanEvent() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewSpanEvent() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestSpanEvent_CopyTo(t *testing.T) { + ms := NewSpanEvent() + generateTestSpanEvent().CopyTo(ms) + assert.EqualValues(t, generateTestSpanEvent(), ms) +} + +func TestSpanEvent_Timestamp(t *testing.T) { + ms := NewSpanEvent() + assert.EqualValues(t, TimestampUnixNano(0), ms.Timestamp()) + testValTimestamp := TimestampUnixNano(1234567890) + ms.SetTimestamp(testValTimestamp) + assert.EqualValues(t, testValTimestamp, ms.Timestamp()) +} + +func TestSpanEvent_Name(t *testing.T) { + ms := NewSpanEvent() + assert.EqualValues(t, "", ms.Name()) + testValName := "test_name" + ms.SetName(testValName) + assert.EqualValues(t, testValName, ms.Name()) +} + +func TestSpanEvent_Attributes(t *testing.T) { + ms := NewSpanEvent() + assert.EqualValues(t, NewAttributeMap(), ms.Attributes()) + fillTestAttributeMap(ms.Attributes()) + testValAttributes := generateTestAttributeMap() + assert.EqualValues(t, testValAttributes, ms.Attributes()) +} + +func TestSpanEvent_DroppedAttributesCount(t *testing.T) { + ms := NewSpanEvent() + assert.EqualValues(t, uint32(0), ms.DroppedAttributesCount()) + testValDroppedAttributesCount := uint32(17) + ms.SetDroppedAttributesCount(testValDroppedAttributesCount) + assert.EqualValues(t, testValDroppedAttributesCount, ms.DroppedAttributesCount()) +} + +func TestSpanLinkSlice(t *testing.T) { + es := NewSpanLinkSlice() + assert.EqualValues(t, 0, es.Len()) + es = newSpanLinkSlice(&[]*otlptrace.Span_Link{}) + assert.EqualValues(t, 0, es.Len()) + + es.Resize(7) + emptyVal := NewSpanLink() + testVal := generateTestSpanLink() + assert.EqualValues(t, 7, es.Len()) + for i := 0; i < es.Len(); i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + fillTestSpanLink(es.At(i)) + assert.EqualValues(t, testVal, es.At(i)) + } +} + +func TestSpanLinkSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestSpanLinkSlice() + dest := NewSpanLinkSlice() + src := generateTestSpanLinkSlice() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestSpanLinkSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTestSpanLinkSlice(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestSpanLinkSlice().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + 
assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestSpanLinkSlice_CopyTo(t *testing.T) { + dest := NewSpanLinkSlice() + // Test CopyTo to empty + NewSpanLinkSlice().CopyTo(dest) + assert.EqualValues(t, NewSpanLinkSlice(), dest) + + // Test CopyTo larger slice + generateTestSpanLinkSlice().CopyTo(dest) + assert.EqualValues(t, generateTestSpanLinkSlice(), dest) + + // Test CopyTo same size slice + generateTestSpanLinkSlice().CopyTo(dest) + assert.EqualValues(t, generateTestSpanLinkSlice(), dest) +} + +func TestSpanLinkSlice_Resize(t *testing.T) { + es := generateTestSpanLinkSlice() + emptyVal := NewSpanLink() + // Test Resize less elements. + const resizeSmallLen = 4 + expectedEs := make(map[*otlptrace.Span_Link]bool, resizeSmallLen) + for i := 0; i < resizeSmallLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, resizeSmallLen, len(expectedEs)) + es.Resize(resizeSmallLen) + assert.Equal(t, resizeSmallLen, es.Len()) + foundEs := make(map[*otlptrace.Span_Link]bool, resizeSmallLen) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test Resize more elements. + const resizeLargeLen = 7 + oldLen := es.Len() + expectedEs = make(map[*otlptrace.Span_Link]bool, oldLen) + for i := 0; i < oldLen; i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, oldLen, len(expectedEs)) + es.Resize(resizeLargeLen) + assert.Equal(t, resizeLargeLen, es.Len()) + foundEs = make(map[*otlptrace.Span_Link]bool, oldLen) + for i := 0; i < oldLen; i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + for i := oldLen; i < resizeLargeLen; i++ { + assert.EqualValues(t, emptyVal, es.At(i)) + } + + // Test Resize 0 elements. 
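+    // (Resizing to zero empties the slice in place, as asserted below.)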
+ es.Resize(0) + assert.Equal(t, 0, es.Len()) +} + +func TestSpanLinkSlice_Append(t *testing.T) { + es := generateTestSpanLinkSlice() + + emptyVal := NewSpanLink() + es.Append(emptyVal) + assert.EqualValues(t, es.At(7).orig, emptyVal.orig) + + emptyVal2 := NewSpanLink() + es.Append(emptyVal2) + assert.EqualValues(t, es.At(8).orig, emptyVal2.orig) + + assert.Equal(t, 9, es.Len()) +} + +func TestSpanLink_CopyTo(t *testing.T) { + ms := NewSpanLink() + generateTestSpanLink().CopyTo(ms) + assert.EqualValues(t, generateTestSpanLink(), ms) +} + +func TestSpanLink_TraceID(t *testing.T) { + ms := NewSpanLink() + assert.EqualValues(t, NewTraceID([16]byte{}), ms.TraceID()) + testValTraceID := NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) + ms.SetTraceID(testValTraceID) + assert.EqualValues(t, testValTraceID, ms.TraceID()) +} + +func TestSpanLink_SpanID(t *testing.T) { + ms := NewSpanLink() + assert.EqualValues(t, NewSpanID([8]byte{}), ms.SpanID()) + testValSpanID := NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + ms.SetSpanID(testValSpanID) + assert.EqualValues(t, testValSpanID, ms.SpanID()) +} + +func TestSpanLink_TraceState(t *testing.T) { + ms := NewSpanLink() + assert.EqualValues(t, TraceState(""), ms.TraceState()) + testValTraceState := TraceState("congo=congos") + ms.SetTraceState(testValTraceState) + assert.EqualValues(t, testValTraceState, ms.TraceState()) +} + +func TestSpanLink_Attributes(t *testing.T) { + ms := NewSpanLink() + assert.EqualValues(t, NewAttributeMap(), ms.Attributes()) + fillTestAttributeMap(ms.Attributes()) + testValAttributes := generateTestAttributeMap() + assert.EqualValues(t, testValAttributes, ms.Attributes()) +} + +func TestSpanLink_DroppedAttributesCount(t *testing.T) { + ms := NewSpanLink() + assert.EqualValues(t, uint32(0), ms.DroppedAttributesCount()) + testValDroppedAttributesCount := uint32(17) + ms.SetDroppedAttributesCount(testValDroppedAttributesCount) + assert.EqualValues(t, testValDroppedAttributesCount, ms.DroppedAttributesCount()) +} + +func TestSpanStatus_CopyTo(t *testing.T) { + ms := NewSpanStatus() + generateTestSpanStatus().CopyTo(ms) + assert.EqualValues(t, generateTestSpanStatus(), ms) +} + +func TestSpanStatus_Code(t *testing.T) { + ms := NewSpanStatus() + assert.EqualValues(t, StatusCode(0), ms.Code()) + testValCode := StatusCode(1) + ms.SetCode(testValCode) + assert.EqualValues(t, testValCode, ms.Code()) +} + +func TestSpanStatus_DeprecatedCode(t *testing.T) { + ms := NewSpanStatus() + assert.EqualValues(t, DeprecatedStatusCode(0), ms.DeprecatedCode()) + testValDeprecatedCode := DeprecatedStatusCode(1) + ms.SetDeprecatedCode(testValDeprecatedCode) + assert.EqualValues(t, testValDeprecatedCode, ms.DeprecatedCode()) +} + +func TestSpanStatus_Message(t *testing.T) { + ms := NewSpanStatus() + assert.EqualValues(t, "", ms.Message()) + testValMessage := "cancelled" + ms.SetMessage(testValMessage) + assert.EqualValues(t, testValMessage, ms.Message()) +} + +func generateTestResourceSpansSlice() ResourceSpansSlice { + tv := NewResourceSpansSlice() + fillTestResourceSpansSlice(tv) + return tv +} + +func fillTestResourceSpansSlice(tv ResourceSpansSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestResourceSpans(tv.At(i)) + } +} + +func generateTestResourceSpans() ResourceSpans { + tv := NewResourceSpans() + fillTestResourceSpans(tv) + return tv +} + +func fillTestResourceSpans(tv ResourceSpans) { + fillTestResource(tv.Resource()) + fillTestInstrumentationLibrarySpansSlice(tv.InstrumentationLibrarySpans()) +} + +func 
generateTestInstrumentationLibrarySpansSlice() InstrumentationLibrarySpansSlice { + tv := NewInstrumentationLibrarySpansSlice() + fillTestInstrumentationLibrarySpansSlice(tv) + return tv +} + +func fillTestInstrumentationLibrarySpansSlice(tv InstrumentationLibrarySpansSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestInstrumentationLibrarySpans(tv.At(i)) + } +} + +func generateTestInstrumentationLibrarySpans() InstrumentationLibrarySpans { + tv := NewInstrumentationLibrarySpans() + fillTestInstrumentationLibrarySpans(tv) + return tv +} + +func fillTestInstrumentationLibrarySpans(tv InstrumentationLibrarySpans) { + fillTestInstrumentationLibrary(tv.InstrumentationLibrary()) + fillTestSpanSlice(tv.Spans()) +} + +func generateTestSpanSlice() SpanSlice { + tv := NewSpanSlice() + fillTestSpanSlice(tv) + return tv +} + +func fillTestSpanSlice(tv SpanSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestSpan(tv.At(i)) + } +} + +func generateTestSpan() Span { + tv := NewSpan() + fillTestSpan(tv) + return tv +} + +func fillTestSpan(tv Span) { + tv.SetTraceID(NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})) + tv.SetSpanID(NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + tv.SetTraceState(TraceState("congo=congos")) + tv.SetParentSpanID(NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})) + tv.SetName("test_name") + tv.SetKind(SpanKindSERVER) + tv.SetStartTime(TimestampUnixNano(1234567890)) + tv.SetEndTime(TimestampUnixNano(1234567890)) + fillTestAttributeMap(tv.Attributes()) + tv.SetDroppedAttributesCount(uint32(17)) + fillTestSpanEventSlice(tv.Events()) + tv.SetDroppedEventsCount(uint32(17)) + fillTestSpanLinkSlice(tv.Links()) + tv.SetDroppedLinksCount(uint32(17)) + fillTestSpanStatus(tv.Status()) +} + +func generateTestSpanEventSlice() SpanEventSlice { + tv := NewSpanEventSlice() + fillTestSpanEventSlice(tv) + return tv +} + +func fillTestSpanEventSlice(tv SpanEventSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestSpanEvent(tv.At(i)) + } +} + +func generateTestSpanEvent() SpanEvent { + tv := NewSpanEvent() + fillTestSpanEvent(tv) + return tv +} + +func fillTestSpanEvent(tv SpanEvent) { + tv.SetTimestamp(TimestampUnixNano(1234567890)) + tv.SetName("test_name") + fillTestAttributeMap(tv.Attributes()) + tv.SetDroppedAttributesCount(uint32(17)) +} + +func generateTestSpanLinkSlice() SpanLinkSlice { + tv := NewSpanLinkSlice() + fillTestSpanLinkSlice(tv) + return tv +} + +func fillTestSpanLinkSlice(tv SpanLinkSlice) { + tv.Resize(7) + for i := 0; i < tv.Len(); i++ { + fillTestSpanLink(tv.At(i)) + } +} + +func generateTestSpanLink() SpanLink { + tv := NewSpanLink() + fillTestSpanLink(tv) + return tv +} + +func fillTestSpanLink(tv SpanLink) { + tv.SetTraceID(NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})) + tv.SetSpanID(NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + tv.SetTraceState(TraceState("congo=congos")) + fillTestAttributeMap(tv.Attributes()) + tv.SetDroppedAttributesCount(uint32(17)) +} + +func generateTestSpanStatus() SpanStatus { + tv := NewSpanStatus() + fillTestSpanStatus(tv) + return tv +} + +func fillTestSpanStatus(tv SpanStatus) { + tv.SetCode(StatusCode(1)) + tv.SetDeprecatedCode(DeprecatedStatusCode(1)) + tv.SetMessage("cancelled") +} diff --git a/internal/otel_collector/consumer/pdata/log.go b/internal/otel_collector/consumer/pdata/log.go new file mode 100644 index 00000000000..fef6a4f2aa0 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/log.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry 
Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pdata
+
+import (
+    "go.opentelemetry.io/collector/internal"
+    otlpcollectorlog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1"
+    otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1"
+)
+
+// This file defines in-memory data structures to represent logs.
+
+// Logs is the top-level struct that is propagated through the logs pipeline.
+//
+// This is a reference type (like builtin map).
+//
+// Must use NewLogs functions to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Logs struct {
+    orig *[]*otlplogs.ResourceLogs
+}
+
+// NewLogs creates a new Logs.
+func NewLogs() Logs {
+    orig := []*otlplogs.ResourceLogs(nil)
+    return Logs{&orig}
+}
+
+// LogsFromInternalRep creates the internal Logs representation from the ProtoBuf. Should
+// not be used outside this module. This is intended to be used only by OTLP exporter and
+// File exporter, which legitimately need to work with OTLP Protobuf structs.
+func LogsFromInternalRep(logs internal.OtlpLogsWrapper) Logs {
+    return Logs{logs.Orig}
+}
+
+// InternalRep returns internal representation of the logs. Should not be used outside
+// this module. This is intended to be used only by OTLP exporter and File exporter,
+// which legitimately need to work with OTLP Protobuf structs.
+func (ld Logs) InternalRep() internal.OtlpLogsWrapper {
+    return internal.OtlpLogsWrapper{Orig: ld.orig}
+}
+
+// ToOtlpProtoBytes converts the internal Logs to OTLP Collector ExportLogsServiceRequest
+// ProtoBuf bytes. This is intended to export OTLP Protobuf bytes for OTLP/HTTP transports.
+func (ld Logs) ToOtlpProtoBytes() ([]byte, error) {
+    logs := otlpcollectorlog.ExportLogsServiceRequest{
+        ResourceLogs: *ld.orig,
+    }
+    return logs.Marshal()
+}
+
+// FromOtlpProtoBytes converts OTLP Collector ExportLogsServiceRequest
+// ProtoBuf bytes to the internal Logs. Overrides current data.
+// Calling this function on zero-initialized structure causes panic.
+// Use it with NewLogs or on existing initialized Logs.
+func (ld Logs) FromOtlpProtoBytes(data []byte) error {
+    logs := otlpcollectorlog.ExportLogsServiceRequest{}
+    if err := logs.Unmarshal(data); err != nil {
+        return err
+    }
+    *ld.orig = logs.ResourceLogs
+    return nil
+}
+
+// Clone returns a copy of Logs.
+func (ld Logs) Clone() Logs {
+    rls := NewResourceLogsSlice()
+    ld.ResourceLogs().CopyTo(rls)
+    return Logs(rls)
+}
+
+// LogRecordCount calculates the total number of log records.
+func (ld Logs) LogRecordCount() int {
+    logCount := 0
+    rss := ld.ResourceLogs()
+    for i := 0; i < rss.Len(); i++ {
+        rs := rss.At(i)
+        ill := rs.InstrumentationLibraryLogs()
+        for i := 0; i < ill.Len(); i++ {
+            logs := ill.At(i)
+            logCount += logs.Logs().Len()
+        }
+    }
+    return logCount
+}
+
+// SizeBytes returns the number of bytes in the internal representation of the
+// logs.
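+//
+// For example (a sketch): for ld := NewLogs(), SizeBytes() returns 0; the
+// value is the sum of Size() over the underlying protobuf messages, so it
+// grows as ResourceLogs entries are populated.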
+func (ld Logs) SizeBytes() int { + size := 0 + for i := range *ld.orig { + size += (*ld.orig)[i].Size() + } + return size +} + +func (ld Logs) ResourceLogs() ResourceLogsSlice { + return ResourceLogsSlice(ld) +} + +// SeverityNumber is the public alias of otlplogs.SeverityNumber from internal package. +type SeverityNumber otlplogs.SeverityNumber + +const ( + SeverityNumberUNDEFINED = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) + SeverityNumberTRACE = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE) + SeverityNumberTRACE2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2) + SeverityNumberTRACE3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3) + SeverityNumberTRACE4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4) + SeverityNumberDEBUG = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG) + SeverityNumberDEBUG2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2) + SeverityNumberDEBUG3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3) + SeverityNumberDEBUG4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4) + SeverityNumberINFO = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO) + SeverityNumberINFO2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2) + SeverityNumberINFO3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3) + SeverityNumberINFO4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4) + SeverityNumberWARN = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN) + SeverityNumberWARN2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2) + SeverityNumberWARN3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3) + SeverityNumberWARN4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4) + SeverityNumberERROR = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR) + SeverityNumberERROR2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2) + SeverityNumberERROR3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3) + SeverityNumberERROR4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4) + SeverityNumberFATAL = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL) + SeverityNumberFATAL2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2) + SeverityNumberFATAL3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3) + SeverityNumberFATAL4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4) +) diff --git a/internal/otel_collector/consumer/pdata/log_test.go b/internal/otel_collector/consumer/pdata/log_test.go new file mode 100644 index 00000000000..c0f35c34ab4 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/log_test.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/internal" + otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" +) + +func TestLogRecordCount(t *testing.T) { + md := NewLogs() + assert.EqualValues(t, 0, md.LogRecordCount()) + + md.ResourceLogs().Resize(1) + assert.EqualValues(t, 0, md.LogRecordCount()) + + md.ResourceLogs().At(0).InstrumentationLibraryLogs().Resize(1) + assert.EqualValues(t, 0, md.LogRecordCount()) + + md.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().Resize(1) + assert.EqualValues(t, 1, md.LogRecordCount()) + + rms := md.ResourceLogs() + rms.Resize(3) + rms.At(0).InstrumentationLibraryLogs().Resize(1) + rms.At(0).InstrumentationLibraryLogs().At(0).Logs().Resize(1) + rms.At(1).InstrumentationLibraryLogs().Resize(1) + rms.At(2).InstrumentationLibraryLogs().Resize(1) + rms.At(2).InstrumentationLibraryLogs().At(0).Logs().Resize(5) + assert.EqualValues(t, 6, md.LogRecordCount()) +} + +func TestLogRecordCountWithEmpty(t *testing.T) { + assert.EqualValues(t, 0, LogsFromInternalRep(internal.LogsFromOtlp([]*otlplogs.ResourceLogs{{}})).LogRecordCount()) + assert.EqualValues(t, 0, LogsFromInternalRep(internal.LogsFromOtlp([]*otlplogs.ResourceLogs{ + { + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{{}}, + }, + })).LogRecordCount()) + assert.EqualValues(t, 1, LogsFromInternalRep(internal.LogsFromOtlp([]*otlplogs.ResourceLogs{ + { + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{{}}, + }, + }, + }, + })).LogRecordCount()) +} + +func TestToFromLogProto(t *testing.T) { + otlp := []*otlplogs.ResourceLogs(nil) + td := LogsFromInternalRep(internal.LogsFromOtlp(otlp)) + assert.EqualValues(t, NewLogs(), td) + assert.EqualValues(t, otlp, *td.orig) +} + +func TestLogsToFromOtlpProtoBytes(t *testing.T) { + send := NewLogs() + fillTestResourceLogsSlice(send.ResourceLogs()) + bytes, err := send.ToOtlpProtoBytes() + assert.NoError(t, err) + + recv := NewLogs() + err = recv.FromOtlpProtoBytes(bytes) + assert.NoError(t, err) + assert.EqualValues(t, send, recv) +} + +func TestLogsFromInvalidOtlpProtoBytes(t *testing.T) { + err := NewLogs().FromOtlpProtoBytes([]byte{0xFF}) + assert.EqualError(t, err, "unexpected EOF") +} + +func TestLogsClone(t *testing.T) { + logs := NewLogs() + fillTestResourceLogsSlice(logs.ResourceLogs()) + assert.EqualValues(t, logs, logs.Clone()) +} + +func BenchmarkLogsClone(b *testing.B) { + logs := NewLogs() + fillTestResourceLogsSlice(logs.ResourceLogs()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + clone := logs.Clone() + if clone.ResourceLogs().Len() != logs.ResourceLogs().Len() { + b.Fail() + } + } +} + +func BenchmarkLogsToOtlp(b *testing.B) { + traces := NewLogs() + fillTestResourceLogsSlice(traces.ResourceLogs()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + buf, err := traces.ToOtlpProtoBytes() + require.NoError(b, err) + assert.NotEqual(b, 0, len(buf)) + } +} + +func BenchmarkLogsFromOtlp(b *testing.B) { + baseLogs := NewLogs() + fillTestResourceLogsSlice(baseLogs.ResourceLogs()) + buf, err := baseLogs.ToOtlpProtoBytes() + require.NoError(b, err) + assert.NotEqual(b, 0, len(buf)) + b.ResetTimer() + b.ReportAllocs() + for n := 0; n < b.N; n++ { + traces := NewLogs() + require.NoError(b, traces.FromOtlpProtoBytes(buf)) + assert.Equal(b, baseLogs.ResourceLogs().Len(), traces.ResourceLogs().Len()) + } +} diff --git 
a/internal/otel_collector/consumer/pdata/metric.go b/internal/otel_collector/consumer/pdata/metric.go
new file mode 100644
index 00000000000..d415c9f2001
--- /dev/null
+++ b/internal/otel_collector/consumer/pdata/metric.go
@@ -0,0 +1,314 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pdata
+
+import (
+    otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1"
+    otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+)
+
+// AggregationTemporality is the public alias of otlpmetrics.AggregationTemporality from the internal package.
+type AggregationTemporality otlpmetrics.AggregationTemporality
+
+const (
+    AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
+    AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
+    AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
+)
+
+func (at AggregationTemporality) String() string {
+    return otlpmetrics.AggregationTemporality(at).String()
+}
+
+// Metrics is an opaque type that allows transition to the new internal Metrics data, but also facilitates the
+// transition to the new components.
+//
+// Outside of the core repository the metrics pipeline cannot be converted to the new model since data.MetricData is
+// part of the internal package.
+type Metrics struct {
+    orig *[]*otlpmetrics.ResourceMetrics
+}
+
+// NewMetrics creates a new Metrics.
+func NewMetrics() Metrics {
+    orig := []*otlpmetrics.ResourceMetrics(nil)
+    return Metrics{&orig}
+}
+
+// MetricsFromOtlp creates the internal Metrics representation from the OTLP.
+func MetricsFromOtlp(orig []*otlpmetrics.ResourceMetrics) Metrics {
+    return Metrics{&orig}
+}
+
+// MetricsToOtlp converts the internal Metrics to the OTLP.
+func MetricsToOtlp(md Metrics) []*otlpmetrics.ResourceMetrics {
+    return *md.orig
+}
+
+// ToOtlpProtoBytes converts the internal Metrics to OTLP Collector
+// ExportMetricsServiceRequest ProtoBuf bytes. This is intended to export
+// OTLP Protobuf bytes for OTLP/HTTP transports.
+func (md Metrics) ToOtlpProtoBytes() ([]byte, error) {
+    metrics := otlpcollectormetrics.ExportMetricsServiceRequest{
+        ResourceMetrics: *md.orig,
+    }
+    return metrics.Marshal()
+}
+
+// FromOtlpProtoBytes converts OTLP Collector ExportMetricsServiceRequest
+// ProtoBuf bytes to the internal Metrics. Overrides current data.
+// Calling this function on zero-initialized structure causes panic.
+// Use it with NewMetrics or on existing initialized Metrics.
+func (md Metrics) FromOtlpProtoBytes(data []byte) error {
+    metrics := otlpcollectormetrics.ExportMetricsServiceRequest{}
+    if err := metrics.Unmarshal(data); err != nil {
+        return err
+    }
+    *md.orig = metrics.ResourceMetrics
+    return nil
+}
+
+// Clone returns a copy of Metrics.
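+//
+// A minimal usage sketch (hypothetical values):
+//
+//    md := NewMetrics()
+//    md.ResourceMetrics().Resize(1)
+//    clone := md.Clone()
+//    // clone is a deep copy; mutating it does not affect md.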
+func (md Metrics) Clone() Metrics { + rms := NewResourceMetricsSlice() + md.ResourceMetrics().CopyTo(rms) + return Metrics(rms) +} + +func (md Metrics) ResourceMetrics() ResourceMetricsSlice { + return newResourceMetricsSlice(md.orig) +} + +// MetricCount calculates the total number of metrics. +func (md Metrics) MetricCount() int { + metricCount := 0 + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + ilms := rm.InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + metricCount += ilm.Metrics().Len() + } + } + return metricCount +} + +// Size returns size in bytes. +func (md Metrics) Size() int { + size := 0 + for i := 0; i < len(*md.orig); i++ { + if (*md.orig)[i] == nil { + continue + } + size += (*(*md.orig)[i]).Size() + } + return size +} + +// MetricAndDataPointCount calculates the total number of metrics and data points. +func (md Metrics) MetricAndDataPointCount() (metricCount int, dataPointCount int) { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + ilms := rm.InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + metrics := ilm.Metrics() + metricCount += metrics.Len() + ms := ilm.Metrics() + for k := 0; k < ms.Len(); k++ { + m := ms.At(k) + switch m.DataType() { + case MetricDataTypeIntGauge: + dataPointCount += m.IntGauge().DataPoints().Len() + case MetricDataTypeDoubleGauge: + dataPointCount += m.DoubleGauge().DataPoints().Len() + case MetricDataTypeIntSum: + dataPointCount += m.IntSum().DataPoints().Len() + case MetricDataTypeDoubleSum: + dataPointCount += m.DoubleSum().DataPoints().Len() + case MetricDataTypeIntHistogram: + dataPointCount += m.IntHistogram().DataPoints().Len() + case MetricDataTypeDoubleHistogram: + dataPointCount += m.DoubleHistogram().DataPoints().Len() + case MetricDataTypeDoubleSummary: + dataPointCount += m.DoubleSummary().DataPoints().Len() + } + } + } + } + return +} + +// MetricDataType specifies the type of data in a Metric. +type MetricDataType int + +const ( + MetricDataTypeNone MetricDataType = iota + MetricDataTypeIntGauge + MetricDataTypeDoubleGauge + MetricDataTypeIntSum + MetricDataTypeDoubleSum + MetricDataTypeIntHistogram + MetricDataTypeDoubleHistogram + MetricDataTypeDoubleSummary +) + +func (mdt MetricDataType) String() string { + switch mdt { + case MetricDataTypeNone: + return "None" + case MetricDataTypeIntGauge: + return "IntGauge" + case MetricDataTypeDoubleGauge: + return "DoubleGauge" + case MetricDataTypeIntSum: + return "IntSum" + case MetricDataTypeDoubleSum: + return "DoubleSum" + case MetricDataTypeIntHistogram: + return "IntHistogram" + case MetricDataTypeDoubleHistogram: + return "DoubleHistogram" + case MetricDataTypeDoubleSummary: + return "DoubleSummary" + } + return "" +} + +// DataType returns the type of the data for this Metric. +// Calling this function on zero-initialized Metric will cause a panic. 
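+//
+// A typical caller switches on the type before accessing the typed data,
+// for example (a sketch mirroring MetricAndDataPointCount above):
+//
+//    switch m.DataType() {
+//    case MetricDataTypeIntGauge:
+//        dps := m.IntGauge().DataPoints()
+//        _ = dps.Len()
+//    case MetricDataTypeDoubleSum:
+//        dps := m.DoubleSum().DataPoints()
+//        _ = dps.Len()
+//    }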
+func (ms Metric) DataType() MetricDataType {
+    switch ms.orig.Data.(type) {
+    case *otlpmetrics.Metric_IntGauge:
+        return MetricDataTypeIntGauge
+    case *otlpmetrics.Metric_DoubleGauge:
+        return MetricDataTypeDoubleGauge
+    case *otlpmetrics.Metric_IntSum:
+        return MetricDataTypeIntSum
+    case *otlpmetrics.Metric_DoubleSum:
+        return MetricDataTypeDoubleSum
+    case *otlpmetrics.Metric_IntHistogram:
+        return MetricDataTypeIntHistogram
+    case *otlpmetrics.Metric_DoubleHistogram:
+        return MetricDataTypeDoubleHistogram
+    case *otlpmetrics.Metric_DoubleSummary:
+        return MetricDataTypeDoubleSummary
+    }
+    return MetricDataTypeNone
+}
+
+// SetDataType clears any existing data and initializes it with empty data of the given type.
+// Calling this function on zero-initialized Metric will cause a panic.
+func (ms Metric) SetDataType(ty MetricDataType) {
+    switch ty {
+    case MetricDataTypeIntGauge:
+        ms.orig.Data = &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}}
+    case MetricDataTypeDoubleGauge:
+        ms.orig.Data = &otlpmetrics.Metric_DoubleGauge{DoubleGauge: &otlpmetrics.DoubleGauge{}}
+    case MetricDataTypeIntSum:
+        ms.orig.Data = &otlpmetrics.Metric_IntSum{IntSum: &otlpmetrics.IntSum{}}
+    case MetricDataTypeDoubleSum:
+        ms.orig.Data = &otlpmetrics.Metric_DoubleSum{DoubleSum: &otlpmetrics.DoubleSum{}}
+    case MetricDataTypeIntHistogram:
+        ms.orig.Data = &otlpmetrics.Metric_IntHistogram{IntHistogram: &otlpmetrics.IntHistogram{}}
+    case MetricDataTypeDoubleHistogram:
+        ms.orig.Data = &otlpmetrics.Metric_DoubleHistogram{DoubleHistogram: &otlpmetrics.DoubleHistogram{}}
+    case MetricDataTypeDoubleSummary:
+        ms.orig.Data = &otlpmetrics.Metric_DoubleSummary{DoubleSummary: &otlpmetrics.DoubleSummary{}}
+    }
+}
+
+// IntGauge returns the data as IntGauge.
+// Calling this function when DataType() != MetricDataTypeIntGauge will cause a panic.
+// Calling this function on zero-initialized Metric will cause a panic.
+func (ms Metric) IntGauge() IntGauge {
+    return newIntGauge(ms.orig.Data.(*otlpmetrics.Metric_IntGauge).IntGauge)
+}
+
+// DoubleGauge returns the data as DoubleGauge.
+// Calling this function when DataType() != MetricDataTypeDoubleGauge will cause a panic.
+// Calling this function on zero-initialized Metric will cause a panic.
+func (ms Metric) DoubleGauge() DoubleGauge {
+    return newDoubleGauge(ms.orig.Data.(*otlpmetrics.Metric_DoubleGauge).DoubleGauge)
+}
+
+// IntSum returns the data as IntSum.
+// Calling this function when DataType() != MetricDataTypeIntSum will cause a panic.
+// Calling this function on zero-initialized Metric will cause a panic.
+func (ms Metric) IntSum() IntSum {
+    return newIntSum(ms.orig.Data.(*otlpmetrics.Metric_IntSum).IntSum)
+}
+
+// DoubleSum returns the data as DoubleSum.
+// Calling this function when DataType() != MetricDataTypeDoubleSum will cause a panic.
+// Calling this function on zero-initialized Metric will cause a panic.
+func (ms Metric) DoubleSum() DoubleSum {
+    return newDoubleSum(ms.orig.Data.(*otlpmetrics.Metric_DoubleSum).DoubleSum)
+}
+
+// IntHistogram returns the data as IntHistogram.
+// Calling this function when DataType() != MetricDataTypeIntHistogram will cause a panic.
+// Calling this function on zero-initialized Metric will cause a panic.
+func (ms Metric) IntHistogram() IntHistogram {
+    return newIntHistogram(ms.orig.Data.(*otlpmetrics.Metric_IntHistogram).IntHistogram)
+}
+
+// DoubleHistogram returns the data as DoubleHistogram.
+// Calling this function when DataType() != MetricDataTypeDoubleHistogram will cause a panic.
+// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) DoubleHistogram() DoubleHistogram { + return newDoubleHistogram(ms.orig.Data.(*otlpmetrics.Metric_DoubleHistogram).DoubleHistogram) +} + +// DoubleSummary returns the data as DoubleSummary. +// Calling this function when DataType() != MetricDataTypeDoubleSummary will cause a panic. +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) DoubleSummary() DoubleSummary { + return newDoubleSummary(ms.orig.Data.(*otlpmetrics.Metric_DoubleSummary).DoubleSummary) +} + +func copyData(src, dest *otlpmetrics.Metric) { + switch srcData := (src).Data.(type) { + case *otlpmetrics.Metric_IntGauge: + data := &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}} + newIntGauge(srcData.IntGauge).CopyTo(newIntGauge(data.IntGauge)) + dest.Data = data + case *otlpmetrics.Metric_DoubleGauge: + data := &otlpmetrics.Metric_DoubleGauge{DoubleGauge: &otlpmetrics.DoubleGauge{}} + newDoubleGauge(srcData.DoubleGauge).CopyTo(newDoubleGauge(data.DoubleGauge)) + dest.Data = data + case *otlpmetrics.Metric_IntSum: + data := &otlpmetrics.Metric_IntSum{IntSum: &otlpmetrics.IntSum{}} + newIntSum(srcData.IntSum).CopyTo(newIntSum(data.IntSum)) + dest.Data = data + case *otlpmetrics.Metric_DoubleSum: + data := &otlpmetrics.Metric_DoubleSum{DoubleSum: &otlpmetrics.DoubleSum{}} + newDoubleSum(srcData.DoubleSum).CopyTo(newDoubleSum(data.DoubleSum)) + dest.Data = data + case *otlpmetrics.Metric_IntHistogram: + data := &otlpmetrics.Metric_IntHistogram{IntHistogram: &otlpmetrics.IntHistogram{}} + newIntHistogram(srcData.IntHistogram).CopyTo(newIntHistogram(data.IntHistogram)) + dest.Data = data + case *otlpmetrics.Metric_DoubleHistogram: + data := &otlpmetrics.Metric_DoubleHistogram{DoubleHistogram: &otlpmetrics.DoubleHistogram{}} + newDoubleHistogram(srcData.DoubleHistogram).CopyTo(newDoubleHistogram(data.DoubleHistogram)) + dest.Data = data + case *otlpmetrics.Metric_DoubleSummary: + data := &otlpmetrics.Metric_DoubleSummary{DoubleSummary: &otlpmetrics.DoubleSummary{}} + newDoubleSummary(srcData.DoubleSummary).CopyTo(newDoubleSummary(data.DoubleSummary)) + dest.Data = data + } +} diff --git a/internal/otel_collector/consumer/pdata/metric_test.go b/internal/otel_collector/consumer/pdata/metric_test.go new file mode 100644 index 00000000000..70477a88558 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/metric_test.go @@ -0,0 +1,988 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package pdata
+
+import (
+    "testing"
+
+    gogoproto "github.com/gogo/protobuf/proto"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    goproto "google.golang.org/protobuf/proto"
+    "google.golang.org/protobuf/types/known/emptypb"
+
+    otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"
+    otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+    otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1"
+)
+
+const (
+    startTime = uint64(12578940000000012345)
+    endTime = uint64(12578940000000054321)
+)
+
+func TestCopyData(t *testing.T) {
+    tests := []struct {
+        name string
+        src *otlpmetrics.Metric
+    }{
+        {
+            name: "IntGauge",
+            src: &otlpmetrics.Metric{
+                Data: &otlpmetrics.Metric_IntGauge{
+                    IntGauge: &otlpmetrics.IntGauge{},
+                },
+            },
+        },
+        {
+            name: "DoubleGauge",
+            src: &otlpmetrics.Metric{
+                Data: &otlpmetrics.Metric_DoubleGauge{
+                    DoubleGauge: &otlpmetrics.DoubleGauge{},
+                },
+            },
+        },
+        {
+            name: "IntSum",
+            src: &otlpmetrics.Metric{
+                Data: &otlpmetrics.Metric_IntSum{
+                    IntSum: &otlpmetrics.IntSum{},
+                },
+            },
+        },
+        {
+            name: "DoubleSum",
+            src: &otlpmetrics.Metric{
+                Data: &otlpmetrics.Metric_DoubleSum{
+                    DoubleSum: &otlpmetrics.DoubleSum{},
+                },
+            },
+        },
+        {
+            name: "IntHistogram",
+            src: &otlpmetrics.Metric{
+                Data: &otlpmetrics.Metric_IntHistogram{
+                    IntHistogram: &otlpmetrics.IntHistogram{},
+                },
+            },
+        },
+        {
+            name: "DoubleHistogram",
+            src: &otlpmetrics.Metric{
+                Data: &otlpmetrics.Metric_DoubleHistogram{
+                    DoubleHistogram: &otlpmetrics.DoubleHistogram{},
+                },
+            },
+        },
+    }
+
+    for _, test := range tests {
+        t.Run(test.name, func(t *testing.T) {
+            dest := &otlpmetrics.Metric{}
+            assert.Nil(t, dest.Data)
+            assert.NotNil(t, test.src.Data)
+            copyData(test.src, dest)
+            assert.EqualValues(t, test.src, dest)
+        })
+    }
+}
+
+func TestDataType(t *testing.T) {
+    m := NewMetric()
+    assert.Equal(t, MetricDataTypeNone, m.DataType())
+    m.SetDataType(MetricDataTypeIntGauge)
+    assert.Equal(t, MetricDataTypeIntGauge, m.DataType())
+    m.SetDataType(MetricDataTypeDoubleGauge)
+    assert.Equal(t, MetricDataTypeDoubleGauge, m.DataType())
+    m.SetDataType(MetricDataTypeIntSum)
+    assert.Equal(t, MetricDataTypeIntSum, m.DataType())
+    m.SetDataType(MetricDataTypeDoubleSum)
+    assert.Equal(t, MetricDataTypeDoubleSum, m.DataType())
+    m.SetDataType(MetricDataTypeIntHistogram)
+    assert.Equal(t, MetricDataTypeIntHistogram, m.DataType())
+    m.SetDataType(MetricDataTypeDoubleHistogram)
+    assert.Equal(t, MetricDataTypeDoubleHistogram, m.DataType())
+    m.SetDataType(MetricDataTypeDoubleSummary)
+    assert.Equal(t, MetricDataTypeDoubleSummary, m.DataType())
+}
+
+func TestResourceMetricsWireCompatibility(t *testing.T) {
+    // This test verifies that OTLP ProtoBufs generated using the goproto lib in the
+    // opentelemetry-proto repository and OTLP ProtoBufs generated using the gogoproto
+    // lib in this repository are wire compatible.
+
+    // Generate ResourceMetrics as pdata struct.
+    pdataRM := generateTestResourceMetrics()
+
+    // Marshal its underlying ProtoBuf to wire.
+    wire1, err := gogoproto.Marshal(pdataRM.orig)
+    assert.NoError(t, err)
+    assert.NotNil(t, wire1)
+
+    // Unmarshal from the wire to OTLP Protobuf in goproto's representation.
+    var goprotoMessage emptypb.Empty
+    err = goproto.Unmarshal(wire1, &goprotoMessage)
+    assert.NoError(t, err)
+
+    // Marshal to the wire again.
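+    // (emptypb.Empty declares no fields, so the whole payload survives as
+    // unknown fields and is re-emitted verbatim by the Marshal call below;
+    // that is what makes this round-trip a faithful wire copy.)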
+    wire2, err := goproto.Marshal(&goprotoMessage)
+    assert.NoError(t, err)
+    assert.NotNil(t, wire2)
+
+    // Unmarshal from the wire into gogoproto's representation.
+    var gogoprotoRM otlpmetrics.ResourceMetrics
+    err = gogoproto.Unmarshal(wire2, &gogoprotoRM)
+    assert.NoError(t, err)
+
+    // Now compare that the original and final ProtoBuf messages are the same.
+    // This proves that goproto and gogoproto marshaling/unmarshaling are wire compatible.
+    assert.True(t, assert.EqualValues(t, pdataRM.orig, &gogoprotoRM))
+}
+
+func TestMetricCount(t *testing.T) {
+    md := NewMetrics()
+    assert.EqualValues(t, 0, md.MetricCount())
+
+    md.ResourceMetrics().Resize(1)
+    assert.EqualValues(t, 0, md.MetricCount())
+
+    md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().Resize(1)
+    assert.EqualValues(t, 0, md.MetricCount())
+
+    md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().Resize(1)
+    assert.EqualValues(t, 1, md.MetricCount())
+
+    rms := md.ResourceMetrics()
+    rms.Resize(3)
+    rms.At(0).InstrumentationLibraryMetrics().Resize(1)
+    rms.At(0).InstrumentationLibraryMetrics().At(0).Metrics().Resize(1)
+    rms.At(1).InstrumentationLibraryMetrics().Resize(1)
+    rms.At(2).InstrumentationLibraryMetrics().Resize(1)
+    rms.At(2).InstrumentationLibraryMetrics().At(0).Metrics().Resize(5)
+    assert.EqualValues(t, 6, md.MetricCount())
+}
+
+func TestMetricSize(t *testing.T) {
+    md := NewMetrics()
+    assert.Equal(t, 0, md.Size())
+    rms := md.ResourceMetrics()
+    rms.Resize(1)
+    rms.At(0).InstrumentationLibraryMetrics().Resize(1)
+    rms.At(0).InstrumentationLibraryMetrics().At(0).Metrics().Resize(1)
+    metric := rms.At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0)
+    metric.SetDataType(MetricDataTypeDoubleHistogram)
+    doubleHistogram := metric.DoubleHistogram()
+    doubleHistogram.DataPoints().Resize(1)
+    doubleHistogram.DataPoints().At(0).SetCount(123)
+    doubleHistogram.DataPoints().At(0).SetSum(123)
+    otlp := MetricsToOtlp(md)
+    size := 0
+    sizeBytes := 0
+    for _, rm := range otlp {
+        size += rm.Size()
+        bts, err := rm.Marshal()
+        require.NoError(t, err)
+        sizeBytes += len(bts)
+    }
+    assert.Equal(t, size, md.Size())
+    assert.Equal(t, sizeBytes, md.Size())
+}
+
+func TestMetricsSizeWithNil(t *testing.T) {
+    assert.Equal(t, 0, MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{nil}).Size())
+}
+
+func TestMetricCountWithEmpty(t *testing.T) {
+    assert.EqualValues(t, 0, MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{{}}).MetricCount())
+    assert.EqualValues(t, 0, MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{
+        {
+            InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{{}},
+        },
+    }).MetricCount())
+    assert.EqualValues(t, 1, MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{
+        {
+            InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{
+                {
+                    Metrics: []*otlpmetrics.Metric{{}},
+                },
+            },
+        },
+    }).MetricCount())
+}
+
+func TestMetricAndDataPointCount(t *testing.T) {
+    md := NewMetrics()
+    ms, dps := md.MetricAndDataPointCount()
+    assert.EqualValues(t, 0, ms)
+    assert.EqualValues(t, 0, dps)
+
+    rms := md.ResourceMetrics()
+    rms.Resize(1)
+    ms, dps = md.MetricAndDataPointCount()
+    assert.EqualValues(t, 0, ms)
+    assert.EqualValues(t, 0, dps)
+
+    ilms := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics()
+    ilms.Resize(1)
+    ms, dps = md.MetricAndDataPointCount()
+    assert.EqualValues(t, 0, ms)
+    assert.EqualValues(t, 0, dps)
+
+    ilms.At(0).Metrics().Resize(1)
+    ms, dps = md.MetricAndDataPointCount()
+    assert.EqualValues(t, 1, ms)
+
assert.EqualValues(t, 0, dps) + ilms.At(0).Metrics().At(0).SetDataType(MetricDataTypeIntSum) + intSum := ilms.At(0).Metrics().At(0).IntSum() + intSum.DataPoints().Resize(3) + _, dps = md.MetricAndDataPointCount() + assert.EqualValues(t, 3, dps) + + md = NewMetrics() + rms = md.ResourceMetrics() + rms.Resize(3) + rms.At(0).InstrumentationLibraryMetrics().Resize(1) + rms.At(0).InstrumentationLibraryMetrics().At(0).Metrics().Resize(1) + rms.At(1).InstrumentationLibraryMetrics().Resize(1) + rms.At(2).InstrumentationLibraryMetrics().Resize(1) + ilms = rms.At(2).InstrumentationLibraryMetrics() + ilms.Resize(1) + ilms.At(0).Metrics().Resize(5) + ms, dps = md.MetricAndDataPointCount() + assert.EqualValues(t, 6, ms) + assert.EqualValues(t, 0, dps) + ilms.At(0).Metrics().At(1).SetDataType(MetricDataTypeDoubleGauge) + doubleGauge := ilms.At(0).Metrics().At(1).DoubleGauge() + doubleGauge.DataPoints().Resize(1) + ilms.At(0).Metrics().At(3).SetDataType(MetricDataTypeIntHistogram) + intHistogram := ilms.At(0).Metrics().At(3).IntHistogram() + intHistogram.DataPoints().Resize(3) + ms, dps = md.MetricAndDataPointCount() + assert.EqualValues(t, 6, ms) + assert.EqualValues(t, 4, dps) +} + +func TestMetricAndDataPointCountWithEmpty(t *testing.T) { + ms, dps := MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{{}}).MetricAndDataPointCount() + assert.EqualValues(t, 0, ms) + assert.EqualValues(t, 0, dps) + + ms, dps = MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{{}}, + }, + }).MetricAndDataPointCount() + assert.EqualValues(t, 0, ms) + assert.EqualValues(t, 0, dps) + + ms, dps = MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{{}}, + }, + }, + }, + }).MetricAndDataPointCount() + assert.EqualValues(t, 1, ms) + assert.EqualValues(t, 0, dps) + + ms, dps = MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{{ + Data: &otlpmetrics.Metric_DoubleGauge{ + DoubleGauge: &otlpmetrics.DoubleGauge{ + DataPoints: []*otlpmetrics.DoubleDataPoint{ + {}, + }, + }, + }, + }}, + }, + }, + }, + }).MetricAndDataPointCount() + assert.EqualValues(t, 1, ms) + assert.EqualValues(t, 1, dps) + +} + +func TestMetricAndDataPointCountWithNilDataPoints(t *testing.T) { + metrics := NewMetrics() + metrics.ResourceMetrics().Resize(1) + rm := metrics.ResourceMetrics().At(0) + rm.InstrumentationLibraryMetrics().Resize(1) + ilm := rm.InstrumentationLibraryMetrics().At(0) + intGauge := NewMetric() + ilm.Metrics().Append(intGauge) + intGauge.SetDataType(MetricDataTypeIntGauge) + doubleGauge := NewMetric() + ilm.Metrics().Append(doubleGauge) + doubleGauge.SetDataType(MetricDataTypeDoubleGauge) + intHistogram := NewMetric() + ilm.Metrics().Append(intHistogram) + intHistogram.SetDataType(MetricDataTypeIntHistogram) + doubleHistogram := NewMetric() + ilm.Metrics().Append(doubleHistogram) + doubleHistogram.SetDataType(MetricDataTypeDoubleHistogram) + intSum := NewMetric() + ilm.Metrics().Append(intSum) + intSum.SetDataType(MetricDataTypeIntSum) + doubleSum := NewMetric() + ilm.Metrics().Append(doubleSum) + doubleSum.SetDataType(MetricDataTypeDoubleSum) + + ms, dps := metrics.MetricAndDataPointCount() + + assert.EqualValues(t, 6, ms) + assert.EqualValues(t, 0, dps) +} + +func TestOtlpToInternalReadOnly(t *testing.T) { + metricData := 
MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric(), generateTestProtoDoubleSumMetric(), generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + }) + resourceMetrics := metricData.ResourceMetrics() + assert.EqualValues(t, 1, resourceMetrics.Len()) + + resourceMetric := resourceMetrics.At(0) + assert.EqualValues(t, NewAttributeMap().InitFromMap(map[string]AttributeValue{ + "string": NewAttributeValueString("string-resource"), + }), resourceMetric.Resource().Attributes()) + metrics := resourceMetric.InstrumentationLibraryMetrics().At(0).Metrics() + assert.EqualValues(t, 3, metrics.Len()) + + // Check int64 metric + metricInt := metrics.At(0) + assert.EqualValues(t, "my_metric_int", metricInt.Name()) + assert.EqualValues(t, "My metric", metricInt.Description()) + assert.EqualValues(t, "ms", metricInt.Unit()) + assert.EqualValues(t, MetricDataTypeIntGauge, metricInt.DataType()) + int64DataPoints := metricInt.IntGauge().DataPoints() + assert.EqualValues(t, 2, int64DataPoints.Len()) + // First point + assert.EqualValues(t, startTime, int64DataPoints.At(0).StartTime()) + assert.EqualValues(t, endTime, int64DataPoints.At(0).Timestamp()) + assert.EqualValues(t, 123, int64DataPoints.At(0).Value()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"key0": "value0"}), int64DataPoints.At(0).LabelsMap()) + // Second point + assert.EqualValues(t, startTime, int64DataPoints.At(1).StartTime()) + assert.EqualValues(t, endTime, int64DataPoints.At(1).Timestamp()) + assert.EqualValues(t, 456, int64DataPoints.At(1).Value()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"key1": "value1"}), int64DataPoints.At(1).LabelsMap()) + + // Check double metric + metricDouble := metrics.At(1) + assert.EqualValues(t, "my_metric_double", metricDouble.Name()) + assert.EqualValues(t, "My metric", metricDouble.Description()) + assert.EqualValues(t, "ms", metricDouble.Unit()) + assert.EqualValues(t, MetricDataTypeDoubleSum, metricDouble.DataType()) + dsd := metricDouble.DoubleSum() + assert.EqualValues(t, AggregationTemporalityCumulative, dsd.AggregationTemporality()) + doubleDataPoints := dsd.DataPoints() + assert.EqualValues(t, 2, doubleDataPoints.Len()) + // First point + assert.EqualValues(t, startTime, doubleDataPoints.At(0).StartTime()) + assert.EqualValues(t, endTime, doubleDataPoints.At(0).Timestamp()) + assert.EqualValues(t, 123.1, doubleDataPoints.At(0).Value()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"key0": "value0"}), doubleDataPoints.At(0).LabelsMap()) + // Second point + assert.EqualValues(t, startTime, doubleDataPoints.At(1).StartTime()) + assert.EqualValues(t, endTime, doubleDataPoints.At(1).Timestamp()) + assert.EqualValues(t, 456.1, doubleDataPoints.At(1).Value()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"key1": "value1"}), doubleDataPoints.At(1).LabelsMap()) + + // Check histogram metric + metricHistogram := metrics.At(2) + assert.EqualValues(t, "my_metric_histogram", metricHistogram.Name()) + assert.EqualValues(t, "My metric", metricHistogram.Description()) + assert.EqualValues(t, "ms", metricHistogram.Unit()) + assert.EqualValues(t, MetricDataTypeDoubleHistogram, metricHistogram.DataType()) + dhd := metricHistogram.DoubleHistogram() + 
assert.EqualValues(t, AggregationTemporalityDelta, dhd.AggregationTemporality()) + histogramDataPoints := dhd.DataPoints() + assert.EqualValues(t, 2, histogramDataPoints.Len()) + // First point + assert.EqualValues(t, startTime, histogramDataPoints.At(0).StartTime()) + assert.EqualValues(t, endTime, histogramDataPoints.At(0).Timestamp()) + assert.EqualValues(t, []float64{1, 2}, histogramDataPoints.At(0).ExplicitBounds()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"key0": "value0"}), histogramDataPoints.At(0).LabelsMap()) + assert.EqualValues(t, []uint64{10, 15, 1}, histogramDataPoints.At(0).BucketCounts()) + // Second point + assert.EqualValues(t, startTime, histogramDataPoints.At(1).StartTime()) + assert.EqualValues(t, endTime, histogramDataPoints.At(1).Timestamp()) + assert.EqualValues(t, []float64{1}, histogramDataPoints.At(1).ExplicitBounds()) + assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{"key1": "value1"}), histogramDataPoints.At(1).LabelsMap()) + assert.EqualValues(t, []uint64{10, 1}, histogramDataPoints.At(1).BucketCounts()) +} + +func TestOtlpToFromInternalReadOnly(t *testing.T) { + metricData := MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric(), generateTestProtoDoubleSumMetric(), generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + }) + // Test that nothing changed + assert.EqualValues(t, []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric(), generateTestProtoDoubleSumMetric(), generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + }, MetricsToOtlp(metricData)) +} + +func TestOtlpToFromInternalIntGaugeMutating(t *testing.T) { + newLabels := NewStringMap().InitFromMap(map[string]string{"k": "v"}) + + metricData := MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric()}, + }, + }, + }, + }) + resourceMetrics := metricData.ResourceMetrics() + metric := resourceMetrics.At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) + // Mutate MetricDescriptor + metric.SetName("new_my_metric_int") + assert.EqualValues(t, "new_my_metric_int", metric.Name()) + metric.SetDescription("My new metric") + assert.EqualValues(t, "My new metric", metric.Description()) + metric.SetUnit("1") + assert.EqualValues(t, "1", metric.Unit()) + // Mutate DataPoints + igd := metric.IntGauge() + assert.EqualValues(t, 2, igd.DataPoints().Len()) + igd.DataPoints().Resize(1) + assert.EqualValues(t, 1, igd.DataPoints().Len()) + int64DataPoints := igd.DataPoints() + int64DataPoints.At(0).SetStartTime(TimestampUnixNano(startTime + 1)) + assert.EqualValues(t, startTime+1, int64DataPoints.At(0).StartTime()) + int64DataPoints.At(0).SetTimestamp(TimestampUnixNano(endTime + 1)) + assert.EqualValues(t, endTime+1, int64DataPoints.At(0).Timestamp()) + int64DataPoints.At(0).SetValue(124) + assert.EqualValues(t, 124, 
int64DataPoints.At(0).Value()) + int64DataPoints.At(0).LabelsMap().Delete("key0") + int64DataPoints.At(0).LabelsMap().Upsert("k", "v") + assert.EqualValues(t, newLabels, int64DataPoints.At(0).LabelsMap()) + + // Test that everything is updated. + assert.EqualValues(t, []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{ + { + Name: "new_my_metric_int", + Description: "My new metric", + Unit: "1", + Data: &otlpmetrics.Metric_IntGauge{ + IntGauge: &otlpmetrics.IntGauge{ + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "k", + Value: "v", + }, + }, + StartTimeUnixNano: startTime + 1, + TimeUnixNano: endTime + 1, + Value: 124, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, MetricsToOtlp(metricData)) +} + +func TestOtlpToFromInternalDoubleSumMutating(t *testing.T) { + newLabels := NewStringMap().InitFromMap(map[string]string{"k": "v"}) + + metricData := MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoDoubleSumMetric()}, + }, + }, + }, + }) + resourceMetrics := metricData.ResourceMetrics() + metric := resourceMetrics.At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) + // Mutate MetricDescriptor + metric.SetName("new_my_metric_double") + assert.EqualValues(t, "new_my_metric_double", metric.Name()) + metric.SetDescription("My new metric") + assert.EqualValues(t, "My new metric", metric.Description()) + metric.SetUnit("1") + assert.EqualValues(t, "1", metric.Unit()) + // Mutate DataPoints + dsd := metric.DoubleSum() + assert.EqualValues(t, 2, dsd.DataPoints().Len()) + dsd.DataPoints().Resize(1) + assert.EqualValues(t, 1, dsd.DataPoints().Len()) + doubleDataPoints := dsd.DataPoints() + doubleDataPoints.At(0).SetStartTime(TimestampUnixNano(startTime + 1)) + assert.EqualValues(t, startTime+1, doubleDataPoints.At(0).StartTime()) + doubleDataPoints.At(0).SetTimestamp(TimestampUnixNano(endTime + 1)) + assert.EqualValues(t, endTime+1, doubleDataPoints.At(0).Timestamp()) + doubleDataPoints.At(0).SetValue(124.1) + assert.EqualValues(t, 124.1, doubleDataPoints.At(0).Value()) + doubleDataPoints.At(0).LabelsMap().Delete("key0") + doubleDataPoints.At(0).LabelsMap().Upsert("k", "v") + assert.EqualValues(t, newLabels, doubleDataPoints.At(0).LabelsMap()) + + // Test that everything is updated. 
+ assert.EqualValues(t, []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{ + { + Name: "new_my_metric_double", + Description: "My new metric", + Unit: "1", + Data: &otlpmetrics.Metric_DoubleSum{ + DoubleSum: &otlpmetrics.DoubleSum{ + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + DataPoints: []*otlpmetrics.DoubleDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "k", + Value: "v", + }, + }, + StartTimeUnixNano: startTime + 1, + TimeUnixNano: endTime + 1, + Value: 124.1, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, MetricsToOtlp(metricData)) +} + +func TestOtlpToFromInternalHistogramMutating(t *testing.T) { + newLabels := NewStringMap().InitFromMap(map[string]string{"k": "v"}) + + metricData := MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + }) + resourceMetrics := metricData.ResourceMetrics() + metric := resourceMetrics.At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) + // Mutate MetricDescriptor + metric.SetName("new_my_metric_histogram") + assert.EqualValues(t, "new_my_metric_histogram", metric.Name()) + metric.SetDescription("My new metric") + assert.EqualValues(t, "My new metric", metric.Description()) + metric.SetUnit("1") + assert.EqualValues(t, "1", metric.Unit()) + // Mutate DataPoints + dhd := metric.DoubleHistogram() + assert.EqualValues(t, 2, dhd.DataPoints().Len()) + dhd.DataPoints().Resize(1) + assert.EqualValues(t, 1, dhd.DataPoints().Len()) + histogramDataPoints := dhd.DataPoints() + histogramDataPoints.At(0).SetStartTime(TimestampUnixNano(startTime + 1)) + assert.EqualValues(t, startTime+1, histogramDataPoints.At(0).StartTime()) + histogramDataPoints.At(0).SetTimestamp(TimestampUnixNano(endTime + 1)) + assert.EqualValues(t, endTime+1, histogramDataPoints.At(0).Timestamp()) + histogramDataPoints.At(0).LabelsMap().Delete("key0") + histogramDataPoints.At(0).LabelsMap().Upsert("k", "v") + assert.EqualValues(t, newLabels, histogramDataPoints.At(0).LabelsMap()) + histogramDataPoints.At(0).SetExplicitBounds([]float64{1}) + assert.EqualValues(t, []float64{1}, histogramDataPoints.At(0).ExplicitBounds()) + histogramDataPoints.At(0).SetBucketCounts([]uint64{21, 32}) + // Test that everything is updated. 
+ assert.EqualValues(t, []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{ + { + Name: "new_my_metric_histogram", + Description: "My new metric", + Unit: "1", + Data: &otlpmetrics.Metric_DoubleHistogram{ + DoubleHistogram: &otlpmetrics.DoubleHistogram{ + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + DataPoints: []*otlpmetrics.DoubleHistogramDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "k", + Value: "v", + }, + }, + StartTimeUnixNano: startTime + 1, + TimeUnixNano: endTime + 1, + BucketCounts: []uint64{21, 32}, + ExplicitBounds: []float64{1}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, MetricsToOtlp(metricData)) +} + +func TestMetricsToFromOtlpProtoBytes(t *testing.T) { + send := NewMetrics() + fillTestResourceMetricsSlice(send.ResourceMetrics()) + bytes, err := send.ToOtlpProtoBytes() + assert.NoError(t, err) + + recv := NewMetrics() + err = recv.FromOtlpProtoBytes(bytes) + assert.NoError(t, err) + assert.EqualValues(t, send, recv) +} + +func TestMetricsFromInvalidOtlpProtoBytes(t *testing.T) { + err := NewMetrics().FromOtlpProtoBytes([]byte{0xFF}) + assert.EqualError(t, err, "unexpected EOF") +} + +func TestMetricsClone(t *testing.T) { + metrics := NewMetrics() + fillTestResourceMetricsSlice(metrics.ResourceMetrics()) + assert.EqualValues(t, metrics, metrics.Clone()) +} + +func BenchmarkMetricsClone(b *testing.B) { + metrics := NewMetrics() + fillTestResourceMetricsSlice(metrics.ResourceMetrics()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + clone := metrics.Clone() + if clone.ResourceMetrics().Len() != metrics.ResourceMetrics().Len() { + b.Fail() + } + } +} + +func BenchmarkOtlpToFromInternal_PassThrough(b *testing.B) { + resourceMetricsList := []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric(), generateTestProtoDoubleSumMetric(), generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + md := MetricsFromOtlp(resourceMetricsList) + MetricsToOtlp(md) + } +} + +func BenchmarkOtlpToFromInternal_IntGauge_MutateOneLabel(b *testing.B) { + resourceMetricsList := []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric()}, + }, + }, + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + md := MetricsFromOtlp(resourceMetricsList) + md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).IntGauge().DataPoints().At(0).LabelsMap().Upsert("key0", "value2") + MetricsToOtlp(md) + } +} + +func BenchmarkOtlpToFromInternal_DoubleSum_MutateOneLabel(b *testing.B) { + resourceMetricsList := []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: 
[]*otlpmetrics.Metric{generateTestProtoDoubleSumMetric()}, + }, + }, + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + md := MetricsFromOtlp(resourceMetricsList) + md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).DoubleSum().DataPoints().At(0).LabelsMap().Upsert("key0", "value2") + MetricsToOtlp(md) + } +} + +func BenchmarkOtlpToFromInternal_HistogramPoints_MutateOneLabel(b *testing.B) { + resourceMetricsList := []*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + md := MetricsFromOtlp(resourceMetricsList) + md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).DoubleHistogram().DataPoints().At(0).LabelsMap().Upsert("key0", "value2") + MetricsToOtlp(md) + } +} + +func BenchmarkMetrics_ToOtlpProtoBytes_PassThrough(b *testing.B) { + metrics := MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{ + { + Resource: generateTestProtoResource(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + InstrumentationLibrary: generateTestProtoInstrumentationLibrary(), + Metrics: []*otlpmetrics.Metric{generateTestProtoIntGaugeMetric(), generateTestProtoDoubleSumMetric(), generateTestProtoDoubleHistogramMetric()}, + }, + }, + }, + }) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, _ = metrics.ToOtlpProtoBytes() + } +} + +func BenchmarkMetricsToOtlp(b *testing.B) { + traces := NewMetrics() + fillTestResourceMetricsSlice(traces.ResourceMetrics()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + buf, err := traces.ToOtlpProtoBytes() + require.NoError(b, err) + assert.NotEqual(b, 0, len(buf)) + } +} + +func BenchmarkMetricsFromOtlp(b *testing.B) { + baseMetrics := NewMetrics() + fillTestResourceMetricsSlice(baseMetrics.ResourceMetrics()) + buf, err := baseMetrics.ToOtlpProtoBytes() + require.NoError(b, err) + assert.NotEqual(b, 0, len(buf)) + b.ResetTimer() + b.ReportAllocs() + for n := 0; n < b.N; n++ { + traces := NewMetrics() + require.NoError(b, traces.FromOtlpProtoBytes(buf)) + assert.Equal(b, baseMetrics.ResourceMetrics().Len(), traces.ResourceMetrics().Len()) + } +} + +func generateTestProtoResource() otlpresource.Resource { + return otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "string", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "string-resource"}}, + }, + }, + } +} + +func generateTestProtoInstrumentationLibrary() otlpcommon.InstrumentationLibrary { + return otlpcommon.InstrumentationLibrary{ + Name: "test", + Version: "", + } +} + +func generateTestProtoIntGaugeMetric() *otlpmetrics.Metric { + return &otlpmetrics.Metric{ + Name: "my_metric_int", + Description: "My metric", + Unit: "ms", + Data: &otlpmetrics.Metric_IntGauge{ + IntGauge: &otlpmetrics.IntGauge{ + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key0", + Value: "value0", + }, + }, + StartTimeUnixNano: startTime, + TimeUnixNano: endTime, + Value: 123, + }, + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key1", + Value: "value1", + }, + }, + StartTimeUnixNano: startTime, + TimeUnixNano: endTime, + Value: 456, + }, + }, + }, + }, + } +} +func generateTestProtoDoubleSumMetric() *otlpmetrics.Metric { + return 
&otlpmetrics.Metric{ + Name: "my_metric_double", + Description: "My metric", + Unit: "ms", + Data: &otlpmetrics.Metric_DoubleSum{ + DoubleSum: &otlpmetrics.DoubleSum{ + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + DataPoints: []*otlpmetrics.DoubleDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key0", + Value: "value0", + }, + }, + StartTimeUnixNano: startTime, + TimeUnixNano: endTime, + Value: 123.1, + }, + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key1", + Value: "value1", + }, + }, + StartTimeUnixNano: startTime, + TimeUnixNano: endTime, + Value: 456.1, + }, + }, + }, + }, + } +} + +func generateTestProtoDoubleHistogramMetric() *otlpmetrics.Metric { + return &otlpmetrics.Metric{ + Name: "my_metric_histogram", + Description: "My metric", + Unit: "ms", + Data: &otlpmetrics.Metric_DoubleHistogram{ + DoubleHistogram: &otlpmetrics.DoubleHistogram{ + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + DataPoints: []*otlpmetrics.DoubleHistogramDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key0", + Value: "value0", + }, + }, + StartTimeUnixNano: startTime, + TimeUnixNano: endTime, + BucketCounts: []uint64{10, 15, 1}, + ExplicitBounds: []float64{1, 2}, + }, + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key1", + Value: "value1", + }, + }, + StartTimeUnixNano: startTime, + TimeUnixNano: endTime, + BucketCounts: []uint64{10, 1}, + ExplicitBounds: []float64{1}, + }, + }, + }, + }, + } +} diff --git a/internal/otel_collector/consumer/pdata/spanid.go b/internal/otel_collector/consumer/pdata/spanid.go new file mode 100644 index 00000000000..52a35a1bfdd --- /dev/null +++ b/internal/otel_collector/consumer/pdata/spanid.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdata + +import ( + "go.opentelemetry.io/collector/internal/data" +) + +// SpanID is an alias of OTLP SpanID data type. +type SpanID data.SpanID + +func InvalidSpanID() SpanID { + return SpanID(data.NewSpanID([8]byte{})) +} + +func NewSpanID(bytes [8]byte) SpanID { + return SpanID(data.NewSpanID(bytes)) +} + +// Bytes returns the byte array representation of the SpanID. +func (t SpanID) Bytes() [8]byte { + return data.SpanID(t).Bytes() +} + +// HexString returns hex representation of the SpanID. +func (t SpanID) HexString() string { + return data.SpanID(t).HexString() +} + +// IsValid returns true if id contains at least one non-zero byte.
+func (t SpanID) IsValid() bool { + return data.SpanID(t).IsValid() +} diff --git a/internal/otel_collector/consumer/pdata/timestamp.go b/internal/otel_collector/consumer/pdata/timestamp.go new file mode 100644 index 00000000000..6178ae55165 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/timestamp.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdata + +import ( + "time" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TimestampToUnixNano(ts *timestamppb.Timestamp) (t TimestampUnixNano) { + if ts == nil { + return + } + return TimestampUnixNano(uint64(ts.AsTime().UnixNano())) +} + +func UnixNanoToTimestamp(u TimestampUnixNano) *timestamppb.Timestamp { + // 0 is a special case and we want to make sure we return nil. + if u == 0 { + return nil + } + return timestamppb.New(UnixNanoToTime(u)) +} + +func UnixNanoToTime(u TimestampUnixNano) time.Time { + // 0 is a special case and we want to make sure we return a time for which IsZero() returns true. + if u == 0 { + return time.Time{} + } + return time.Unix(0, int64(u)).UTC() +} + +func TimeToUnixNano(t time.Time) TimestampUnixNano { + // 0 is a special case and we want to make sure we return a zero timestamp, so that this is the inverse of UnixNanoToTime. + if t.IsZero() { + return 0 + } + return TimestampUnixNano(uint64(t.UnixNano())) +}
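The zero special cases called out in the comments above make these helpers mutually inverse at zero. A short, hedged usage sketch (assuming the vendored `go.opentelemetry.io/collector` module path, as wired up by the `replace` directive in this change):

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	// Zero is special-cased in both directions: the zero time maps to the
	// zero timestamp, and the zero timestamp maps back to a time whose
	// IsZero() is true, with UnixNanoToTimestamp(0) returning nil.
	fmt.Println(pdata.TimeToUnixNano(time.Time{}))   // 0
	fmt.Println(pdata.UnixNanoToTime(0).IsZero())    // true
	fmt.Println(pdata.UnixNanoToTimestamp(0) == nil) // true

	// Non-zero values round-trip exactly through the uint64 nanosecond form.
	now := time.Now().UTC()
	tun := pdata.TimeToUnixNano(now)
	fmt.Println(pdata.UnixNanoToTime(tun).Equal(now)) // true
}
```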
 diff --git a/internal/otel_collector/consumer/pdata/timestamp_test.go b/internal/otel_collector/consumer/pdata/timestamp_test.go new file mode 100644 index 00000000000..eb868b677eb --- /dev/null +++ b/internal/otel_collector/consumer/pdata/timestamp_test.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdata + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestUnixNanosConverters(t *testing.T) { + t1 := time.Date(2020, 03, 24, 1, 13, 23, 789, time.UTC) + tun := TimestampUnixNano(t1.UnixNano()) + + assert.EqualValues(t, uint64(1585012403000000789), tun) + tp := UnixNanoToTimestamp(tun) + assert.EqualValues(t, &timestamppb.Timestamp{Seconds: 1585012403, Nanos: 789}, tp) + assert.EqualValues(t, tun, TimestampToUnixNano(tp)) + assert.EqualValues(t, tun, TimeToUnixNano(t1)) + assert.EqualValues(t, t1, UnixNanoToTime(TimeToUnixNano(t1))) +} + +func TestZeroTimestamps(t *testing.T) { + assert.Zero(t, TimestampToUnixNano(nil)) + assert.Nil(t, UnixNanoToTimestamp(0)) + assert.True(t, UnixNanoToTime(0).IsZero()) + assert.Zero(t, TimeToUnixNano(time.Time{})) +} diff --git a/internal/otel_collector/consumer/pdata/trace.go b/internal/otel_collector/consumer/pdata/trace.go new file mode 100644 index 00000000000..0ccb4078749 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/trace.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdata + +import ( + otlpcollectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" +) + +// This file defines in-memory data structures to represent traces (spans). + +// Traces is the top-level struct that is propagated through the traces pipeline. +// This is the newer version of consumerdata.Traces, but uses a more efficient +// in-memory representation. +type Traces struct { + orig *[]*otlptrace.ResourceSpans +} + +// NewTraces creates a new Traces. +func NewTraces() Traces { + orig := []*otlptrace.ResourceSpans(nil) + return Traces{&orig} +} + +// TracesFromOtlp creates the internal Traces representation from the OTLP. +func TracesFromOtlp(orig []*otlptrace.ResourceSpans) Traces { + return Traces{&orig} +} + +// TracesToOtlp converts the internal Traces to the OTLP. +func TracesToOtlp(td Traces) []*otlptrace.ResourceSpans { + return *td.orig +} + +// ToOtlpProtoBytes converts the internal Traces to OTLP Collector +// ExportTraceServiceRequest ProtoBuf bytes. +func (td Traces) ToOtlpProtoBytes() ([]byte, error) { + traces := otlpcollectortrace.ExportTraceServiceRequest{ + ResourceSpans: *td.orig, + } + return traces.Marshal() +} + +// FromOtlpProtoBytes converts OTLP Collector ExportTraceServiceRequest +// ProtoBuf bytes to the internal Traces. Overrides current data. +// Calling this function on a zero-initialized structure causes a panic. +// Use it with NewTraces or on an existing initialized Traces. +func (td Traces) FromOtlpProtoBytes(data []byte) error { + traces := otlpcollectortrace.ExportTraceServiceRequest{} + if err := traces.Unmarshal(data); err != nil { + return err + } + *td.orig = traces.ResourceSpans + return nil +} + +// Clone returns a copy of Traces.
+func (td Traces) Clone() Traces { + rss := NewResourceSpansSlice() + td.ResourceSpans().CopyTo(rss) + return Traces(rss) +} + +// SpanCount calculates the total number of spans. +func (td Traces) SpanCount() int { + spanCount := 0 + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + ilss := rs.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + spanCount += ilss.At(j).Spans().Len() + } + } + return spanCount +} + +// Size returns size in bytes. +func (td Traces) Size() int { + size := 0 + for i := 0; i < len(*td.orig); i++ { + if (*td.orig)[i] == nil { + continue + } + size += (*td.orig)[i].Size() + } + return size +} + +func (td Traces) ResourceSpans() ResourceSpansSlice { + return newResourceSpansSlice(td.orig) +} + +// TraceState in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header +type TraceState string + +type SpanKind otlptrace.Span_SpanKind + +func (sk SpanKind) String() string { return otlptrace.Span_SpanKind(sk).String() } + +const ( + TraceStateEmpty TraceState = "" +) + +const ( + SpanKindUNSPECIFIED = SpanKind(0) + SpanKindINTERNAL = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL) + SpanKindSERVER = SpanKind(otlptrace.Span_SPAN_KIND_SERVER) + SpanKindCLIENT = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT) + SpanKindPRODUCER = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER) + SpanKindCONSUMER = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER) +) + +// DeprecatedStatusCode is the deprecated status code used previously. +// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status +// Deprecated: use StatusCode instead. +type DeprecatedStatusCode otlptrace.Status_DeprecatedStatusCode + +const ( + DeprecatedStatusCodeOk = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_OK) + DeprecatedStatusCodeCancelled = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_CANCELLED) + DeprecatedStatusCodeUnknownError = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR) + DeprecatedStatusCodeInvalidArgument = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_INVALID_ARGUMENT) + DeprecatedStatusCodeDeadlineExceeded = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED) + DeprecatedStatusCodeNotFound = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_NOT_FOUND) + DeprecatedStatusCodeAlreadyExists = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_ALREADY_EXISTS) + DeprecatedStatusCodePermissionDenied = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_PERMISSION_DENIED) + DeprecatedStatusCodeResourceExhausted = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED) + DeprecatedStatusCodeFailedPrecondition = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_FAILED_PRECONDITION) + DeprecatedStatusCodeAborted = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_ABORTED) + DeprecatedStatusCodeOutOfRange = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_OUT_OF_RANGE) + DeprecatedStatusCodeUnimplemented = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_UNIMPLEMENTED) + DeprecatedStatusCodeInternalError = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_INTERNAL_ERROR) + DeprecatedStatusCodeUnavailable = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_UNAVAILABLE) + DeprecatedStatusCodeDataLoss = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_DATA_LOSS) + 
DeprecatedStatusCodeUnauthenticated = DeprecatedStatusCode(otlptrace.Status_DEPRECATED_STATUS_CODE_UNAUTHENTICATED) +) + +func (sc DeprecatedStatusCode) String() string { + return otlptrace.Status_DeprecatedStatusCode(sc).String() +} + +// StatusCode mirrors the codes defined at +// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status +type StatusCode otlptrace.Status_StatusCode + +const ( + StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET) + StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK) + StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR) +) + +func (sc StatusCode) String() string { return otlptrace.Status_StatusCode(sc).String() } + +// SetCode replaces the code associated with this SpanStatus. +func (ms SpanStatus) SetCode(v StatusCode) { + ms.orig.Code = otlptrace.Status_StatusCode(v) + + // According to OTLP spec we also need to set the deprecated_code field. + // See https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L231 + // + // if code==STATUS_CODE_UNSET then `deprecated_code` MUST be + // set to DEPRECATED_STATUS_CODE_OK. + // + // if code==STATUS_CODE_OK then `deprecated_code` MUST be + // set to DEPRECATED_STATUS_CODE_OK. + // + // if code==STATUS_CODE_ERROR then `deprecated_code` MUST be + // set to DEPRECATED_STATUS_CODE_UNKNOWN_ERROR. + switch v { + case StatusCodeUnset, StatusCodeOk: + ms.SetDeprecatedCode(DeprecatedStatusCodeOk) + case StatusCodeError: + ms.SetDeprecatedCode(DeprecatedStatusCodeUnknownError) + } +} diff --git a/internal/otel_collector/consumer/pdata/trace_test.go b/internal/otel_collector/consumer/pdata/trace_test.go new file mode 100644 index 00000000000..a404f2cda3e --- /dev/null +++ b/internal/otel_collector/consumer/pdata/trace_test.go @@ -0,0 +1,248 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pdata + +import ( + "testing" + + gogoproto "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + goproto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" + + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" +) + +func TestSpanCount(t *testing.T) { + md := NewTraces() + assert.EqualValues(t, 0, md.SpanCount()) + + md.ResourceSpans().Resize(1) + assert.EqualValues(t, 0, md.SpanCount()) + + md.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1) + assert.EqualValues(t, 0, md.SpanCount()) + + md.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + assert.EqualValues(t, 1, md.SpanCount()) + + rms := md.ResourceSpans() + rms.Resize(3) + rms.At(0).InstrumentationLibrarySpans().Resize(1) + rms.At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + rms.At(1).InstrumentationLibrarySpans().Resize(1) + rms.At(2).InstrumentationLibrarySpans().Resize(1) + rms.At(2).InstrumentationLibrarySpans().At(0).Spans().Resize(5) + assert.EqualValues(t, 6, md.SpanCount()) +} + +func TestSize(t *testing.T) { + md := NewTraces() + assert.Equal(t, 0, md.Size()) + rms := md.ResourceSpans() + rms.Resize(1) + rms.At(0).InstrumentationLibrarySpans().Resize(1) + rms.At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + rms.At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetName("foo") + otlp := TracesToOtlp(md) + size := 0 + sizeBytes := 0 + for _, rspans := range otlp { + size += rspans.Size() + bts, err := rspans.Marshal() + require.NoError(t, err) + sizeBytes += len(bts) + } + assert.Equal(t, size, md.Size()) + assert.Equal(t, sizeBytes, md.Size()) +} + +func TestTracesSizeWithNil(t *testing.T) { + assert.Equal(t, 0, TracesFromOtlp([]*otlptrace.ResourceSpans{nil}).Size()) +} + +func TestSpanCountWithEmpty(t *testing.T) { + assert.EqualValues(t, 0, TracesFromOtlp([]*otlptrace.ResourceSpans{{}}).SpanCount()) + assert.EqualValues(t, 0, TracesFromOtlp([]*otlptrace.ResourceSpans{ + { + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{{}}, + }, + }).SpanCount()) + assert.EqualValues(t, 1, TracesFromOtlp([]*otlptrace.ResourceSpans{ + { + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{{}}, + }, + }, + }, + }).SpanCount()) +} + +func TestSpanID(t *testing.T) { + sid := InvalidSpanID() + assert.EqualValues(t, [8]byte{}, sid.Bytes()) + + sid = NewSpanID([8]byte{1, 2, 3, 4, 4, 3, 2, 1}) + assert.EqualValues(t, [8]byte{1, 2, 3, 4, 4, 3, 2, 1}, sid.Bytes()) +} + +func TestTraceID(t *testing.T) { + tid := InvalidTraceID() + assert.EqualValues(t, [16]byte{}, tid.Bytes()) + + tid = NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) + assert.EqualValues(t, [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}, tid.Bytes()) +} + +func TestSpanStatusCode(t *testing.T) { + td := NewTraces() + rss := td.ResourceSpans() + rss.Resize(1) + rss.At(0).InstrumentationLibrarySpans().Resize(1) + rss.At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + status := rss.At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).Status() + + // Check handling of deprecated status code, see spec here: + // https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L231 + // + // 2. 
New senders, which are aware of the `code` field, MUST set both the + // `deprecated_code` and `code` fields according to the following rules: + // + // if code==STATUS_CODE_UNSET then `deprecated_code` MUST be + // set to DEPRECATED_STATUS_CODE_OK. + status.SetDeprecatedCode(DeprecatedStatusCodeUnknownError) + assert.EqualValues(t, DeprecatedStatusCodeUnknownError, status.DeprecatedCode()) + status.SetCode(StatusCodeUnset) + assert.EqualValues(t, DeprecatedStatusCodeOk, status.DeprecatedCode()) + + // if code==STATUS_CODE_OK then `deprecated_code` MUST be + // set to DEPRECATED_STATUS_CODE_OK. + status.SetDeprecatedCode(DeprecatedStatusCodeUnknownError) + assert.EqualValues(t, DeprecatedStatusCodeUnknownError, status.DeprecatedCode()) + status.SetCode(StatusCodeOk) + assert.EqualValues(t, DeprecatedStatusCodeOk, status.DeprecatedCode()) + + // if code==STATUS_CODE_ERROR then `deprecated_code` MUST be + // set to DEPRECATED_STATUS_CODE_UNKNOWN_ERROR. + status.SetDeprecatedCode(DeprecatedStatusCodeOk) + assert.EqualValues(t, DeprecatedStatusCodeOk, status.DeprecatedCode()) + status.SetCode(StatusCodeError) + assert.EqualValues(t, DeprecatedStatusCodeUnknownError, status.DeprecatedCode()) +} + +func TestToFromOtlp(t *testing.T) { + otlp := []*otlptrace.ResourceSpans(nil) + td := TracesFromOtlp(otlp) + assert.EqualValues(t, NewTraces(), td) + assert.EqualValues(t, otlp, TracesToOtlp(td)) + // More tests in ./tracedata/trace_test.go. Cannot have them here because of + // circular dependency. +} + +func TestResourceSpansWireCompatibility(t *testing.T) { + // This test verifies that OTLP ProtoBufs generated using the goproto lib in the + // opentelemetry-proto repository and OTLP ProtoBufs generated using the gogoproto lib in + // this repository are wire compatible. + + // Generate ResourceSpans as pdata struct. + pdataRS := generateTestResourceSpans() + + // Marshal its underlying ProtoBuf to wire. + wire1, err := gogoproto.Marshal(pdataRS.orig) + assert.NoError(t, err) + assert.NotNil(t, wire1) + + // Unmarshal from the wire to OTLP Protobuf in goproto's representation. + var goprotoMessage emptypb.Empty + err = goproto.Unmarshal(wire1, &goprotoMessage) + assert.NoError(t, err) + + // Marshal to the wire again. + wire2, err := goproto.Marshal(&goprotoMessage) + assert.NoError(t, err) + assert.NotNil(t, wire2) + + // Unmarshal from the wire into gogoproto's representation. + var gogoprotoRS2 otlptrace.ResourceSpans + err = gogoproto.Unmarshal(wire2, &gogoprotoRS2) + assert.NoError(t, err) + + // Now compare that the original and final ProtoBuf messages are the same. + // This proves that goproto and gogoproto marshaling/unmarshaling are wire compatible. + assert.EqualValues(t, pdataRS.orig, &gogoprotoRS2) +}
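The contract exercised by the round-trip tests that follow is worth making explicit: `FromOtlpProtoBytes` must be called on an initialized `Traces` (as returned by `NewTraces`), since a zero-valued `Traces` panics. A minimal decoding helper respecting that contract might look like this hedged sketch (`decodeTraces` is an illustrative name, not part of the package):

```go
package traceutil

import "go.opentelemetry.io/collector/consumer/pdata"

// decodeTraces unmarshals OTLP ExportTraceServiceRequest bytes. Per the doc
// comment on FromOtlpProtoBytes, the receiver must be an initialized Traces
// (as returned by NewTraces); calling it on a zero-valued Traces panics.
func decodeTraces(buf []byte) (pdata.Traces, error) {
	td := pdata.NewTraces()
	if err := td.FromOtlpProtoBytes(buf); err != nil {
		return pdata.NewTraces(), err
	}
	return td, nil
}
```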
 + +func TestTracesToFromOtlpProtoBytes(t *testing.T) { + send := NewTraces() + fillTestResourceSpansSlice(send.ResourceSpans()) + bytes, err := send.ToOtlpProtoBytes() + assert.NoError(t, err) + + recv := NewTraces() + err = recv.FromOtlpProtoBytes(bytes) + assert.NoError(t, err) + assert.EqualValues(t, send, recv) +} + +func TestTracesFromInvalidOtlpProtoBytes(t *testing.T) { + err := NewTraces().FromOtlpProtoBytes([]byte{0xFF}) + assert.EqualError(t, err, "unexpected EOF") +} + +func TestTracesClone(t *testing.T) { + traces := NewTraces() + fillTestResourceSpansSlice(traces.ResourceSpans()) + assert.EqualValues(t, traces, traces.Clone()) +} + +func BenchmarkTracesClone(b *testing.B) { + traces := NewTraces() + fillTestResourceSpansSlice(traces.ResourceSpans()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + clone := traces.Clone() + if clone.ResourceSpans().Len() != traces.ResourceSpans().Len() { + b.Fail() + } + } +} + +func BenchmarkTracesToOtlp(b *testing.B) { + traces := NewTraces() + fillTestResourceSpansSlice(traces.ResourceSpans()) + b.ResetTimer() + for n := 0; n < b.N; n++ { + buf, err := traces.ToOtlpProtoBytes() + require.NoError(b, err) + assert.NotEqual(b, 0, len(buf)) + } +} + +func BenchmarkTracesFromOtlp(b *testing.B) { + baseTraces := NewTraces() + fillTestResourceSpansSlice(baseTraces.ResourceSpans()) + buf, err := baseTraces.ToOtlpProtoBytes() + require.NoError(b, err) + assert.NotEqual(b, 0, len(buf)) + b.ResetTimer() + b.ReportAllocs() + for n := 0; n < b.N; n++ { + traces := NewTraces() + require.NoError(b, traces.FromOtlpProtoBytes(buf)) + assert.Equal(b, baseTraces.ResourceSpans().Len(), traces.ResourceSpans().Len()) + } +} diff --git a/internal/otel_collector/consumer/pdata/traceid.go b/internal/otel_collector/consumer/pdata/traceid.go new file mode 100644 index 00000000000..d374499a3d0 --- /dev/null +++ b/internal/otel_collector/consumer/pdata/traceid.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdata + +import ( + "go.opentelemetry.io/collector/internal/data" +) + +// TraceID is an alias of OTLP TraceID data type. +type TraceID data.TraceID + +func InvalidTraceID() TraceID { + return TraceID(data.NewTraceID([16]byte{})) +} + +func NewTraceID(bytes [16]byte) TraceID { + return TraceID(data.NewTraceID(bytes)) +} + +// Bytes returns the byte array representation of the TraceID. +func (t TraceID) Bytes() [16]byte { + return data.TraceID(t).Bytes() +} + +// HexString returns hex representation of the TraceID. +func (t TraceID) HexString() string { + return data.TraceID(t).HexString() +} + +// IsValid returns true if id contains at least one non-zero byte. +func (t TraceID) IsValid() bool { + return data.TraceID(t).IsValid() +}
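For orientation, here is a brief, hedged usage sketch of the `SpanID`/`TraceID` wrappers defined above (illustrative only; the printed hex strings are simply whatever `HexString` produces for these bytes):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	// Zero-valued IDs are invalid: IsValid reports whether any byte is non-zero.
	fmt.Println(pdata.InvalidTraceID().IsValid(), pdata.InvalidSpanID().IsValid()) // false false

	tid := pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
	sid := pdata.NewSpanID([8]byte{1, 2, 3, 4, 4, 3, 2, 1})
	fmt.Println(tid.IsValid(), tid.HexString())
	fmt.Println(sid.IsValid(), sid.HexString())
}
```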
 diff --git a/internal/otel_collector/consumer/simple/metrics.go b/internal/otel_collector/consumer/simple/metrics.go new file mode 100644 index 00000000000..cd9e5a60df8 --- /dev/null +++ b/internal/otel_collector/consumer/simple/metrics.go @@ -0,0 +1,360 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package simple + +import ( + "fmt" + "sync" + "time" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// Metrics facilitates building pdata.Metrics in receivers. It is meant +// to be much easier and more fluent than using pdata.Metrics directly. +// All of the exported methods on it return the same instance of Metrics +// as a return value, allowing you to chain method calls easily, similar to the +// Java builder pattern. +// +// All of the public fields in this structure are meant to be set before the +// first data point is added, and should not be changed afterwards. +// +// The Metrics is designed for cases where receivers are generating +// metrics from scratch, where generally you will have a single datapoint per +// metric/label combination. +// +// One restriction this helper imposes is that a particular metric name must +// only be used with a single data type for all instances derived from a base +// helper, including the base instance. This restriction greatly simplifies +// the logic to reuse metrics for multiple datapoints, and it is generally +// easier for backends to not have to deal with conflicting types anyway. +// +// It is NOT thread-safe, so you should use an external mutex if using it from +// multiple goroutines. +type Metrics struct { + // REQUIRED. A Metrics object that has been created with + // `pdata.NewMetrics()`. This is required to be set on the builder. All + // metrics added will go into this immediately upon invocation of Add* + // methods. Do not change this once initially set. + pdata.Metrics + + // MetricFactoriesByName is an optional map of metric factories that will + // be created with the appropriate name, description, and type field. This + // is intended to be used with the metadata code generation modules but can + // be used apart from that just as well. The returned metrics are expected + // to be initialized. + MetricFactoriesByName map[string]func() pdata.Metric + + // If set, this instrumentation library name will be used for all metrics + // generated by this builder. This is meant to be set once at builder + // creation and not changed later. + InstrumentationLibraryName string + // If set, this instrumentation library version will be used for all + // metrics generated by this builder. This is meant to be set once at + // builder creation and not changed later. + InstrumentationLibraryVersion string + // These attributes will be added to the Resource object on all + // ResourceMetrics instances created by the builder. This is meant to be + // set once at builder creation and not changed later.
+ ResourceAttributes map[string]string + // This time will be used as the Timestamp for all metrics generated. It + // can be updated with a new timestamp at any time. + Timestamp time.Time + // A set of labels that will be applied to all datapoints emitted by the + // builder. + Labels map[string]string + + resourceMetricIdx **int + metricIdxByName map[string]int +} + +func (mb *Metrics) ensureInit() { + if mb.metricIdxByName == nil { + mb.metricIdxByName = map[string]int{} + } + if mb.resourceMetricIdx == nil { + var ip *int + mb.resourceMetricIdx = &ip + } +} + +// clone returns a copy of this Metrics builder. All of the maps are deeply copied. +func (mb *Metrics) clone() *Metrics { + mb.ensureInit() + + return &Metrics{ + Metrics: mb.Metrics, + MetricFactoriesByName: mb.MetricFactoriesByName, + InstrumentationLibraryName: mb.InstrumentationLibraryName, + InstrumentationLibraryVersion: mb.InstrumentationLibraryVersion, + ResourceAttributes: cloneStringMap(mb.ResourceAttributes), + Timestamp: mb.Timestamp, + Labels: cloneStringMap(mb.Labels), + resourceMetricIdx: mb.resourceMetricIdx, + metricIdxByName: mb.metricIdxByName, + } +} + +// WithLabels returns a new, independent builder with additional labels. These +// labels will be combined with the Labels that can be set on the struct. +// All subsequent calls to create metrics will create metrics that use these +// labels. The input map's entries are copied so the map can be mutated freely +// by the caller afterwards without affecting the builder. +func (mb *Metrics) WithLabels(l map[string]string) *Metrics { + out := mb.clone() + + for k, v := range l { + out.Labels[k] = v + } + + return out +} + +// AsSafe returns an instance of this builder wrapped in +// SafeMetrics, which ensures all of the public methods on this instance +// will be thread-safe between goroutines. You must explicitly type these +// instances as SafeMetrics.
+func (mb Metrics) AsSafe() *SafeMetrics { + return &SafeMetrics{ + Metrics: &mb, + Mutex: &sync.Mutex{}, + } +} + +func (mb *Metrics) AddGaugeDataPoint(name string, metricValue int64) *Metrics { + typ := pdata.MetricDataTypeIntGauge + mb.addDataPoint(name, typ, metricValue) + return mb +} + +func (mb *Metrics) AddDGaugeDataPoint(name string, metricValue float64) *Metrics { + typ := pdata.MetricDataTypeDoubleGauge + mb.addDataPoint(name, typ, metricValue) + return mb +} + +func (mb *Metrics) AddSumDataPoint(name string, metricValue int64) *Metrics { + typ := pdata.MetricDataTypeIntSum + mb.addDataPoint(name, typ, metricValue) + return mb +} + +func (mb *Metrics) AddDSumDataPoint(name string, metricValue float64) *Metrics { + typ := pdata.MetricDataTypeDoubleSum + mb.addDataPoint(name, typ, metricValue) + return mb +} + +func (mb *Metrics) AddHistogramRawDataPoint(name string, hist pdata.IntHistogramDataPoint) *Metrics { + mb.addDataPoint(name, pdata.MetricDataTypeIntHistogram, hist) + return mb +} + +func (mb *Metrics) AddDHistogramRawDataPoint(name string, hist pdata.DoubleHistogramDataPoint) *Metrics { + mb.addDataPoint(name, pdata.MetricDataTypeDoubleHistogram, hist) + return mb +} + +func (mb *Metrics) getMetricsSlice() pdata.MetricSlice { + rms := mb.Metrics.ResourceMetrics() + if mb.resourceMetricIdx != nil && *mb.resourceMetricIdx != nil { + return rms.At(**mb.resourceMetricIdx).InstrumentationLibraryMetrics().At(0).Metrics() + } + + rmsLen := rms.Len() + rms.Resize(rmsLen + 1) + rm := rms.At(rmsLen) + + res := rm.Resource() + for k, v := range mb.ResourceAttributes { + res.Attributes().Insert(k, pdata.NewAttributeValueString(v)) + } + + ilms := rm.InstrumentationLibraryMetrics() + ilms.Resize(1) + ilm := ilms.At(0) + + il := ilm.InstrumentationLibrary() + il.SetName(mb.InstrumentationLibraryName) + il.SetVersion(mb.InstrumentationLibraryVersion) + + *mb.resourceMetricIdx = &rmsLen + + return ilm.Metrics() +} + +func (mb *Metrics) getOrCreateMetric(name string, typ pdata.MetricDataType) pdata.Metric { + mb.ensureInit() + + metricSlice := mb.getMetricsSlice() + + idx, ok := mb.metricIdxByName[name] + if ok { + return metricSlice.At(idx) + } + + var metric pdata.Metric + if fac, ok := mb.MetricFactoriesByName[name]; ok { + metric = fac() + } else { + metric = pdata.NewMetric() + + metric.SetName(name) + metric.SetDataType(typ) + } + + metricSlice.Append(metric) + + mb.metricIdxByName[name] = metricSlice.Len() - 1 + return metric +} + +func (mb *Metrics) addDataPoint(name string, typ pdata.MetricDataType, val interface{}) { + metric := mb.getOrCreateMetric(name, typ) + + // This protects against reusing the same metric name with different types. 
+ if metric.DataType() != typ { + panic(fmt.Errorf("mismatched metric data types for metric %q: %q vs %q", metric.Name(), metric.DataType(), typ)) + } + + tsNano := pdata.TimestampUnixNano(mb.Timestamp.UnixNano()) + + switch typ { + case pdata.MetricDataTypeIntGauge: + m := metric.IntGauge() + dps := m.DataPoints() + dp := pdata.NewIntDataPoint() + dp.LabelsMap().InitFromMap(mb.Labels) + dp.SetValue(val.(int64)) + dp.SetTimestamp(tsNano) + dps.Append(dp) + + case pdata.MetricDataTypeIntSum: + m := metric.IntSum() + dps := m.DataPoints() + dp := pdata.NewIntDataPoint() + dp.LabelsMap().InitFromMap(mb.Labels) + dp.SetValue(val.(int64)) + dp.SetTimestamp(tsNano) + dps.Append(dp) + + case pdata.MetricDataTypeDoubleGauge: + m := metric.DoubleGauge() + dps := m.DataPoints() + dp := pdata.NewDoubleDataPoint() + dp.LabelsMap().InitFromMap(mb.Labels) + dp.SetValue(val.(float64)) + dp.SetTimestamp(tsNano) + dps.Append(dp) + + case pdata.MetricDataTypeDoubleSum: + m := metric.DoubleSum() + dps := m.DataPoints() + dp := pdata.NewDoubleDataPoint() + dp.LabelsMap().InitFromMap(mb.Labels) + dp.SetValue(val.(float64)) + dp.SetTimestamp(tsNano) + dps.Append(dp) + + case pdata.MetricDataTypeIntHistogram: + m := metric.IntHistogram() + dps := m.DataPoints() + dp := pdata.NewIntHistogramDataPoint() + dp.LabelsMap().InitFromMap(mb.Labels) + val.(pdata.IntHistogramDataPoint).CopyTo(dp) + dp.SetTimestamp(tsNano) + dps.Append(dp) + + case pdata.MetricDataTypeDoubleHistogram: + m := metric.DoubleHistogram() + dps := m.DataPoints() + dp := pdata.NewDoubleHistogramDataPoint() + dp.LabelsMap().InitFromMap(mb.Labels) + val.(pdata.DoubleHistogramDataPoint).CopyTo(dp) + dp.SetTimestamp(tsNano) + dps.Append(dp) + + default: + panic("invalid metric type: " + typ.String()) + } +} + +func cloneStringMap(m map[string]string) map[string]string { + out := make(map[string]string, len(m)) + for k, v := range m { + out[k] = v + } + return out +} + +// SafeMetrics is a wrapper for Metrics that ensures the wrapped +// instance can be used safely across goroutines. It is meant to be created +// from the AsSafe method on Metrics.
+type SafeMetrics struct { + *sync.Mutex + *Metrics +} + +func (mb *SafeMetrics) WithLabels(l map[string]string) *SafeMetrics { + mb.Lock() + defer mb.Unlock() + + return &SafeMetrics{ + Metrics: mb.Metrics.WithLabels(l), + Mutex: mb.Mutex, + } +} + +func (mb *SafeMetrics) AddGaugeDataPoint(name string, metricValue int64) *SafeMetrics { + mb.Lock() + mb.Metrics.AddGaugeDataPoint(name, metricValue) + mb.Unlock() + return mb +} + +func (mb *SafeMetrics) AddDGaugeDataPoint(name string, metricValue float64) *SafeMetrics { + mb.Lock() + mb.Metrics.AddDGaugeDataPoint(name, metricValue) + mb.Unlock() + return mb +} + +func (mb *SafeMetrics) AddSumDataPoint(name string, metricValue int64) *SafeMetrics { + mb.Lock() + mb.Metrics.AddSumDataPoint(name, metricValue) + mb.Unlock() + return mb +} + +func (mb *SafeMetrics) AddDSumDataPoint(name string, metricValue float64) *SafeMetrics { + mb.Lock() + mb.Metrics.AddDSumDataPoint(name, metricValue) + mb.Unlock() + return mb +} + +func (mb *SafeMetrics) AddHistogramRawDataPoint(name string, hist pdata.IntHistogramDataPoint) *SafeMetrics { + mb.Lock() + mb.Metrics.AddHistogramRawDataPoint(name, hist) + mb.Unlock() + return mb +} + +func (mb *SafeMetrics) AddDHistogramRawDataPoint(name string, hist pdata.DoubleHistogramDataPoint) *SafeMetrics { + mb.Lock() + mb.Metrics.AddDHistogramRawDataPoint(name, hist) + mb.Unlock() + return mb +} diff --git a/internal/otel_collector/consumer/simple/metrics_test.go b/internal/otel_collector/consumer/simple/metrics_test.go new file mode 100644 index 00000000000..412face9c90 --- /dev/null +++ b/internal/otel_collector/consumer/simple/metrics_test.go @@ -0,0 +1,516 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package simple + +import ( + "encoding/json" + "fmt" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/testutil/metricstestutil" +) + +func ExampleMetrics() { + metrics := pdata.NewMetrics() + + mb := Metrics{ + Metrics: metrics, + InstrumentationLibraryName: "example", + InstrumentationLibraryVersion: "0.1", + ResourceAttributes: map[string]string{ + "host": "my-host", + }, + Timestamp: time.Now(), + } + + for _, disk := range []string{"sda", "sdb", "sdc"} { + // All metrics added after this will have these labels + diskBuilder := mb.WithLabels(map[string]string{ + "disk": disk, + }).AddGaugeDataPoint("disk.usage", 9000000) + + // Add metrics in a chained manner + diskBuilder. + AddGaugeDataPoint("disk.capacity", 9000000). 
+ AddDGaugeDataPoint("disk.temp", 30.5) + + // Add additional labels + diskBuilder.WithLabels(map[string]string{ + "direction": "read", + }).AddSumDataPoint("disk.ops", 50) + + diskBuilder.WithLabels(map[string]string{ + "direction": "write", + }).AddSumDataPoint("disk.ops", 80) + } + + metricCount, dpCount := metrics.MetricAndDataPointCount() + fmt.Printf("Metrics: %d\nDataPoints: %d", metricCount, dpCount) + + // Do not reuse Metrics once you are done using the generated Metrics. Make + // a new instance of it along with a new instance of pdata.Metrics. + + // Output: + // Metrics: 4 + // DataPoints: 15 +} + +func TestMetrics(t *testing.T) { + metrics := pdata.NewMetrics() + + mb := Metrics{ + Metrics: metrics, + InstrumentationLibraryName: "example", + InstrumentationLibraryVersion: "0.1", + ResourceAttributes: map[string]string{ + "host": "my-host", + }, + Timestamp: time.Unix(0, 1597266546570840817), + Labels: map[string]string{ + "disk": "sda", + }, + } + + expected := `[ + { + "resource": { + "attributes": [ + { + "key": "host", + "value": { + "Value": { + "string_value": "my-host" + } + } + } + ] + }, + "instrumentation_library_metrics": [ + { + "instrumentation_library": { + "name": "example", + "version": "0.1" + }, + "metrics": [ + { + "name": "disk.capacity", + "Data": { + "int_gauge": { + "data_points": [ + { + "labels": [ + { + "key": "disk", + "value": "sda" + } + ], + "time_unix_nano": 1597266546570840817, + "value": 9000000 + } + ] + } + } + }, + { + "name": "disk.reads", + "Data": { + "int_sum": { + "data_points": [ + { + "labels": [ + { + "key": "disk", + "value": "sda" + } + ], + "time_unix_nano": 1597266546570840817, + "value": 50 + }, + { + "labels": [ + { + "key": "disk", + "value": "sda" + }, + { + "key": "partition", + "value": "1" + } + ], + "time_unix_nano": 1597266546570840817, + "value": 5 + } + ] + } + } + }, + { + "name": "disk.temp", + "Data": { + "double_gauge": { + "data_points": [ + { + "labels": [ + { + "key": "disk", + "value": "sda" + } + ], + "time_unix_nano": 1597266546570840817, + "value": 30.5 + } + ] + } + } + }, + { + "name": "disk.time_awake", + "Data": { + "double_sum": { + "data_points": [ + { + "labels": [ + { + "key": "disk", + "value": "sda" + } + ], + "time_unix_nano": 1597266546570840817, + "value": 100.6 + } + ] + } + } + }, + { + "name": "partition.capacity", + "Data": { + "int_gauge": { + "data_points": [ + { + "labels": [ + { + "key": "disk", + "value": "sda" + }, + { + "key": "partition", + "value": "1" + } + ], + "time_unix_nano": 1597266546570840817, + "value": 40000 + } + ] + } + } + }, + { + "name": "disk.times", + "Data": { + "int_histogram": { + "data_points": [ + { + "labels": [], + "time_unix_nano": 1597266546570840817 + } + ] + } + } + }, + { + "name": "disk.double_times", + "Data": { + "double_histogram": { + "data_points": [ + { + "labels": [], + "time_unix_nano": 1597266546570840817 + } + ] + } + } + } + ] + } + ] + } +]` + + mb. + AddGaugeDataPoint("disk.capacity", 9000000). + AddSumDataPoint("disk.reads", 50). + AddDGaugeDataPoint("disk.temp", 30.5). + AddDSumDataPoint("disk.time_awake", 100.6) + + intHisto := pdata.NewIntHistogramDataPoint() + doubleHisto := pdata.NewDoubleHistogramDataPoint() + + mb.WithLabels(map[string]string{ + "partition": "1", + }). + AddGaugeDataPoint("partition.capacity", 40000). + AddSumDataPoint("disk.reads", 5). + AddHistogramRawDataPoint("disk.times", intHisto). 
+ AddDHistogramRawDataPoint("disk.double_times", doubleHisto) + + mCount, dpCount := metrics.MetricAndDataPointCount() + require.Equal(t, 7, mCount) + require.Equal(t, 8, dpCount) + asJSON, _ := json.MarshalIndent(pdata.MetricsToOtlp(metricstestutil.SortedMetrics(metrics)), "", " ") + require.Equal(t, expected, string(asJSON)) +} + +func TestMetricFactories(t *testing.T) { + mb := Metrics{ + Metrics: pdata.NewMetrics(), + MetricFactoriesByName: map[string]func() pdata.Metric{ + "disk.ops": func() pdata.Metric { + m := pdata.NewMetric() + m.SetName("disk.ops") + m.SetDescription("This counts disk operations") + m.SetDataType(pdata.MetricDataTypeIntSum) + return m + }, + }, + InstrumentationLibraryName: "example", + InstrumentationLibraryVersion: "0.1", + ResourceAttributes: map[string]string{ + "host": "my-host", + }, + Timestamp: time.Unix(0, 1597266546570840817), + Labels: map[string]string{ + "disk": "sda", + }, + } + + mb.WithLabels(map[string]string{ + "direction": "read", + }).AddSumDataPoint("disk.ops", 5) + + mb.WithLabels(map[string]string{ + "direction": "write", + }).AddSumDataPoint("disk.ops", 5) + + rms := mb.Metrics.ResourceMetrics() + require.Equal(t, 1, rms.Len()) + + ilms := rms.At(0).InstrumentationLibraryMetrics() + require.Equal(t, 1, ilms.Len()) + require.Equal(t, 1, ilms.At(0).Metrics().Len()) + m := ilms.At(0).Metrics().At(0) + require.Equal(t, "disk.ops", m.Name()) + require.Equal(t, "This counts disk operations", m.Description()) + require.Equal(t, pdata.MetricDataTypeIntSum, m.DataType()) + require.Equal(t, 2, m.IntSum().DataPoints().Len()) + + require.PanicsWithError(t, `mismatched metric data types for metric "disk.ops": "IntSum" vs "IntGauge"`, func() { + mb.AddGaugeDataPoint("disk.ops", 1) + }) + + mb.AddGaugeDataPoint("disk.temp", 25) + require.Equal(t, 2, ilms.At(0).Metrics().Len()) + m = ilms.At(0).Metrics().At(1) + require.Equal(t, "disk.temp", m.Name()) + require.Equal(t, "", m.Description()) + require.Equal(t, pdata.MetricDataTypeIntGauge, m.DataType()) + require.Equal(t, 1, m.IntGauge().DataPoints().Len()) +} + +func ExampleSafeMetrics() { + metrics := pdata.NewMetrics() + + mb := Metrics{ + Metrics: metrics, + InstrumentationLibraryName: "example", + InstrumentationLibraryVersion: "0.1", + ResourceAttributes: map[string]string{ + "host": "my-host", + }, + Timestamp: time.Now(), + }.AsSafe() + + var wg sync.WaitGroup + for _, disk := range []string{"sda", "sdb", "sdc"} { + wg.Add(1) + go func(disk string) { + // All metrics added after this will have these labels + diskBuilder := mb.WithLabels(map[string]string{ + "disk": disk, + }).AddGaugeDataPoint("disk.usage", 9000000) + + // Add metrics in a chained manner + diskBuilder. + AddGaugeDataPoint("disk.capacity", 9000000). 
+ AddSumDataPoint("disk.reads", 50) + + // Or add them on their own + diskBuilder.AddDGaugeDataPoint("disk.temp", 30.5) + + wg.Done() + }(disk) + } + wg.Wait() + + metricCount, dpCount := metrics.MetricAndDataPointCount() + fmt.Printf("Metrics: %d\nDataPoints: %d", metricCount, dpCount) + + // Output: + // Metrics: 4 + // DataPoints: 12 +} + +func TestSafeMetrics(t *testing.T) { + metrics := pdata.NewMetrics() + + mb := Metrics{ + Metrics: metrics, + InstrumentationLibraryName: "example", + InstrumentationLibraryVersion: "0.1", + ResourceAttributes: map[string]string{ + "host": "my-host", + }, + Timestamp: time.Unix(0, 1597266546570840817), + Labels: map[string]string{ + "disk": "sda", + }, + }.AsSafe() + + ready := make(chan struct{}) + var wg sync.WaitGroup + + for i := 0; i < 1000; i++ { + wg.Add(1) + go func(idx string) { + <-ready + mb. + AddGaugeDataPoint("disk.capacity"+idx, 9000000). + AddSumDataPoint("disk.reads"+idx, 50). + AddDGaugeDataPoint("disk.temp"+idx, 30.5). + AddDSumDataPoint("disk.time_awake"+idx, 100.6) + + intHisto := pdata.NewIntHistogramDataPoint() + doubleHisto := pdata.NewDoubleHistogramDataPoint() + + for j := 0; j < 5; j++ { + mb.WithLabels(map[string]string{ + "partition": strconv.Itoa(j), + }). + AddGaugeDataPoint("partition.capacity", 40000). + AddSumDataPoint("disk.reads", 5). + AddHistogramRawDataPoint("disk.times", intHisto). + AddDHistogramRawDataPoint("disk.double_times", doubleHisto) + } + wg.Done() + }(strconv.Itoa(i)) + } + + close(ready) + wg.Wait() + + mCount, dpCount := metrics.MetricAndDataPointCount() + require.Equal(t, 4004, mCount) + require.Equal(t, 24000, dpCount) +} + +func BenchmarkSimpleMetrics(b *testing.B) { + for n := 0; n < b.N; n++ { + mb := Metrics{ + Metrics: pdata.NewMetrics(), + InstrumentationLibraryName: "example", + InstrumentationLibraryVersion: "0.1", + ResourceAttributes: map[string]string{ + "host": "my-host", + "service": "app", + }, + Timestamp: time.Now(), + Labels: map[string]string{ + "env": "prod", + "app": "myapp", + "version": "1.0", + }, + } + + for i := 0; i < 50; i++ { + name := "gauge" + strconv.Itoa(i) + mb.AddGaugeDataPoint(name, 5) + mb.AddGaugeDataPoint(name, 5) + } + } +} + +func BenchmarkPdataMetrics(b *testing.B) { + for n := 0; n < b.N; n++ { + tsNano := pdata.TimestampUnixNano(time.Now().UnixNano()) + + m := pdata.NewMetrics() + + rms := m.ResourceMetrics() + + rmsLen := rms.Len() + rms.Resize(rmsLen + 1) + rm := rms.At(rmsLen) + + res := rm.Resource() + resAttrs := res.Attributes() + resAttrs.Insert("host", pdata.NewAttributeValueString("my-host")) + resAttrs.Insert("serviceName", pdata.NewAttributeValueString("app")) + + ilms := rm.InstrumentationLibraryMetrics() + ilms.Resize(1) + ilm := ilms.At(0) + metrics := ilm.Metrics() + metrics.Resize(6) + + il := ilm.InstrumentationLibrary() + il.SetName("example") + il.SetVersion("0.1") + + for i := 0; i < 50; i++ { + metric := metrics.At(0) + metric.SetName("gauge" + strconv.Itoa(i)) + metric.SetDataType(pdata.MetricDataTypeIntGauge) + mAsType := metric.IntGauge() + dps := mAsType.DataPoints() + dps.Resize(2) + { + dp := dps.At(0) + labels := dp.LabelsMap() + labels.InitEmptyWithCapacity(3) + labels.Insert("env", "prod") + labels.Insert("app", "myapp") + labels.Insert("version", "1.0") + dp.SetValue(5) + dp.SetTimestamp(tsNano) + } + { + dp := dps.At(1) + labels := dp.LabelsMap() + labels.InitEmptyWithCapacity(3) + labels.Insert("env", "prod") + labels.Insert("app", "myapp") + labels.Insert("version", "1.0") + dp.SetValue(5) + dp.SetTimestamp(tsNano) + } + 
} + } +} diff --git a/internal/otel_collector/docs/design.md b/internal/otel_collector/docs/design.md new file mode 100644 index 00000000000..c5172cf8882 --- /dev/null +++ b/internal/otel_collector/docs/design.md @@ -0,0 +1,232 @@ +# OpenTelemetry Collector Architecture + +This document describes the architecture design and implementation of OpenTelemetry Collector. + +## Summary + +OpenTelemetry Collector is an executable that receives telemetry data, optionally transforms it, and sends the data further. + +The Collector supports several popular open-source protocols for receiving and sending telemetry data, as well as a pluggable architecture for adding more protocols. + +Data receiving, transformation and sending is done using Pipelines. The Collector can be configured to have one or more Pipelines. Each Pipeline includes a set of Receivers that receive the data, a series of optional Processors that get the data from receivers and transform it, and a set of Exporters which get the data from the Processors and send it further outside the Collector. The same receiver can feed data to multiple Pipelines, and multiple Pipelines can feed data into the same Exporter. + +## Pipelines + +A Pipeline defines the path the data follows in the Collector: starting from reception, through further processing or modification, and finally exiting the Collector via exporters. + +Pipelines can operate on two telemetry data types: traces and metrics. The data type is a property of the pipeline defined by its configuration. Receivers, exporters and processors used in a pipeline must support the particular data type; otherwise `ErrDataTypeIsNotSupported` will be reported when the configuration is loaded. A pipeline can be depicted as follows: + +![Pipelines](images/design-pipelines.png) + +There can be one or more receivers in a pipeline. Data from all receivers is pushed to the first processor, which processes it and then pushes it to the next processor (or it may drop the data, e.g. if it is a “sampling” processor), and so on until the last processor in the pipeline pushes the data to the exporters. Each exporter gets a copy of each data element. The last processor uses a `FanOutConnector` to fan out the data to multiple exporters (sketched at the end of this section). + +The pipeline is constructed during Collector startup based on the pipeline definition in the config file. + +A pipeline configuration typically looks like this: + +```yaml +service: + pipelines: # section that can contain multiple subsections, one per pipeline + traces: # type of the pipeline + receivers: [opencensus, jaeger, zipkin] + processors: [tags, tail_sampling, batch, queued_retry] + exporters: [opencensus, jaeger, stackdriver, zipkin] +``` + +The above example defines a pipeline for the “traces” type of telemetry data, with 3 receivers, 4 processors and 4 exporters. + +For details of the config file format see [this document](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#).
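Since each exporter gets its own copy of each data element, the fan-out idea can be illustrated with a minimal, hedged sketch built on the `pdata` API vendored in this change. This is an illustration of the concept only, not the Collector's actual `FanOutConnector` (the names `Consumer` and `fanOut` are invented for the sketch):

```go
package fanout

import "go.opentelemetry.io/collector/consumer/pdata"

// Consumer stands in for an exporter or for the first processor of a pipeline.
type Consumer func(pdata.Traces) error

// fanOut hands an independent copy of the traces to every consumer, so that a
// consumer mutating its copy cannot affect the others. Illustration only; the
// real FanOutConnector propagates data via synchronous calls on the
// Collector's consumer interfaces and is more careful about avoiding copies.
func fanOut(td pdata.Traces, consumers []Consumer) error {
	for _, consume := range consumers {
		if err := consume(td.Clone()); err != nil {
			return err
		}
	}
	return nil
}
```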
### Receivers + +Receivers typically listen on a network port and receive telemetry data. Usually one receiver is configured to send received data to one pipeline; however, it is also possible to configure the same receiver to send the same received data to multiple pipelines. This can be done by simply listing the same receiver in the “receivers” key of several pipelines: + +```yaml +receivers: + opencensus: + endpoint: "0.0.0.0:55678" + +service: + pipelines: + traces: # a pipeline of “traces” type + receivers: [opencensus] + processors: [tags, tail_sampling, batch, queued_retry] + exporters: [jaeger] + traces/2: # another pipeline of “traces” type + receivers: [opencensus] + processors: [batch] + exporters: [opencensus] +``` + +In the above example the “opencensus” receiver will send the same data to pipeline “traces” and to pipeline “traces/2”. (Note: the configuration uses composite key names in the form of `type[/name]` as defined in [this document](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#).) + +When the Collector loads this config the result will look like this (part of the processors and exporters are omitted from the diagram for brevity): + +![Receivers](images/design-receivers.png) + +Important: when the same receiver is referenced in more than one pipeline, the Collector will create only one receiver instance at runtime, which will send the data to a `FanOutConnector`, which in turn will send the data to the first processor of each pipeline. The data propagation from receiver to `FanOutConnector` and then to processors is via a synchronous function call. This means that if one processor blocks the call, the other pipelines attached to this receiver will be blocked from receiving the same data, and the receiver itself will stop processing and forwarding newly received data. + +### Exporters + +Exporters typically forward the data they get to a destination on a network (but they can also send it elsewhere; e.g. the “logging” exporter writes the telemetry data to a local file). + +The configuration allows multiple exporters of the same type, even in the same pipeline. For example, one can have two “opencensus” exporters defined, each one sending to a different opencensus endpoint, e.g.: + +```yaml +exporters: + opencensus/1: + endpoint: "example.com:14250" + opencensus/2: + endpoint: "0.0.0.0:14250" +``` + +Usually an exporter gets the data from one pipeline; however, it is possible to configure multiple pipelines to send data to the same exporter, e.g.: + +```yaml +exporters: + jaeger: + protocols: + grpc: + endpoint: "0.0.0.0:14250" + +service: + pipelines: + traces: # a pipeline of “traces” type + receivers: [zipkin] + processors: [tags, tail_sampling, batch, queued_retry] + exporters: [jaeger] + traces/2: # another pipeline of “traces” type + receivers: [opencensus] + processors: [batch] + exporters: [jaeger] +``` + +In the above example the “jaeger” exporter will get data from pipeline “traces” and from pipeline “traces/2”. When the Collector loads this config the result will look like this (part of the processors and receivers are omitted from the diagram for brevity): + +![Exporters](images/design-exporters.png) + +### Processors + +A pipeline can contain sequentially connected processors. The first processor gets the data from one or more receivers that are configured for the pipeline, and the last processor sends the data to one or more exporters that are configured for the pipeline. All processors between the first and last receive the data strictly from one preceding processor and send data strictly to the succeeding processor. + +Processors can transform the data before forwarding it (e.g. add or remove attributes from spans), they can drop the data simply by deciding not to forward it (this is, for example, how a “sampling” processor works), and they can also generate new data (this is how, for example, a “persistent-queue” processor can work after Collector restarts, by reading previously saved data from a local file and forwarding it on the pipeline). A trivial transformation of this kind is sketched at the end of this section. + +The same processor name can be referenced in the “processors” key of multiple pipelines. In this case the same configuration will be used for each of these processors; however, each pipeline always gets its own instance of the processor. Each of these processors has its own state; the processors are never shared between pipelines. For example, if the “queued_retry” processor is used in several pipelines, each pipeline will have its own queue (although the queues will be configured exactly the same way if they reference the same key in the config file). As an example, given the following config: + +```yaml +processors: + queued_retry: + size: 50 + per-exporter: true + enabled: true + +service: + pipelines: + traces: # a pipeline of “traces” type + receivers: [zipkin] + processors: [queued_retry] + exporters: [jaeger] + traces/2: # another pipeline of “traces” type + receivers: [opencensus] + processors: [queued_retry] + exporters: [opencensus] +``` + +When the Collector loads this config the result will look like this: + +![Processors](images/design-processors.png) + +Note that each “queued_retry” processor is an independent instance, although both are configured the same way, i.e. each has a size of 50.
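To make the processor idea concrete, here is a hedged sketch of a trivial in-place transformation written against the vendored `pdata` API shown earlier in this change. It is not a real Collector processor (those implement the Collector's consumer interfaces and are registered via factories), and it assumes a `Name()` getter alongside the `SetName` setter used in the tests above:

```go
package example

import "go.opentelemetry.io/collector/consumer/pdata"

// nameSpans shows the kind of in-place mutation a processor performs: it
// walks every span and gives unnamed spans a placeholder name and an error
// status. A real processor would also forward the data to the next consumer
// in the pipeline; that part is omitted here.
func nameSpans(td pdata.Traces) {
	rss := td.ResourceSpans()
	for i := 0; i < rss.Len(); i++ {
		ilss := rss.At(i).InstrumentationLibrarySpans()
		for j := 0; j < ilss.Len(); j++ {
			spans := ilss.At(j).Spans()
			for k := 0; k < spans.Len(); k++ {
				span := spans.At(k)
				if span.Name() == "" { // Name() assumed to mirror SetName()
					span.SetName("unnamed")
					span.Status().SetCode(pdata.StatusCodeError)
				}
			}
		}
	}
}
```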
+
+The same processor name can be referenced in the “processors” key of multiple pipelines. In this case the same configuration will be used for each of these processors, however each pipeline will always get its own instance of the processor. Each of these processors will have its own state; the processors are never shared between pipelines. For example, if the “queued_retry” processor is used in several pipelines, each pipeline will have its own queue (although the queues will be configured exactly the same way if they reference the same key in the config file). As an example, given the following config:
+
+```yaml
+processors:
+  queued_retry:
+    size: 50
+    per-exporter: true
+    enabled: true
+
+service:
+  pipelines:
+    traces: # a pipeline of “traces” type
+      receivers: [zipkin]
+      processors: [queued_retry]
+      exporters: [jaeger]
+    traces/2: # another pipeline of “traces” type
+      receivers: [opencensus]
+      processors: [queued_retry]
+      exporters: [opencensus]
+```
+
+When the Collector loads this config the result will look like this:
+
+![Processors](images/design-processors.png)
+
+Note that each “queued_retry” processor is an independent instance, although both are configured the same way, i.e. each has a size of 50.
+
+## Running as an Agent
+
+On a typical VM/container, there are user applications running in some
+processes/pods with OpenTelemetry Library (Library). Previously, Library did
+all the recording, collecting, sampling and aggregation on spans/stats/metrics,
+and exported them to other persistent storage backends via the Library
+exporters, or displayed them on local zpages. This pattern has several
+drawbacks, for example:
+
+1. For each OpenTelemetry Library, exporters/zpages need to be re-implemented
+   in native languages.
+2. In some programming languages (e.g. Ruby, PHP), it is difficult to do the
+   stats aggregation in process.
+3. To enable exporting OpenTelemetry spans/stats/metrics, application users
+   need to manually add library exporters and redeploy their binaries. This is
+   especially difficult when there’s already an incident and users want to use
+   OpenTelemetry to investigate what’s going on right away.
+4. Application users need to take the responsibility of configuring and
+   initializing exporters. This is error-prone (e.g. they may not set up the
+   correct credentials/monitored resources), and users may be reluctant to
+   “pollute” their code with OpenTelemetry.
+
+To resolve the issues above, you can run the OpenTelemetry Collector as an Agent.
+The Agent runs as a daemon in the VM/container and can be deployed independently
+of Library. Once the Agent is deployed and running, it should be able to retrieve
+spans/stats/metrics from Library and export them to other backends. We MAY also
+give the Agent the ability to push configurations (e.g. sampling probability) to
+Library. For those languages that cannot do stats aggregation in process, they
+should also be able to send raw measurements and have the Agent do the aggregation.
+
+TODO: update the diagram below.
+
+![agent-architecture](https://user-images.githubusercontent.com/10536136/48792454-2a69b900-eca9-11e8-96eb-c65b2b1e4e83.png)
+
+For developers/maintainers of other libraries: the Agent can also
+accept spans/stats/metrics from other tracing/monitoring libraries, such as
+Zipkin, Prometheus, etc. This is done by adding specific receivers. See
+[Receivers](#receivers) for details.
+
+## Running as a Standalone Collector
+
+The OpenTelemetry Collector can run as a standalone instance that receives spans
+and metrics exported by one or more Agents or Libraries, or by
+tasks/agents that emit in one of the supported protocols. The Collector
+sends the data to the configured exporter(s). The following figure
+summarizes the deployment architecture:
+
+TODO: update the diagram below.
+
+![OpenTelemetry Collector Architecture](https://user-images.githubusercontent.com/10536136/46637070-65f05f80-cb0f-11e8-96e6-bc56468486b3.png "OpenTelemetry Collector Architecture")
+
+The OpenTelemetry Collector can also be deployed in other configurations, such
+as receiving data from other agents or clients in one of the formats supported
+by its receivers.
+
+
+### OpenCensus Protocol
+
+TODO: move this section somewhere else, since this document is intended to describe functionality that is not protocol-specific.
+
+The OpenCensus Protocol uses a bi-directional gRPC
+stream. The Sender should initiate the connection, since there’s only one
+dedicated port for the Agent, while there could be multiple instrumented processes. By default, the Collector is available on port 55678.
+
+#### Protocol Workflow
+
+1. Sender will try to directly establish connections for the Config and Export
+   streams.
+2. As the first message in each stream, Sender must send its identifier. Each
+   identifier should uniquely identify Sender within the VM/container. If
+   there is no identifier in the first message, Collector should drop the whole
+   message and return an error to the client. In addition, the first message
+   MAY contain additional data (such as `Span`s). As long as it has a valid
+   identifier associated, Collector should handle the data properly, as if it
+   were sent in a subsequent message. The identifier is no longer needed once the
+   streams are established.
+3. On the Sender side, if the connection to Collector fails, Sender should retry
+   indefinitely if possible, subject to available/configured memory buffer size.
+   (Reason: consider environments where the running applications are already
+   instrumented with OpenTelemetry Library but Collector is not deployed yet.
+   Sometime in the future, we can simply roll out the Collector to those
+   environments and Library would automatically connect to Collector with
+   indefinite retries. Zero changes are required to the applications.)
+   Depending on the language and implementation, the retry can be done in either
+   a background or a daemon thread. Retries should be performed at a fixed
+   frequency (rather than with exponential backoff) to have a deterministic expected
+   connect time.
+4. On the Collector side, if an established stream is disconnected, the identifier of
+   the corresponding Sender is considered expired. Sender needs to
+   start a new connection with a unique identifier (which MAY be different from the
+   previous one).
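+
+To make the Agent/Collector layering described above concrete, the following is
+a minimal sketch of an agent configuration that forwards everything it receives
+to a standalone Collector. The address `collector.example.com:55678` is a
+hypothetical placeholder; see `examples/demo` for a complete setup:
+
+```yaml
+receivers:
+  opencensus:
+    endpoint: "0.0.0.0:55678" # accept data from local Libraries
+
+exporters:
+  opencensus:
+    endpoint: "collector.example.com:55678" # hypothetical standalone Collector
+    insecure: true
+
+service:
+  pipelines:
+    traces:
+      receivers: [opencensus]
+      processors: []
+      exporters: [opencensus]
+```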
diff --git a/internal/otel_collector/docs/images/design-exporters.png b/internal/otel_collector/docs/images/design-exporters.png
new file mode 100644
index 00000000000..0904403388e
Binary files /dev/null and b/internal/otel_collector/docs/images/design-exporters.png differ
diff --git a/internal/otel_collector/docs/images/design-pipelines.png b/internal/otel_collector/docs/images/design-pipelines.png
new file mode 100644
index 00000000000..1b58d7fc0d9
Binary files /dev/null and b/internal/otel_collector/docs/images/design-pipelines.png differ
diff --git a/internal/otel_collector/docs/images/design-processors.png b/internal/otel_collector/docs/images/design-processors.png
new file mode 100644
index 00000000000..55fe128568d
Binary files /dev/null and b/internal/otel_collector/docs/images/design-processors.png differ
diff --git a/internal/otel_collector/docs/images/design-receivers.png b/internal/otel_collector/docs/images/design-receivers.png
new file mode 100644
index 00000000000..dc22432c82a
Binary files /dev/null and b/internal/otel_collector/docs/images/design-receivers.png differ
diff --git a/internal/otel_collector/docs/images/design-service-lifecycle.png b/internal/otel_collector/docs/images/design-service-lifecycle.png
new file mode 100644
index 00000000000..ad55af383f0
Binary files /dev/null and b/internal/otel_collector/docs/images/design-service-lifecycle.png differ
diff --git a/internal/otel_collector/docs/images/opentelemetry-service-deployment-models.png b/internal/otel_collector/docs/images/opentelemetry-service-deployment-models.png
new file mode 100644
index 00000000000..b977c0de015
Binary files /dev/null and b/internal/otel_collector/docs/images/opentelemetry-service-deployment-models.png differ
diff --git a/internal/otel_collector/docs/images/zpages-example.png b/internal/otel_collector/docs/images/zpages-example.png
new file mode 100644
index 00000000000..168004dd6fc
Binary files /dev/null and b/internal/otel_collector/docs/images/zpages-example.png differ
diff --git a/internal/otel_collector/docs/metric-metadata.md b/internal/otel_collector/docs/metric-metadata.md
new file mode 100644
index 00000000000..03b5d6dee20
--- /dev/null
+++ b/internal/otel_collector/docs/metric-metadata.md
@@ -0,0 +1,19 @@
+# Metric Receiver Metadata
+
+Receivers can contain a `metadata.yaml` file that documents the metrics that may be emitted by the receiver.
+
+Current examples:
+
+* [hostmetricsreceiver](../receiver/hostmetricsreceiver/metadata.yaml)
+
+See [metric-metadata.yaml](metric-metadata.yaml) for file format documentation.
+
+If adding a new receiver, a `codegen.go` file should also be added to trigger the generation. See below for details.
+
+## Build
+
+When `go generate` is run (it is run automatically in the make build targets), there are a few special build directives in `codegen.go` files:
+
+`make install-tools` results in `cmd/mdatagen` being installed to `GOBIN`.
+
+[/receiver/hostmetricsreceiver/codegen.go](../receiver/hostmetricsreceiver/codegen.go) runs `mdatagen` for the `hostmetricsreceiver` `metadata.yaml`, which generates the [/receiver/hostmetricsreceiver/internal/metadata](../receiver/hostmetricsreceiver/internal/metadata) package containing Go files with metric and label metadata.
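+
+As an illustration, a receiver's `metadata.yaml` might describe a single metric
+roughly as follows (a sketch loosely modeled on the hostmetrics CPU scraper;
+names and values are illustrative, and the authoritative format is documented
+in [metric-metadata.yaml](metric-metadata.yaml)):
+
+```yaml
+name: hostmetricsreceiver/cpu
+
+labels:
+  state:
+    description: Breakdown of CPU usage by type.
+    enum: [idle, system, user, wait]
+
+metrics:
+  system.cpu.time:
+    description: Total CPU seconds broken down by different states.
+    unit: s
+    data:
+      type: double sum
+      monotonic: true
+      aggregation: cumulative
+    labels: [state]
+```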
diff --git a/internal/otel_collector/docs/metric-metadata.yaml b/internal/otel_collector/docs/metric-metadata.yaml
new file mode 100644
index 00000000000..e469e3d64ba
--- /dev/null
+++ b/internal/otel_collector/docs/metric-metadata.yaml
@@ -0,0 +1,33 @@
+# Required: name of the receiver.
+name:
+
+# Optional: map of label definitions with the key being the label name and value
+# being described below.
+labels:
+  label.name:
+    # Optional: if the label name as described by the key is not the actual label
+    # value to be reported that value can be overridden here.
+    value:
+    # Required: description of the label.
+    description:
+    # Optional: array of label values if they are static values.
+    enum:
+
+# Required: map of metric names with the key being the metric name and value
+# being described below.
+metrics:
+  metric.name:
+    # Required: metric description.
+    description:
+    # Required: metric unit as defined by https://ucum.org/ucum.html
+    unit:
+    # Required
+    data:
+      # Required: one of int gauge, int sum, int histogram, double gauge, double sum, or double histogram.
+      type:
+      # Required for int sum and double sum.
+      monotonic: # true | false
+      # Required for int sum, int histogram, double sum, and double histogram.
+      aggregation: # delta | cumulative
+    # Optional: array of labels that were defined in the labels section that are emitted by this metric.
+    labels:
diff --git a/internal/otel_collector/docs/migrating-from-opencensus.md b/internal/otel_collector/docs/migrating-from-opencensus.md
new file mode 100644
index 00000000000..f8a3a952e44
--- /dev/null
+++ b/internal/otel_collector/docs/migrating-from-opencensus.md
@@ -0,0 +1,47 @@
+## Action Plan for Bootstrapping from OpenCensus
+
+### Goals
+We need to bootstrap the OpenTelemetry Collector using the existing OpenCensus Service codebase. We agreed to split the Service codebase into 2 parts: core and contrib. This bootstrapping is a good opportunity to do the splitting by only including in the OpenTelemetry Collector core the minimum number of receivers and exporters and moving the rest of the functionality to a contrib package (most vendor-specific code).
+
+The contrib package and vendor-specific receivers and exporters will continue to be available and there is no intent to retire them. The intent is to have a clear decoupling in the codebase that facilitates independent contribution of new components in the future, allows easily creating customized versions of a Service, and makes it clear that core contributors will be responsible for maintenance of the core while vendor-specific components will be maintained by the corresponding vendors (note: this does not exclude dual participation at all - some developers will likely work for vendors and will also be core maintainers).
+
+# Migration Tasks
+
+This is the action plan that also shows the progress. Tick the boxes after each task is complete.
+
+[X] Copy all commits from https://github.com/census-instrumentation/opencensus-service to https://github.com/open-telemetry/opentelemetry-service
+Make sure commit history is preserved.
+
+[X] Remove receivers and exporters that are not part of core. We will keep the following in the core:
+
+- Prometheus
+- Jaeger (agent and collector ones)
+- Zipkin
+- OpenCensus, temporarily until the OpenTelemetry one is available (we may want to keep OC for longer to facilitate migrations)
+
+[ ] Cleanly decouple `core` from `cmd` in the repository. `core` will contain all business logic. `cmd` will be just a main.go that executes the business logic and compiles to the `otsvc` executable.
+
+`otsvc` will only include receivers and exporters which we consider to be part of the core.
+
+The new codebase will contain improvements which are already in progress and which are aimed at making the codebase extensible and enabling the split into core and contrib. This includes 3 initiatives:
+
+- Decoupling of receiver and exporter implementations from the core logic.
+
+- Introduction of receiver and exporter factories that can be individually registered to activate them.
+
+- Implementation of the [new configuration format](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#) that makes use of factories and allows for greater flexibility in the configuration.
+
+The functionality of the new `otsvc` will heavily lean on the existing implementation and will be mostly a superset of the current agent/collector functionality when considering core receivers and exporters only (however we will allow deviations if it saves significant implementation effort and makes the service better).
+
+[ ] Provide guidelines and example implementations for vendors to follow when they add new receivers and exporters to the contrib package.
+
+[ ] Create a new repository for contrib and copy all commits from https://github.com/census-instrumentation/opencensus-service to https://github.com/open-telemetry/opentelemetry-service
+Make sure commit history is preserved.
+
+[ ] Clean up the `contrib` repo to only contain additional vendor-specific receivers and exporters.
+
+(Note: alternatively `contrib` can be a directory in the main repo - this is still open for discussion).
+
+[ ] Provide OpenCensus-to-OpenTelemetry Collector migration guidelines for end-users who want to migrate. This will include recommendations on configuration file migration. We will also consider the possibility of supporting the old configuration format in the new binary.
+
+This approach allows us to have significant progress towards 2 stated goals in our [vision document](./vision.md): unify the codebase for agent and collector and make the service more extensible.
diff --git a/internal/otel_collector/docs/monitoring.md b/internal/otel_collector/docs/monitoring.md
new file mode 100644
index 00000000000..d86f3f560fd
--- /dev/null
+++ b/internal/otel_collector/docs/monitoring.md
@@ -0,0 +1,69 @@
+# Monitoring
+
+The Collector provides many metrics for monitoring itself. Some key
+recommendations for alerting and monitoring are listed below. All metrics
+referenced below assume the `--new-metrics` option, which is enabled by
+default.
+
+## Critical Monitoring
+
+### Data Loss
+
+Use the rate of `otelcol_processor_dropped_spans > 0` and
+`otelcol_processor_dropped_metric_points > 0` to detect data loss. Depending on
+the requirements, set up a minimal time window before alerting, to avoid
+notifications for small losses that are not considered outages or are within the
+desired reliability level.
+
+### Low on CPU Resources
+
+This depends on the CPU metrics available on the deployment, e.g.:
+`kube_pod_container_resource_limits_cpu_cores` for Kubernetes. Let's call it
+`available_cores` below. The idea here is to have an upper bound of the number
+of available cores, and the maximum expected ingestion rate considered safe,
+let's call it `safe_rate`, per core. This should trigger an increase of
+resources/instances (or raise an alert as appropriate) whenever
+`(actual_rate/available_cores) > safe_rate`.
+
+The `safe_rate` depends on the specific configuration being used.
+// TODO: Provide reference `safe_rate` for a few selected configurations.
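+
+As an illustration, the data-loss recommendation above could be expressed as a
+Prometheus alerting rule along these lines (a sketch; the rate window, `for`
+duration and labels are placeholders that must be tuned per deployment):
+
+```yaml
+groups:
+  - name: otel-collector
+    rules:
+      - alert: OtelColDroppingSpans
+        # Fire only on sustained drops, not one-off blips.
+        expr: rate(otelcol_processor_dropped_spans[5m]) > 0
+        for: 10m
+        labels:
+          severity: critical
+        annotations:
+          summary: "OpenTelemetry Collector is dropping spans"
+```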
+
+## Secondary Monitoring
+
+### Queue Length
+
+The `queued_retry` processor is recommended as the retry mechanism for the
+Collector and as such should be used in any production deployment.
+The `queued_retry` processor provides the
+`otelcol_processor_queued_retry_queue_length` metric, among others.
+When this metric is growing constantly it is an indication that the Collector
+is not able to send data as fast as it receives it.
+This will precede data loss and can also indicate a Collector low on resources.
+
+### Receive Failures
+
+Sustained rates of `otelcol_receiver_refused_spans` and
+`otelcol_receiver_refused_metric_points` indicate too many errors returned to
+clients. Depending on the deployment and the clients’ resilience this may
+indicate data loss at the clients.
+
+Sustained rates of `otelcol_exporter_send_failed_spans` and
+`otelcol_exporter_send_failed_metric_points` indicate that the Collector is not
+able to export data as expected.
+This doesn't imply data loss per se, since there could be retries, but a high rate
+of failures could indicate issues with the network or the backend receiving the
+data.
+
+## Data Flow
+
+### Data Ingress
+
+The `otelcol_receiver_accepted_spans` and
+`otelcol_receiver_accepted_metric_points` metrics provide information about
+the data ingested by the Collector.
+
+### Data Egress
+
+The `otelcol_exporter_sent_spans` and
+`otelcol_exporter_sent_metric_points` metrics provide information about
+the data exported by the Collector.
diff --git a/internal/otel_collector/docs/observability.md b/internal/otel_collector/docs/observability.md
new file mode 100644
index 00000000000..bf33f082e5f
--- /dev/null
+++ b/internal/otel_collector/docs/observability.md
@@ -0,0 +1,91 @@
+# OpenTelemetry Collector Observability
+
+## Goal
+
+The goal of this document is to have a comprehensive description of the observability of the Collector and of the changes needed to achieve the observability part of our [vision](vision.md).
+
+## What Needs Observation
+
+The following elements of the Collector need to be observable.
+
+### Current Values
+
+- Resource consumption: CPU, RAM (in the future also IO - if we implement persistent queues) and any other metrics that may be available to Go apps (e.g. garbage size, etc).
+
+- Receiving data rate, broken down by receivers and by data type (traces/metrics).
+
+- Exporting data rate, broken down by exporters and by data type (traces/metrics).
+
+- Data drop rate due to throttling, broken down by data type.
+
+- Data drop rate due to invalid data received, broken down by data type.
+
+- Current throttling state: Not Throttled/Throttled by Downstream/Internally Saturated.
+
+- Incoming connection count, broken down by receiver.
+
+- Incoming connection rate (new connections per second), broken down by receiver.
+
+- In-memory queue size (in bytes and in units). Note: measurements in bytes may be difficult / expensive to obtain and should be used cautiously.
+
+- Persistent queue size (when supported).
+
+- End-to-end latency (from receiver input to exporter output). Note that with multiple receivers/exporters we potentially have NxM data paths, each with different latency (plus different pipelines in the future), so realistically we should likely expose the average of all data paths (perhaps broken down by pipeline).
+
+- Latency broken down by pipeline elements (including exporter network roundtrip latency for request/response protocols).
+
+“Rate” values must reflect the average rate of the last 10 seconds. Rates must
+be exposed in bytes/sec and units/sec (e.g. spans/sec).
+
+Note: some of the current values and rates may be calculated as derivatives of cumulative values in the backend, so it is an open question whether we want to expose them separately or not.
+
+### Cumulative Values
+
+- Total received data, broken down by receivers and by data type (traces/metrics).
+
+- Total exported data, broken down by exporters and by data type (traces/metrics).
+
+- Total dropped data due to throttling, broken down by data type.
+
+- Total dropped data due to invalid data received, broken down by data type.
+
+- Total incoming connection count, broken down by receiver.
+
+- Uptime since start.
+
+### Trace or Log on Events
+
+We want to generate the following events (log and/or send as a trace with additional data):
+
+- Collector started/stopped.
+
+- Collector reconfigured (if we support on-the-fly reconfiguration).
+
+- Begin dropping due to throttling (include the throttling reason, e.g. local saturation, downstream saturation, downstream unavailable, etc).
+
+- Stop dropping due to throttling.
+
+- Begin dropping due to invalid data (include a sample of the first invalid data).
+
+- Stop dropping due to invalid data.
+
+- Crash detected (differentiate clean stopping and crash, possibly include crash data if available).
+
+For begin/stop events we need to define an appropriate hysteresis to avoid generating too many events. Note that begin/stop events cannot be detected in the backend simply as derivatives of current rates; the events include additional data that is not present in the current value.
+
+### Host Metrics
+
+The service should collect host resource metrics in addition to the service's own process metrics. This may help to understand whether a problem observed in the service is induced by a different process on the same host.
+
+## How We Expose Metrics/Traces
+
+The Collector configuration must allow specifying the target for its own metrics/traces (which can be different from the target of the collected data). The metrics and traces must be clearly tagged to indicate that they are the service’s own metrics (to avoid conflating them with collected data in the backend).
+
+### Impact
+
+We need to be able to assess the impact of these observability improvements on the core performance of the Collector.
+
+### Configurable Level of Observability
+
+Some of the metrics/traces can be high volume and may not be desirable to always observe. We should consider adding an observability verbosity “level” that allows configuring the Collector to send more or less observability data (or even finer granularity to allow turning specific metrics on/off).
+
+The default level of observability must be defined in a way that has insignificant performance impact on the service.
diff --git a/internal/otel_collector/docs/performance.md b/internal/otel_collector/docs/performance.md
new file mode 100644
index 00000000000..98901196cd0
--- /dev/null
+++ b/internal/otel_collector/docs/performance.md
@@ -0,0 +1,72 @@
+# OpenTelemetry Collector Performance
+
+The performance numbers that follow were generated using version 0.1.3 of the
+OpenTelemetry Collector, are applicable primarily to the OpenTelemetry Collector and
+are measured only for traces. In the future, more configurations will be tested.
+
+Note that with the OpenTelemetry Agent you can expect performance as good as, if
+not better than, the Collector's, with lower resource utilization.
+This is because the OpenTelemetry Agent does not
+today support features such as batching or retries and will not support
+tail_sampling.
+
+It is important to note that the performance of the OpenTelemetry Collector depends
+on a variety of factors including:
+
+* The receiving format: OpenTelemetry (55678), Jaeger thrift (14268) or Zipkin v2 JSON (9411)
+* The size of the spans (tests are based on the number of attributes): 20
+* Whether tail_sampling is enabled or not
+* CPU / Memory allocation
+* Operating System: Linux
+
+## Testing
+
+Testing was completed on Linux using the [Synthetic Load Generator
+utility](https://github.com/Omnition/synthetic-load-generator) running for a
+minimum of one hour (i.e. sustained rate). You can reproduce these results in
+your own environment using the parameters described in this document. It is
+important to note that this utility has a few configurable parameters which can
+impact the results of the tests. The parameters used are defined below.
+
+* FlushInterval(ms) [default: 1000]
+* MaxQueueSize [default: 100]
+* SubmissionRate(spans/sec): 100,000
+
+## Results without tail-based sampling
+
+| Span<br>Format | CPU<br>(2+ GHz) | RAM<br>(GB) | Sustained<br>Rate | Recommended<br>Maximum |
+| :---: | :---: | :---: | :---: | :---: |
+| OpenTelemetry | 1 | 2 | ~12K | 10K |
+| OpenTelemetry | 2 | 4 | ~24K | 20K |
+| Jaeger Thrift | 1 | 2 | ~14K | 12K |
+| Jaeger Thrift | 2 | 4 | ~27.5K | 24K |
+| Zipkin v2 JSON | 1 | 2 | ~10.5K | 9K |
+| Zipkin v2 JSON | 2 | 4 | ~22K | 18K |
+
+If you are NOT using tail-based sampling and you need higher rates, you can:
+
+* Divide traffic to different collectors (e.g. by region)
+* Scale up by adding more resources (CPU/RAM)
+* Scale out by putting one or more collectors behind a load balancer or k8s
+service
+
+## Results with tail-based sampling
+
+> Note: Additional memory is required for tail-based sampling
+
+| Span<br>Format | CPU<br>(2+ GHz) | RAM<br>(GB) | Sustained<br>Rate | Recommended<br>Maximum |
+| :---: | :---: | :---: | :---: | :---: |
+| OpenTelemetry | 1 | 2 | ~9K | 8K |
+| OpenTelemetry | 2 | 4 | ~18K | 16K |
+| Jaeger Thrift | 1 | 6 | ~11.5K | 10K |
+| Jaeger Thrift | 2 | 8 | ~23K | 20K |
+| Zipkin v2 JSON | 1 | 6 | ~8.5K | 7K |
+| Zipkin v2 JSON | 2 | 8 | ~16K | 14K |
+
+If you are using tail-based sampling and you need higher rates, you can:
+
+* Scale up by adding more resources (CPU/RAM)
+* Scale out by putting one or more collectors behind a load balancer or k8s
+service, but the load balancer must support traceID-based routing (i.e. all
+spans for a given traceID need to be received by the same collector instance)
diff --git a/internal/otel_collector/docs/release.md b/internal/otel_collector/docs/release.md
new file mode 100644
index 00000000000..092c031b722
--- /dev/null
+++ b/internal/otel_collector/docs/release.md
@@ -0,0 +1,39 @@
+# OpenTelemetry Collector Release Procedure
+
+Collector build and testing is currently fully automated. However, there are still certain operations that need to be performed manually in order to make a release.
+
+We release both the core and contrib collectors with the same versions, where the contrib release uses the core release as a dependency. We’ve divided this process into two sections. A release engineer must first release the Core collector and then the Contrib collector.
+
+Important: Note that you’ll need to be able to sign git commits/tags in order to be able to release a collector version. Follow [this guide](https://docs.github.com/en/github/authenticating-to-github/signing-commits) to set it up.
+
+Note: You’ll need to be an approver for both repos in order to be able to make the release. This is required as you’ll need to push tags and commits directly to the upstream repo.
+
+## Releasing OpenTelemetry Core
+
+1. Update Contrib to use the latest in-development version of Core. Run `make update-otel` in the Contrib root directory and if it results in any changes, submit a PR. Get the PR approved and merged. This is to ensure that the latest core does not break contrib in any way. We’ll update it once more to the final release number later. Make sure contrib builds and end-to-end tests pass successfully after being merged and -dev docker images are published.
+
+1. Determine the version number that will be assigned to the release. The Collector uses semver, with the exception that while we are still in the Beta stage breaking changes are allowed without incrementing the major version number. For breaking changes we increment the minor version number and set the patch number to 0.
+
+1. Prepare Core for release. Update the CHANGELOG.md file and rename the Unreleased section to the new release name. Add a new unreleased section at the top.
+
+   Use the commit history feature to get the list of commits since the last release to help understand what should be in the release notes, e.g.: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/${last_release}...master. Submit a PR with the changes and get the PR approved and merged.
+
+1. Make sure the current master branch build passes successfully (Core and Contrib). For Contrib also check that the spawn-stability-tests-job triggered by the main build-publish job also passes. Check that the corresponding "-dev" images exist in Dockerhub (Core and Contrib).
+
+1. Create a branch named `release/<version>` (e.g. release/v0.4.x) in Core from the changelog update commit and push it to origin (not your fork). Wait for the release branch builds to pass successfully.
+
+1. Tag all the modules with the new release version by running the `make add-tag` command (e.g. `make add-tag TAG=v0.4.0`). Push the tags to origin (not your fork) with `git push --tags origin` (assuming origin refers to the upstream open-telemetry project). Wait for the new tag build to pass successfully. This build will push new docker images to https://hub.docker.com/repository/docker/otel/opentelemetry-collector, create a Github release for the tag and push all the build artifacts to the Github release.
+
+1. Edit the newly auto-created Github release and copy the release notes from the CHANGELOG.md file to the release. This step should be automated. CI can pull the release notes from the changelog and use them as the body when creating the new release.
+
+## Releasing OpenTelemetry Contrib
+
+1. Prepare Contrib for release. Update the CHANGELOG.md file and rename the Unreleased section to the new release name. Add a new unreleased section at the top. Refer to the Core release notes (assuming the previous release of Core and Contrib was also performed simultaneously), and in addition to that list the changes that happened in the Contrib repo.
+
+1. Update the Core dependency to the Core version we just released with the `make update-otel` command, e.g. `make update-otel OTEL_VERSION=v0.4.0`. Create a PR with both changes, get it approved and merged.
+
+1. Create a branch named `release/<version>` (e.g. release/v0.4.x) in Contrib from the changelog update commit and push it to origin (not your fork). Wait for the release branch builds to pass successfully.
+
+1. Tag all the modules with the new release version by running the `make add-tag` command (e.g. `make add-tag TAG=v0.4.0`). Push the tags to origin (not your fork) with `git push --tags origin` (assuming origin refers to the upstream open-telemetry project). Wait for the new tag build to pass successfully. This build will push new docker images to https://hub.docker.com/repository/docker/otel/opentelemetry-collector-contrib, create a Github release for the tag and push all the build artifacts to the Github release.
+
+1. Edit the newly auto-created Github release and copy the release notes from the CHANGELOG.md file to the release. This step should be automated. CI can pull the release notes from the changelog and use them as the body when creating the new release.
diff --git a/internal/otel_collector/docs/roadmap.md b/internal/otel_collector/docs/roadmap.md
new file mode 100644
index 00000000000..7583ca36943
--- /dev/null
+++ b/internal/otel_collector/docs/roadmap.md
@@ -0,0 +1,27 @@
+# Long-term Roadmap
+
+This long-term roadmap (draft) is a vision document that reflects our
+current desires. It is not a commitment to implement everything listed in this roadmap.
+The primary purpose of this document is to ensure that all contributors work in alignment.
+As our vision changes over time, maintainers reserve the right to add, modify, and _remove_
+items from this roadmap.
+
+Description|Status|Links|
+-----------|------|-----|
+**Testing**|
+Metrics correctness tests|In progress|[#652](https://github.com/open-telemetry/opentelemetry-collector/issues/652)
+| |
+**New Formats**|
+Complete OTLP/HTTP support| |[#882](https://github.com/open-telemetry/opentelemetry-collector/issues/882)
+Add logs support for all primary core processors (attributes, batch, k8s_tagger, etc)|In progress|
+| |
+**5 Min to Value**|
+Distribution packages for most common targets (e.g. Docker, RPM, Windows, etc)|
+Detection and collection of environment metrics and tags on AWS||
+Detection and collection of k8s telemetry|In progress|
+Host metric collection|In progress|
+Support more application-specific metric collection (e.g. Kafka, Hadoop, etc)|
+| |
+**Other Features**|
+Graceful shutdown (pipeline draining)| |[#483](https://github.com/open-telemetry/opentelemetry-collector/issues/483)
+Deprecate queue retry processor and enable queuing per exporter by default||[#1721](https://github.com/open-telemetry/opentelemetry-collector/issues/1721)
diff --git a/internal/otel_collector/docs/service-extensions.md b/internal/otel_collector/docs/service-extensions.md
new file mode 100644
index 00000000000..1ae5b9c4ca9
--- /dev/null
+++ b/internal/otel_collector/docs/service-extensions.md
@@ -0,0 +1,146 @@
+# OpenTelemetry Collector: Extensions
+
+Besides the pipeline elements (receivers, processors, and exporters), the Collector
+uses various service extensions (e.g.: healthcheck, z-pages, etc).
+This document describes the “extensions” design and how they are implemented.
+
+## Configuration and Interface
+
+The configuration follows the same pattern used for pipelines: a base
+configuration type and the creation of factories to instantiate the extension
+objects.
+
+In order to support generic service extensions, an interface is defined
+so the service can interact uniformly with them. At a minimum, service extensions
+need to implement the interface that covers Start and Shutdown.
+
+In addition to this base interface there is support to notify extensions when
+pipelines are “ready” and when they are about to be stopped, i.e.: “not ready”
+to receive data. These are a necessary addition to allow implementing extensions
+that indicate to LBs and external systems whether the service instance is ready
+to receive data
+(e.g.: a [k8s readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#define-readiness-probes)).
+These state changes are under the control of the service application hosting
+the extensions.
+
+There are more complex scenarios in which there can be notifications of state
+changes from the extensions to their host. These more complex cases are not
+supported at this moment, but this design doesn’t prevent such extensions in the
+future[^1].
+
+
+## Collector State and Extensions
+
+The diagram below shows the basic state transitions of the OpenTelemetry Collector
+and how it interacts with the service extensions.
+
+![ServiceLifeCycle](images/design-service-lifecycle.png)
+
+
+## Configuration
+
+The config package will be extended to load the service extensions when the
+configuration is loaded. The settings for service extensions will live in the
+same configuration file as the pipeline elements. Below is an example of how
+these sections would look in the configuration file:
+
+```yaml
+
+# Example of the extensions available with the core Collector. The list below
+# includes all configurable options and their respective default value.
+extensions:
+  health_check:
+    port: 13133
+  pprof:
+    endpoint: "localhost:1777"
+    block_profile_fraction: 0
+    mutex_profile_fraction: 0
+  zpages:
+    endpoint: "localhost:55679"
+
+# The service lists extensions not directly related to data pipelines, but used
+# by the service.
+service:
+  # extensions lists the extensions added to the service. They are started
+  # in the order presented below and stopped in the reverse order.
+  extensions: [health_check, pprof, zpages]
+```
+
+The configuration base type does not share any common fields.
+
+The configuration, analogous to pipelines, allows multiple extensions of
+the same type. Implementers of extensions that can only execute as a single
+instance need to take care to return an error in that case. (Note: the
+configuration uses composite key names in the form of `type[/name]`
+as defined in [this document](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#)).
+
+The factory follows the same pattern established for pipeline configuration:
+
+```go
+// Factory is a factory interface for extensions to the service.
+type Factory interface {
+	// Type gets the type of the extension created by this factory.
+	Type() string
+
+	// CreateDefaultConfig creates the default configuration for the extension.
+	CreateDefaultConfig() configmodels.Extension
+
+	// CreateExtension creates a service extension based on the given config.
+	CreateExtension(logger *zap.Logger, cfg configmodels.Extension) (ServiceExtension, error)
+}
+```
+
+
+## Extension Interface
+
+The interface defined below is the minimum required for
+extensions in use on the service:
+
+```go
+// ServiceExtension is the interface for objects hosted by the OpenTelemetry Collector that
+// don't participate directly in data pipelines but provide some functionality
+// to the service, examples: health check endpoint, z-pages, etc.
+type ServiceExtension interface {
+	// Start the ServiceExtension object hosted by the given host. At this point in the
+	// process life-cycle the receivers are not started and the host did not
+	// receive any data yet.
+	Start(host Host) error
+
+	// Shutdown the ServiceExtension instance. This happens after the pipelines were
+	// shutdown.
+	Shutdown() error
+}
+
+// PipelineWatcher is an extra interface for ServiceExtension hosted by the OpenTelemetry
+// Collector that is to be implemented by extensions interested in changes to pipeline
+// states. Typically this will be used by extensions that change their behavior if data is
+// being ingested or not, e.g.: a k8s readiness probe.
+type PipelineWatcher interface {
+	// Ready notifies the ServiceExtension that all pipelines were built and the
+	// receivers were started, i.e.: the service is ready to receive data
+	// (notice that it may already have received data when this method is called).
+	Ready() error
+
+	// NotReady notifies the ServiceExtension that all receivers are about to be stopped,
+	// i.e.: pipeline receivers will not accept new data.
+	// This is sent before receivers are stopped, so the ServiceExtension can take any
+	// appropriate action before that happens.
+	NotReady() error
+}
+
+// Host represents the entity where the extension is being hosted.
+// It is used to allow communication between the extension and its host.
+type Host interface {
+	// ReportFatalError is used to report to the host that the extension
+	// encountered a fatal error (i.e.: an error that the instance can't recover
+	// from) after its start function had already returned.
+	ReportFatalError(err error)
+}
+```
+
+## Notes
+
+[^1]:
+    This can be done by adding specific interfaces to extension types that support
+    those and having the service check which of the extension instances support
+    each interface.
diff --git a/internal/otel_collector/docs/troubleshooting.md b/internal/otel_collector/docs/troubleshooting.md
new file mode 100644
index 00000000000..465695a8682
--- /dev/null
+++ b/internal/otel_collector/docs/troubleshooting.md
@@ -0,0 +1,260 @@
+# Troubleshooting
+
+## Observability
+
+The Collector offers multiple ways to measure its health
+as well as to investigate issues.
+
+### Logs
+
+Logs can be helpful in identifying issues. Always start by checking the log
+output and looking for potential issues.
+
+The verbosity level, which defaults to `INFO`, can be adjusted by passing
+the `--log-level` flag to the `otelcol` process. See `--help` for more details.
+
+```bash
+$ otelcol --log-level DEBUG
+```
+
+### Metrics
+
+Prometheus metrics are exposed locally on port `8888` at path `/metrics`.
+
+For containerized environments it may be desirable to expose this port on a
+public interface instead of just locally. The metrics address can be configured
+by passing the `--metrics-addr` flag to the `otelcol` process. See `--help` for
+more details.
+
+```bash
+$ otelcol --metrics-addr 0.0.0.0:8888
+```
+
+A Grafana dashboard for these metrics can be found
+[here](https://grafana.com/grafana/dashboards/11575).
+
+Also note that a Collector can be configured to scrape its own metrics and send
+them through configured pipelines. For example:
+
+```yaml
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: 'otelcol'
+          scrape_interval: 10s
+          static_configs:
+            - targets: ['0.0.0.0:8888']
+          metric_relabel_configs:
+            - source_labels: [ __name__ ]
+              regex: '.*grpc_io.*'
+              action: drop
+exporters:
+  logging:
+service:
+  pipelines:
+    metrics:
+      receivers: [prometheus]
+      processors: []
+      exporters: [logging]
+```
+
+### zPages
+
+The
+[zpages](https://github.com/open-telemetry/opentelemetry-collector/tree/master/extension/zpagesextension/README.md)
+extension, which, if enabled, is exposed locally on port `55679`, can be used to
+check trace operations of receivers and exporters via `/debug/tracez`. `zpages`
+may contain error logs that the Collector does not emit.
+
+For containerized environments it may be desirable to expose this port on a
+public interface instead of just locally. This can be configured via the
+extensions configuration section. For example:
+
+```yaml
+extensions:
+  zpages:
+    endpoint: 0.0.0.0:55679
+```
+
+### Local exporters
+
+[Local
+exporters](https://github.com/open-telemetry/opentelemetry-collector/tree/master/exporter#general-information)
+can be configured to inspect the data being processed by the Collector.
+
+For live troubleshooting purposes consider leveraging the `logging` exporter,
+which can be used to confirm that data is being received, processed and
+exported by the Collector.
+
+```yaml
+receivers:
+  zipkin:
+exporters:
+  logging:
+service:
+  pipelines:
+    traces:
+      receivers: [zipkin]
+      processors: []
+      exporters: [logging]
+```
+
+Get a Zipkin payload to test. For example, create a file called `trace.json`
+that contains:
+
+```json
+[
+  {
+    "traceId": "5982fe77008310cc80f1da5e10147519",
+    "parentId": "90394f6bcffb5d13",
+    "id": "67fae42571535f60",
+    "kind": "SERVER",
+    "name": "/m/n/2.6.1",
+    "timestamp": 1516781775726000,
+    "duration": 26000,
+    "localEndpoint": {
+      "serviceName": "api"
+    },
+    "remoteEndpoint": {
+      "serviceName": "apip"
+    },
+    "tags": {
+      "data.http_response_code": "201"
+    }
+  }
+]
+```
+
+With the Collector running, send this payload to the Collector. For example:
+
+```bash
+$ curl -X POST localhost:9411/api/v2/spans -H'Content-Type: application/json' -d @trace.json
+```
+
+You should see a log entry like the following from the Collector:
+
+```
+2020-11-11T04:12:33.089Z	INFO	loggingexporter/logging_exporter.go:296	TraceExporter	{"#spans": 1}
+```
+
+You can also configure the `logging` exporter so the entire payload is printed:
+
+```yaml
+exporters:
+  logging:
+    loglevel: debug
+```
+
+With the modified configuration, if you re-run the test above, the log output should look like:
+
+```
+2020-11-11T04:08:17.344Z	DEBUG	loggingexporter/logging_exporter.go:353	ResourceSpans #0
+Resource labels:
+     -> service.name: STRING(api)
+InstrumentationLibrarySpans #0
+Span #0
+    Trace ID       : 5982fe77008310cc80f1da5e10147519
+    Parent ID      : 90394f6bcffb5d13
+    ID             : 67fae42571535f60
+    Name           : /m/n/2.6.1
+    Kind           : SPAN_KIND_SERVER
+    Start time     : 2018-01-24 08:16:15.726 +0000 UTC
+    End time       : 2018-01-24 08:16:15.752 +0000 UTC
+Attributes:
+     -> data.http_response_code: STRING(201)
+```
+
+### Health Check
+
+The
+[health_check](https://github.com/open-telemetry/opentelemetry-collector/tree/master/extension/healthcheckextension/README.md)
+extension, which by default is available on all interfaces on port `13133`, can
+be used to ensure the Collector is functioning properly.
+
+```yaml
+extensions:
+  health_check:
+service:
+  extensions: [health_check]
+```
+
+It returns a response like the following:
+
+```json
+{"status":"Server available","upSince":"2020-11-11T04:12:31.6847174Z","uptime":"49.0132518s"}
+```
+
+### pprof
+
+The
+[pprof](https://github.com/open-telemetry/opentelemetry-collector/tree/master/extension/pprofextension/README.md)
+extension, which by default is available locally on port `1777`, allows you to profile the
+Collector as it runs. This is an advanced use case that should not be needed in most circumstances.
+
+## Common Issues
+
+### Collector exit/restart
+
+The Collector may exit/restart because of:
+
+- Memory pressure due to a missing or misconfigured
+  [memory_limiter](https://github.com/open-telemetry/opentelemetry-collector/blob/master/processor/memorylimiter/README.md)
+  processor.
+- [Improper sizing](https://github.com/open-telemetry/opentelemetry-collector/blob/master/docs/performance.md)
+  for the load.
+- Improper configuration (for example, a queue size configured higher
+  than available memory).
+- Infrastructure resource limits (for example Kubernetes).
+
+### Data being dropped
+
+Data may be dropped for a variety of reasons, but most commonly because of:
+
+- An [improperly sized Collector](https://github.com/open-telemetry/opentelemetry-collector/blob/master/docs/performance.md), resulting in the Collector being unable to process and export the data as fast as it is received.
+- An exporter destination that is unavailable or accepting the data too slowly.
+
+To mitigate drops, it is highly recommended to configure the
+[batch](https://github.com/open-telemetry/opentelemetry-collector/blob/master/processor/batchprocessor/README.md)
+processor. In addition, it may be necessary to configure the [queued retry
+options](https://github.com/open-telemetry/opentelemetry-collector/tree/master/exporter/exporterhelper#configuration)
+on enabled exporters.
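+
+As an example, a sketch combining both recommendations might look like the
+following. The endpoint is a placeholder, and the `sending_queue` and
+`retry_on_failure` option names follow the exporterhelper configuration linked
+above; check the exporter's README for what it actually supports:
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+processors:
+  batch:
+exporters:
+  otlp:
+    endpoint: "backend.example.com:55680" # placeholder endpoint
+    sending_queue:
+      queue_size: 5000 # buffer data while the destination is slow or briefly unavailable
+    retry_on_failure:
+      enabled: true
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+```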
+
+### Receiving data not working
+
+If you are unable to receive data, it is likely because:
+
+- There is a network configuration issue
+- The receiver configuration is incorrect
+- The receiver is defined in the `receivers` section, but not enabled in any `pipelines`
+- The client configuration is incorrect
+
+Check the Collector logs as well as `zpages` for potential issues.
+
+### Processing data not working
+
+Most processing issues are a result of either a misunderstanding of how the
+processor works or a misconfiguration of the processor.
+
+Examples of misunderstandings include:
+
+- The attributes processors only work for "tags" on spans. The span name is
+  handled by the span processor.
+- Processors for trace data (except tail sampling) work on individual spans.
+
+### Exporting data not working
+
+If you are unable to export to a destination, it is likely because:
+
+- There is a network configuration issue
+- The exporter configuration is incorrect
+- The destination is unavailable
+
+Check the Collector logs as well as `zpages` for potential issues.
+
+More often than not, exporting data does not work because of a network
+configuration issue. This could be due to a firewall, DNS, or proxy
+issue. Note that the Collector does have
+[proxy support](https://github.com/open-telemetry/opentelemetry-collector/tree/master/exporter#proxy-support).
diff --git a/internal/otel_collector/docs/vision.md b/internal/otel_collector/docs/vision.md
new file mode 100644
index 00000000000..dfe9ab01934
--- /dev/null
+++ b/internal/otel_collector/docs/vision.md
@@ -0,0 +1,25 @@
+# OpenTelemetry Collector Long-term Vision
+
+The following are high-level items that define our long-term vision for the OpenTelemetry Collector: what we aspire to achieve. This vision is our daily guidance when we design new features and make changes to the Collector.
+
+This is a living document that is expected to evolve over time.
+
+## Performant
+Highly stable and performant under varying loads. Well-behaved under extreme load, with predictable, low resource consumption.
+
+## Observable
+Expose own operational metrics in a clear way. Be an exemplar of an observable service. Allow configuring the level of observability (more or less metrics, traces, logs, etc. reported). See [more details](observability.md).
+
+## Multi-Data
+Support traces, metrics, logs and other relevant data types.
+
+## Usable Out of the Box
+Reasonable default configuration, supports popular protocols, runs and collects out of the box.
+
+## Extensible
+Extensible and customizable without touching the core code. Users can create custom agents based on the core and extend them with their own components. Welcoming 3rd-party contribution policy.
+
+## Unified Codebase
+One codebase for the daemon (Agent) and the standalone service (Collector).
+
+For more details on how we plan to achieve this vision please see the [Roadmap](roadmap.md).
\ No newline at end of file
diff --git a/internal/otel_collector/examples/README.md b/internal/otel_collector/examples/README.md
new file mode 100644
index 00000000000..1c5da97badb
--- /dev/null
+++ b/internal/otel_collector/examples/README.md
@@ -0,0 +1,5 @@
+# Examples
+
+Information on how the examples can be used can be found in the [Getting
+Started
+documentation](https://opentelemetry.io/docs/collector/getting-started/).
diff --git a/internal/otel_collector/examples/demo/.env b/internal/otel_collector/examples/demo/.env
new file mode 100644
index 00000000000..e25dbb8143a
--- /dev/null
+++ b/internal/otel_collector/examples/demo/.env
@@ -0,0 +1,2 @@
+OTELCOL_IMG=otel/opentelemetry-collector-dev:latest
+OTELCOL_ARGS=
diff --git a/internal/otel_collector/examples/demo/README.md b/internal/otel_collector/examples/demo/README.md
new file mode 100644
index 00000000000..a218ba3daad
--- /dev/null
+++ b/internal/otel_collector/examples/demo/README.md
@@ -0,0 +1,49 @@
+# OpenTelemetry Collector Demo
+
+*IMPORTANT:* This is a pre-release version of the OpenTelemetry Collector.
+
+This demo presents the typical flow of observability data with multiple
+OpenTelemetry Collectors deployed:
+
+- Applications send data directly to a Collector configured to use fewer
+  resources, aka the _agent_;
+- The agent then forwards the data to Collector(s) that receive data from
+  multiple agents. Collectors at this layer typically are allowed to use more
+  resources and queue more data;
+- The Collector then sends the data to the appropriate backends, in this demo
+  Jaeger, Zipkin, and Prometheus.
+
+This demo uses `docker-compose` and by default runs against the
+`otel/opentelemetry-collector-dev:latest` image. To run the demo, switch
+to the `examples/demo` folder and run:
+
+```shell
+docker-compose up -d
+```
+
+The demo exposes the following backends:
+
+- Jaeger at http://0.0.0.0:16686
+- Zipkin at http://0.0.0.0:9411
+- Prometheus at http://0.0.0.0:9090
+
+Notes:
+
+- It may take some time for the application metrics to appear on the Prometheus
+  dashboard.
+
+To clean up any Docker containers from the demo, run `docker-compose down` from
+the `examples/demo` folder.
+
+### Using a Locally Built Image
+Developers interested in running a local build of the Collector need to build a
+docker image using the command below:
+
+```shell
+make docker-otelcol
+```
+
+Then set the environment variable `OTELCOL_IMG` to `otelcol:latest` before
+launching the command `docker-compose up -d`.
+ + diff --git a/internal/otel_collector/examples/demo/docker-compose.yaml b/internal/otel_collector/examples/demo/docker-compose.yaml new file mode 100644 index 00000000000..612a6eefaf8 --- /dev/null +++ b/internal/otel_collector/examples/demo/docker-compose.yaml @@ -0,0 +1,83 @@ +version: "2" +services: + + # Jaeger + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" + - "14268" + - "14250" + + # Zipkin + zipkin-all-in-one: + image: openzipkin/zipkin:latest + ports: + - "9411:9411" + + # Collector + otel-collector: + image: ${OTELCOL_IMG} + command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "1888:1888" # pprof extension + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter metrics + - "13133:13133" # health_check extension + - "55678" # OpenCensus receiver + - "55670:55679" # zpages extension + depends_on: + - jaeger-all-in-one + - zipkin-all-in-one + + # Agent + otel-agent: + image: ${OTELCOL_IMG} + command: ["--config=/etc/otel-agent-config.yaml", "${OTELCOL_ARGS}"] + volumes: + - ./otel-agent-config.yaml:/etc/otel-agent-config.yaml + ports: + - "1777:1777" # pprof extension + - "8887:8888" # Prometheus metrics exposed by the agent + - "14268" # Jaeger receiver + - "55678" # OpenCensus receiver + - "55679:55679" # zpages extension + - "13133" # health_check + depends_on: + - otel-collector + + # Synthetic load generators + jaeger-emitter: + image: omnition/synthetic-load-generator:1.0.25 + environment: + - JAEGER_COLLECTOR_URL=http://otel-agent:14268 + depends_on: + - otel-agent + + zipkin-emitter: + image: omnition/synthetic-load-generator:1.0.25 + environment: + - ZIPKINV2_JSON_URL=http://otel-agent:9411/api/v2/spans + depends_on: + - otel-agent + + metrics-load-generator: + image: golang:1.12.7 + volumes: + - ./app/main.go:/usr/src/main.go + environment: + - GO111MODULE=on + - OTEL_AGENT_ENDPOINT=otel-agent:55678 + command: ["bash", "-c", "go run /usr/src/main.go"] + depends_on: + - otel-agent + + prometheus: + container_name: prometheus + image: prom/prometheus:latest + volumes: + - ./prometheus.yaml:/etc/prometheus/prometheus.yml + ports: + - "9090:9090" diff --git a/internal/otel_collector/examples/demo/otel-agent-config.yaml b/internal/otel_collector/examples/demo/otel-agent-config.yaml new file mode 100644 index 00000000000..99cde8ca540 --- /dev/null +++ b/internal/otel_collector/examples/demo/otel-agent-config.yaml @@ -0,0 +1,38 @@ +receivers: + opencensus: + zipkin: + endpoint: :9411 + jaeger: + protocols: + thrift_http: + + +exporters: + opencensus: + endpoint: "otel-collector:55678" + insecure: true + logging: + loglevel: debug + +processors: + batch: + queued_retry: + +extensions: + pprof: + endpoint: :1777 + zpages: + endpoint: :55679 + health_check: + +service: + extensions: [health_check, pprof, zpages] + pipelines: + traces: + receivers: [opencensus, jaeger, zipkin] + processors: [batch, queued_retry] + exporters: [opencensus, logging] + metrics: + receivers: [opencensus] + processors: [batch] + exporters: [logging,opencensus] diff --git a/internal/otel_collector/examples/demo/otel-collector-config.yaml b/internal/otel_collector/examples/demo/otel-collector-config.yaml new file mode 100644 index 00000000000..4eff96489ea --- /dev/null +++ b/internal/otel_collector/examples/demo/otel-collector-config.yaml @@ -0,0 +1,47 @@ +receivers: + opencensus: + +exporters: + prometheus: + 
endpoint: "0.0.0.0:8889" + namespace: promexample + const_labels: + label1: value1 + logging: + + zipkin: + endpoint: "http://zipkin-all-in-one:9411/api/v2/spans" + format: proto + + jaeger: + endpoint: jaeger-all-in-one:14250 + insecure: true + +# Alternatively, use jaeger_thrift_http with the settings below. In this case +# update the list of exporters on the traces pipeline. +# +# jaeger_thrift_http: +# url: http://jaeger-all-in-one:14268/api/traces + +processors: + batch: + queued_retry: + +extensions: + health_check: + pprof: + endpoint: :1888 + zpages: + endpoint: :55679 + +service: + extensions: [pprof, zpages, health_check] + pipelines: + traces: + receivers: [opencensus] + processors: [batch, queued_retry] + exporters: [logging, zipkin, jaeger] + metrics: + receivers: [opencensus] + processors: [batch] + exporters: [logging,prometheus] diff --git a/internal/otel_collector/examples/demo/prometheus.yaml b/internal/otel_collector/examples/demo/prometheus.yaml new file mode 100644 index 00000000000..a8477547ecf --- /dev/null +++ b/internal/otel_collector/examples/demo/prometheus.yaml @@ -0,0 +1,6 @@ +scrape_configs: + - job_name: 'otel-collector' + scrape_interval: 10s + static_configs: + - targets: ['otel-collector:8889'] + - targets: ['otel-collector:8888'] diff --git a/internal/otel_collector/examples/k8s/otel-config.yaml b/internal/otel_collector/examples/k8s/otel-config.yaml new file mode 100644 index 00000000000..c5ab4fd799d --- /dev/null +++ b/internal/otel_collector/examples/k8s/otel-config.yaml @@ -0,0 +1,245 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: otel-agent-conf + labels: + app: opentelemetry + component: otel-agent-conf +data: + otel-agent-config: | + receivers: + otlp: + protocols: + grpc: + http: + exporters: + otlp: + endpoint: "otel-collector.default:55680" # TODO: Update me + insecure: true + processors: + batch: + memory_limiter: + # Same as --mem-ballast-size-mib CLI argument + ballast_size_mib: 165 + # 80% of maximum memory up to 2G + limit_mib: 400 + # 25% of limit up to 2G + spike_limit_mib: 100 + check_interval: 5s + queued_retry: + num_workers: 4 + queue_size: 100 + retry_on_failure: true + extensions: + health_check: {} + zpages: {} + service: + extensions: [health_check, zpages] + pipelines: + traces: + receivers: [otlp] + processors: [memory_limiter, batch, queued_retry] + exporters: [otlp] +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: otel-agent + labels: + app: opentelemetry + component: otel-agent +spec: + selector: + matchLabels: + app: opentelemetry + component: otel-agent + template: + metadata: + labels: + app: opentelemetry + component: otel-agent + spec: + containers: + - command: + - "/otelcol" + - "--config=/conf/otel-agent-config.yaml" + # Memory Ballast size should be max 1/3 to 1/2 of memory. + - "--mem-ballast-size-mib=165" + image: otel/opentelemetry-collector-dev:latest + name: otel-agent + resources: + limits: + cpu: 500m + memory: 500Mi + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 55679 # ZPages endpoint. + - containerPort: 55680 # Default OpenTelemetry receiver port. + - containerPort: 8888 # Metrics. + volumeMounts: + - name: otel-agent-config-vol + mountPath: /conf + livenessProbe: + httpGet: + path: / + port: 13133 # Health Check extension default port. + readinessProbe: + httpGet: + path: / + port: 13133 # Health Check extension default port. 
+      volumes:
+        - configMap:
+            name: otel-agent-conf
+            items:
+              - key: otel-agent-config
+                path: otel-agent-config.yaml
+          name: otel-agent-config-vol
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: otel-collector-conf
+  labels:
+    app: opentelemetry
+    component: otel-collector-conf
+data:
+  otel-collector-config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+      jaeger:
+        protocols:
+          grpc:
+          thrift_http:
+      zipkin: {}
+    processors:
+      batch:
+      memory_limiter:
+        # Same as --mem-ballast-size-mib CLI argument
+        ballast_size_mib: 683
+        # 80% of maximum memory up to 2G
+        limit_mib: 1500
+        # 25% of limit up to 2G
+        spike_limit_mib: 512
+        check_interval: 5s
+      queued_retry:
+    extensions:
+      health_check: {}
+      zpages: {}
+    exporters:
+      zipkin:
+        endpoint: "http://somezipkin.target.com:9411/api/v2/spans" # Replace with a real endpoint.
+      jaeger:
+        endpoint: "somejaegergrpc.target.com:14250" # Replace with a real endpoint.
+        insecure: true
+    service:
+      extensions: [health_check, zpages]
+      pipelines:
+        traces/1:
+          receivers: [otlp, zipkin]
+          processors: [memory_limiter, batch, queued_retry]
+          exporters: [zipkin]
+        traces/2:
+          receivers: [otlp, jaeger]
+          processors: [memory_limiter, batch, queued_retry]
+          exporters: [jaeger]
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: otel-collector
+  labels:
+    app: opentelemetry
+    component: otel-collector
+spec:
+  ports:
+    - name: otlp # Default endpoint for OpenTelemetry receiver.
+      port: 55680
+      protocol: TCP
+      targetPort: 55680
+    - name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver.
+      port: 14250
+    - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
+      port: 14268
+    - name: zipkin # Default endpoint for Zipkin receiver.
+      port: 9411
+    - name: metrics # Default endpoint for querying metrics.
+      port: 8888
+  selector:
+    component: otel-collector
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: otel-collector
+  labels:
+    app: opentelemetry
+    component: otel-collector
+spec:
+  selector:
+    matchLabels:
+      app: opentelemetry
+      component: otel-collector
+  minReadySeconds: 5
+  progressDeadlineSeconds: 120
+  replicas: 1 #TODO - adjust this to your own requirements
+  template:
+    metadata:
+      labels:
+        app: opentelemetry
+        component: otel-collector
+    spec:
+      containers:
+        - command:
+            - "/otelcol"
+            - "--config=/conf/otel-collector-config.yaml"
+            # Memory Ballast size should be max 1/3 to 1/2 of memory.
+            - "--mem-ballast-size-mib=683"
+          image: otel/opentelemetry-collector-dev:latest
+          name: otel-collector
+          resources:
+            limits:
+              cpu: 1
+              memory: 2Gi
+            requests:
+              cpu: 200m
+              memory: 400Mi
+          ports:
+            - containerPort: 55679 # Default endpoint for ZPages.
+            - containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
+            - containerPort: 14250 # Default endpoint for Jaeger gRPC receiver.
+            - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
+            - containerPort: 9411 # Default endpoint for Zipkin receiver.
+            - containerPort: 8888 # Default endpoint for querying metrics.
+          volumeMounts:
+            - name: otel-collector-config-vol
+              mountPath: /conf
+#           - name: otel-collector-secrets
+#             mountPath: /secrets
+          livenessProbe:
+            httpGet:
+              path: /
+              port: 13133 # Health Check extension default port.
+          readinessProbe:
+            httpGet:
+              path: /
+              port: 13133 # Health Check extension default port.
+ volumes: + - configMap: + name: otel-collector-conf + items: + - key: otel-collector-config + path: otel-collector-config.yaml + name: otel-collector-config-vol +# - secret: +# name: otel-collector-secrets +# items: +# - key: cert.pem +# path: cert.pem +# - key: key.pem +# path: key.pem diff --git a/internal/otel_collector/examples/local/otel-config.yaml b/internal/otel_collector/examples/local/otel-config.yaml new file mode 100644 index 00000000000..c269b4179c4 --- /dev/null +++ b/internal/otel_collector/examples/local/otel-config.yaml @@ -0,0 +1,55 @@ +extensions: + health_check: + pprof: + endpoint: 0.0.0.0:1777 + zpages: + endpoint: 0.0.0.0:55679 + +receivers: + otlp: + protocols: + grpc: + http: + + opencensus: + + jaeger: + protocols: + grpc: + thrift_binary: + thrift_compact: + thrift_http: + + zipkin: + + # Collect own metrics + prometheus: + config: + scrape_configs: + - job_name: 'otel-collector' + scrape_interval: 10s + static_configs: + - targets: [ '0.0.0.0:8888' ] + +processors: + batch: + +exporters: + logging: + logLevel: debug + +service: + + pipelines: + + traces: + receivers: [otlp, opencensus, jaeger, zipkin] + processors: [batch] + exporters: [logging] + + metrics: + receivers: [otlp, opencensus, prometheus] + processors: [batch] + exporters: [logging] + + extensions: [health_check, pprof, zpages] diff --git a/internal/otel_collector/exporter/README.md b/internal/otel_collector/exporter/README.md new file mode 100644 index 00000000000..a2e09f98dd2 --- /dev/null +++ b/internal/otel_collector/exporter/README.md @@ -0,0 +1,109 @@ +# General Information + +An exporter is how data gets sent to different systems/back-ends. Generally, an +exporter translates the internal format into another defined format. + +Available trace exporters (sorted alphabetically): + +- [Jaeger](jaegerexporter/README.md) +- [Kafka](kafkaexporter/README.md) +- [OpenCensus](opencensusexporter/README.md) +- [OTLP gRPC](otlpexporter/README.md) +- [OTLP HTTP](otlphttpexporter/README.md) +- [Zipkin](zipkinexporter/README.md) + +Available metric exporters (sorted alphabetically): + +- [OpenCensus](opencensusexporter/README.md) +- [OTLP gRPC](otlpexporter/README.md) +- [OTLP HTTP](otlphttpexporter/README.md) +- [Prometheus](prometheusexporter/README.md) +- [Prometheus Remote Write](prometheusremotewriteexporter/README.md) + +Available log exporters (sorted alphabetically): + +- [OTLP gRPC](otlpexporter/README.md) +- [OTLP HTTP](otlphttpexporter/README.md) + +Available local exporters (sorted alphabetically): + +- [File](fileexporter/README.md) +- [Logging](loggingexporter/README.md) + +The [contrib +repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) +has more exporters available in its builds. + +## Configuring Exporters + +Exporters are configured via YAML under the top-level `exporters` tag. + +The following is a sample configuration for the `exampleexporter`. + +```yaml +exporters: + # Exporter 1. + # : + exampleexporter: + # : + endpoint: 1.2.3.4:8080 + # ... + # Exporter 2. + # /: + exampleexporter/settings: + # : + endpoint: 0.0.0.0:9211 +``` + +An exporter instance is referenced by its full name in other parts of the config, +such as in pipelines. A full name consists of the exporter type, '/' and the +name appended to the exporter type in the configuration. All exporter full names +must be unique. + +For the example above: + +- Exporter 1 has full name `exampleexporter`. +- Exporter 2 has full name `exampleexporter/settings`. 
+
+Exporters are enabled upon being added to a pipeline. For example:
+
+```yaml
+service:
+  pipelines:
+    # Valid pipelines are: traces, metrics or logs
+    # Trace pipeline 1.
+    traces:
+      receivers: [examplereceiver]
+      processors: []
+      exporters: [exampleexporter, exampleexporter/settings]
+    # Trace pipeline 2.
+    traces/another:
+      receivers: [examplereceiver]
+      processors: []
+      exporters: [exampleexporter, exampleexporter/settings]
+```
+
+## Data Ownership
+
+When multiple exporters are configured to send the same data (e.g. by configuring multiple
+exporters for the same pipeline), the exporters have shared access to the data.
+Exporters get access to this shared data when the `ConsumeTraceData`/`ConsumeMetricsData`
+functions are called. Exporters MUST NOT modify the `TraceData`/`MetricsData` argument of
+these functions. If an exporter needs to modify the data while exporting, it can clone the
+data and modify the clone, or use a copy-on-write approach for individual sub-parts of the
+`TraceData`/`MetricsData` argument. Any approach that does not mutate the original
+`TraceData`/`MetricsData` argument (including referenced data, such as `Node`, `Resource`,
+`Spans`, etc.) is allowed.
+
+## Proxy Support
+
+Beyond the standard YAML configuration outlined in the individual READMEs above,
+exporters that leverage the net/http package (all do today) also respect the
+following proxy environment variables:
+
+- HTTP_PROXY
+- HTTPS_PROXY
+- NO_PROXY
+
+If these variables are set when the Collector starts, exporters will honor them and
+proxy (or bypass the proxy for) traffic accordingly, regardless of protocol.
diff --git a/internal/otel_collector/exporter/doc.go b/internal/otel_collector/exporter/doc.go
new file mode 100644
index 00000000000..15c7e94732d
--- /dev/null
+++ b/internal/otel_collector/exporter/doc.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package exporter contains implementations of Exporter components.
+//
+// To implement a custom exporter you will need to implement the component.ExporterFactory
+// interface and the component.Exporter interface.
+//
+// To make the custom exporter part of the Collector build, the factory must be added
+// to the defaultcomponents.Components() function.
+package exporter
diff --git a/internal/otel_collector/exporter/exporterhelper/README.md b/internal/otel_collector/exporter/exporterhelper/README.md
new file mode 100644
index 00000000000..1b1bdc42639
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/README.md
@@ -0,0 +1,28 @@
+# Exporter Helper
+
+This is a helper exporter that other exporters can depend on. Today, it
+primarily offers queued retries and conversion of resource attributes to metric labels.
+
+> :warning: This exporter should not be added to a service pipeline.
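+
+As a rough sketch (assuming a hypothetical `myexporter` package and `pushLogs`
+function), an exporter constructor can delegate queuing, retries and timeouts to
+this helper instead of implementing them itself:
+
+```go
+package myexporter
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// pushLogs is the exporter-specific send function. It returns the number of
+// dropped log records and an error, matching exporterhelper.PushLogs.
+func pushLogs(ctx context.Context, ld pdata.Logs) (int, error) {
+	// ... send ld to the backend ...
+	return 0, nil
+}
+
+// newLogsExporter wraps pushLogs with the helper's timeout, retry and queue
+// senders. The options shown simply apply the helper's documented defaults.
+func newLogsExporter(cfg configmodels.Exporter, logger *zap.Logger) (component.LogsExporter, error) {
+	return exporterhelper.NewLogsExporter(
+		cfg,
+		logger,
+		pushLogs,
+		exporterhelper.WithTimeout(exporterhelper.DefaultTimeoutSettings()),
+		exporterhelper.WithRetry(exporterhelper.DefaultRetrySettings()),
+		exporterhelper.WithQueue(exporterhelper.DefaultQueueSettings()),
+	)
+}
+```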
+
+## Configuration
+
+The following configuration options can be modified:
+
+- `retry_on_failure`
+  - `enabled` (default = true)
+  - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`
+  - `max_interval` (default = 30s): The upper bound on backoff; ignored if `enabled` is `false`
+  - `max_elapsed_time` (default = 300s): The maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`
+- `sending_queue`
+  - `enabled` (default = true)
+  - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`
+  - `queue_size` (default = 5000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`.
+    Users should calculate this as `num_seconds * requests_per_second` where:
+    - `num_seconds` is the number of seconds to buffer in case of a backend outage
+    - `requests_per_second` is the average number of requests per second
+- `resource_to_telemetry_conversion`
+  - `enabled` (default = false): If `enabled` is `true`, all resource attributes are converted to metric labels.
+- `timeout` (default = 5s): Time to wait per individual attempt to send data to a backend.
+
+The full list of settings exposed for this helper exporter is documented [here](factory.go).
diff --git a/internal/otel_collector/exporter/exporterhelper/common.go b/internal/otel_collector/exporter/exporterhelper/common.go
new file mode 100644
index 00000000000..08a79ae87c5
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/common.go
@@ -0,0 +1,223 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"context"
+	"time"
+
+	"go.opencensus.io/trace"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenthelper"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+var (
+	okStatus = trace.Status{Code: trace.StatusCodeOK}
+)
+
+// TimeoutSettings for timeout. The timeout applies to individual attempts to send data to the backend.
+type TimeoutSettings struct {
+	// Timeout is the timeout for every attempt to send data to the backend.
+	Timeout time.Duration `mapstructure:"timeout"`
+}
+
+// DefaultTimeoutSettings returns the default settings for TimeoutSettings.
+func DefaultTimeoutSettings() TimeoutSettings {
+	return TimeoutSettings{
+		Timeout: 5 * time.Second,
+	}
+}
+
+// request is an abstraction of an individual request (batch of data) independent of the type of the data (traces, metrics, logs).
+type request interface {
+	// context returns the Context of the request.
+	context() context.Context
+	// setContext updates the Context of the request.
+	setContext(context.Context)
+	export(ctx context.Context) (int, error)
+	// onPartialError returns a new request that contains the items left to be sent.
+	onPartialError(consumererror.PartialError) request
+	// count returns the count of spans/metric points or log records.
+	count() int
+}
+
+// requestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs).
+type requestSender interface {
+	send(req request) (int, error)
+}
+
+// baseRequest is a base implementation for the request.
+type baseRequest struct {
+	ctx context.Context
+}
+
+func (req *baseRequest) context() context.Context {
+	return req.ctx
+}
+
+func (req *baseRequest) setContext(ctx context.Context) {
+	req.ctx = ctx
+}
+
+// baseSettings represents all the options that users can configure.
+type baseSettings struct {
+	*componenthelper.ComponentSettings
+	TimeoutSettings
+	QueueSettings
+	RetrySettings
+	ResourceToTelemetrySettings
+}
+
+// fromOptions returns the internal options starting from the default and applying all configured options.
+func fromOptions(options []Option) *baseSettings {
+	// Start from the default options:
+	opts := &baseSettings{
+		ComponentSettings: componenthelper.DefaultComponentSettings(),
+		TimeoutSettings:   DefaultTimeoutSettings(),
+		// TODO: Enable queuing by default (call DefaultQueueSettings)
+		QueueSettings: QueueSettings{Enabled: false},
+		// TODO: Enable retry by default (call DefaultRetrySettings)
+		RetrySettings:               RetrySettings{Enabled: false},
+		ResourceToTelemetrySettings: defaultResourceToTelemetrySettings(),
+	}
+
+	for _, op := range options {
+		op(opts)
+	}
+
+	return opts
+}
+
+// Option applies changes to baseSettings.
+type Option func(*baseSettings)
+
+// WithShutdown overrides the default Shutdown function for an exporter.
+// The default shutdown function does nothing and always returns nil.
+func WithShutdown(shutdown componenthelper.Shutdown) Option {
+	return func(o *baseSettings) {
+		o.Shutdown = shutdown
+	}
+}
+
+// WithStart overrides the default Start function for an exporter.
+// The default start function does nothing and always returns nil.
+func WithStart(start componenthelper.Start) Option {
+	return func(o *baseSettings) {
+		o.Start = start
+	}
+}
+
+// WithTimeout overrides the default TimeoutSettings for an exporter.
+// The default timeout is 5 seconds.
+func WithTimeout(timeoutSettings TimeoutSettings) Option {
+	return func(o *baseSettings) {
+		o.TimeoutSettings = timeoutSettings
+	}
+}
+
+// WithRetry overrides the default RetrySettings for an exporter.
+// By default, retries are disabled.
+func WithRetry(retrySettings RetrySettings) Option {
+	return func(o *baseSettings) {
+		o.RetrySettings = retrySettings
+	}
+}
+
+// WithQueue overrides the default QueueSettings for an exporter.
+// By default, queueing is disabled.
+func WithQueue(queueSettings QueueSettings) Option {
+	return func(o *baseSettings) {
+		o.QueueSettings = queueSettings
+	}
+}
+
+// WithResourceToTelemetryConversion overrides the default ResourceToTelemetrySettings for an exporter.
+// By default, conversion of resource attributes to metric labels is disabled.
+func WithResourceToTelemetryConversion(resourceToTelemetrySettings ResourceToTelemetrySettings) Option {
+	return func(o *baseSettings) {
+		o.ResourceToTelemetrySettings = resourceToTelemetrySettings
+	}
+}
+
+// baseExporter contains common fields between different exporter types.
+type baseExporter struct { + component.Component + cfg configmodels.Exporter + sender requestSender + qrSender *queuedRetrySender + convertResourceToTelemetry bool +} + +func newBaseExporter(cfg configmodels.Exporter, logger *zap.Logger, options ...Option) *baseExporter { + bs := fromOptions(options) + be := &baseExporter{ + Component: componenthelper.NewComponent(bs.ComponentSettings), + cfg: cfg, + convertResourceToTelemetry: bs.ResourceToTelemetrySettings.Enabled, + } + + be.qrSender = newQueuedRetrySender(cfg.Name(), bs.QueueSettings, bs.RetrySettings, &timeoutSender{cfg: bs.TimeoutSettings}, logger) + be.sender = be.qrSender + + return be +} + +// wrapConsumerSender wraps the consumer sender (the sender that uses retries and timeout) with the given wrapper. +// This can be used to wrap with observability (create spans, record metrics) the consumer sender. +func (be *baseExporter) wrapConsumerSender(f func(consumer requestSender) requestSender) { + be.qrSender.consumerSender = f(be.qrSender.consumerSender) +} + +// Start all senders and exporter and is invoked during service start. +func (be *baseExporter) Start(ctx context.Context, host component.Host) error { + // First start the wrapped exporter. + if err := be.Component.Start(ctx, host); err != nil { + return err + } + + // If no error then start the queuedRetrySender. + be.qrSender.start() + return nil +} + +// Shutdown all senders and exporter and is invoked during service shutdown. +func (be *baseExporter) Shutdown(ctx context.Context) error { + // First shutdown the queued retry sender + be.qrSender.shutdown() + // Last shutdown the wrapped exporter itself. + return be.Component.Shutdown(ctx) +} + +// timeoutSender is a request sender that adds a `timeout` to every request that passes this sender. +type timeoutSender struct { + cfg TimeoutSettings +} + +// send implements the requestSender interface +func (ts *timeoutSender) send(req request) (int, error) { + // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be + // updated because this deadline most likely is before the next one. + ctx := req.context() + if ts.cfg.Timeout > 0 { + var cancelFunc func() + ctx, cancelFunc = context.WithTimeout(req.context(), ts.cfg.Timeout) + defer cancelFunc() + } + return req.export(ctx) +} diff --git a/internal/otel_collector/exporter/exporterhelper/common_test.go b/internal/otel_collector/exporter/exporterhelper/common_test.go new file mode 100644 index 00000000000..23718d4225c --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/common_test.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package exporterhelper
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.opencensus.io/trace"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+var defaultExporterCfg = &configmodels.ExporterSettings{
+	TypeVal: "test",
+	NameVal: "test",
+}
+
+func TestErrorToStatus(t *testing.T) {
+	require.Equal(t, okStatus, errToStatus(nil))
+	require.Equal(t, trace.Status{Code: trace.StatusCodeUnknown, Message: "my_error"}, errToStatus(errors.New("my_error")))
+}
+
+func TestBaseExporter(t *testing.T) {
+	be := newBaseExporter(defaultExporterCfg, zap.NewNop())
+	require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost()))
+	require.NoError(t, be.Shutdown(context.Background()))
+}
+
+func TestBaseExporterWithOptions(t *testing.T) {
+	want := errors.New("my error")
+	be := newBaseExporter(
+		defaultExporterCfg,
+		zap.NewNop(),
+		WithStart(func(ctx context.Context, host component.Host) error { return want }),
+		WithShutdown(func(ctx context.Context) error { return want }),
+		WithResourceToTelemetryConversion(defaultResourceToTelemetrySettings()),
+		WithTimeout(DefaultTimeoutSettings()),
+	)
+	require.Equal(t, want, be.Start(context.Background(), componenttest.NewNopHost()))
+	require.Equal(t, want, be.Shutdown(context.Background()))
+}
+
+func errToStatus(err error) trace.Status {
+	if err != nil {
+		return trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}
+	}
+	return okStatus
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/constants.go b/internal/otel_collector/exporter/exporterhelper/constants.go
new file mode 100644
index 00000000000..2fb7511a438
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/constants.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"errors"
+)
+
+var (
+	// errNilConfig is returned when a nil config is given.
+	errNilConfig = errors.New("nil config")
+	// errNilLogger is returned when a nil logger is given.
+	errNilLogger = errors.New("nil logger")
+	// errNilPushTraceData is returned when a nil PushTraces is given.
+	errNilPushTraceData = errors.New("nil PushTraces")
+	// errNilPushMetricsData is returned when a nil PushMetrics is given.
+	errNilPushMetricsData = errors.New("nil PushMetrics")
+	// errNilPushLogsData is returned when a nil PushLogs is given.
+	errNilPushLogsData = errors.New("nil PushLogs")
+)
diff --git a/internal/otel_collector/exporter/exporterhelper/factory.go b/internal/otel_collector/exporter/exporterhelper/factory.go
new file mode 100644
index 00000000000..7b55a60ec2d
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/factory.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"context"
+
+	"github.com/spf13/viper"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// FactoryOption applies changes to the factory.
+type FactoryOption func(o *factory)
+
+// CreateDefaultConfig is the equivalent of component.ExporterFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() configmodels.Exporter
+
+// CreateTraceExporter is the equivalent of component.ExporterFactory.CreateTracesExporter()
+type CreateTraceExporter func(context.Context, component.ExporterCreateParams, configmodels.Exporter) (component.TracesExporter, error)
+
+// CreateMetricsExporter is the equivalent of component.ExporterFactory.CreateMetricsExporter()
+type CreateMetricsExporter func(context.Context, component.ExporterCreateParams, configmodels.Exporter) (component.MetricsExporter, error)
+
+// CreateLogsExporter is the equivalent of component.ExporterFactory.CreateLogsExporter()
+type CreateLogsExporter func(context.Context, component.ExporterCreateParams, configmodels.Exporter) (component.LogsExporter, error)
+
+type factory struct {
+	cfgType               configmodels.Type
+	customUnmarshaler     component.CustomUnmarshaler
+	createDefaultConfig   CreateDefaultConfig
+	createTraceExporter   CreateTraceExporter
+	createMetricsExporter CreateMetricsExporter
+	createLogsExporter    CreateLogsExporter
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTracesExporter.
+func WithTraces(createTraceExporter CreateTraceExporter) FactoryOption {
+	return func(o *factory) {
+		o.createTraceExporter = createTraceExporter
+	}
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetricsExporter.
+func WithMetrics(createMetricsExporter CreateMetricsExporter) FactoryOption {
+	return func(o *factory) {
+		o.createMetricsExporter = createMetricsExporter
+	}
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogsExporter.
+func WithLogs(createLogsExporter CreateLogsExporter) FactoryOption {
+	return func(o *factory) {
+		o.createLogsExporter = createLogsExporter
+	}
+}
+
+// WithCustomUnmarshaler makes the factory implement component.ConfigUnmarshaler using the given custom unmarshaler.
+func WithCustomUnmarshaler(customUnmarshaler component.CustomUnmarshaler) FactoryOption {
+	return func(o *factory) {
+		o.customUnmarshaler = customUnmarshaler
+	}
+}
+
+// NewFactory returns a component.ExporterFactory.
+func NewFactory(
+	cfgType configmodels.Type,
+	createDefaultConfig CreateDefaultConfig,
+	options ...FactoryOption) component.ExporterFactory {
+	f := &factory{
+		cfgType:             cfgType,
+		createDefaultConfig: createDefaultConfig,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	var ret component.ExporterFactory
+	if f.customUnmarshaler != nil {
+		ret = &factoryWithUnmarshaler{f}
+	} else {
+		ret = f
+	}
+	return ret
+}
+
+// Type gets the type of the Exporter config created by this factory.
+func (f *factory) Type() configmodels.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the exporter.
+func (f *factory) CreateDefaultConfig() configmodels.Exporter {
+	return f.createDefaultConfig()
+}
+
+// CreateTracesExporter creates a component.TracesExporter based on this config.
+func (f *factory) CreateTracesExporter(
+	ctx context.Context,
+	params component.ExporterCreateParams,
+	cfg configmodels.Exporter) (component.TracesExporter, error) {
+	if f.createTraceExporter != nil {
+		return f.createTraceExporter(ctx, params, cfg)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsExporter creates a component.MetricsExporter based on this config.
+func (f *factory) CreateMetricsExporter(
+	ctx context.Context,
+	params component.ExporterCreateParams,
+	cfg configmodels.Exporter) (component.MetricsExporter, error) {
+	if f.createMetricsExporter != nil {
+		return f.createMetricsExporter(ctx, params, cfg)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsExporter creates a component.LogsExporter based on this config.
+func (f *factory) CreateLogsExporter(
+	ctx context.Context,
+	params component.ExporterCreateParams,
+	cfg configmodels.Exporter,
+) (component.LogsExporter, error) {
+	if f.createLogsExporter != nil {
+		return f.createLogsExporter(ctx, params, cfg)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+var _ component.ConfigUnmarshaler = (*factoryWithUnmarshaler)(nil)
+
+type factoryWithUnmarshaler struct {
+	*factory
+}
+
+// Unmarshal unmarshals the config using the provided custom unmarshaler.
+func (f *factoryWithUnmarshaler) Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	return f.customUnmarshaler(componentViperSection, intoCfg)
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/factory_test.go b/internal/otel_collector/exporter/exporterhelper/factory_test.go
new file mode 100644
index 00000000000..b4694a21853
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/factory_test.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package exporterhelper + +import ( + "context" + "errors" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/pdata" +) + +const typeStr = "test" + +var ( + defaultCfg = &configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + nopTracesExporter, _ = NewTraceExporter(defaultCfg, zap.NewNop(), func(ctx context.Context, td pdata.Traces) (droppedSpans int, err error) { + return 0, nil + }) + nopMetricsExporter, _ = NewMetricsExporter(defaultCfg, zap.NewNop(), func(ctx context.Context, md pdata.Metrics) (droppedTimeSeries int, err error) { + return 0, nil + }) + nopLogsExporter, _ = NewLogsExporter(defaultCfg, zap.NewNop(), func(ctx context.Context, md pdata.Logs) (droppedTimeSeries int, err error) { + return 0, nil + }) +) + +func TestNewFactory(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + _, ok := factory.(component.ConfigUnmarshaler) + assert.False(t, ok) + _, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, defaultCfg) + assert.Equal(t, configerror.ErrDataTypeIsNotSupported, err) + _, err = factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, defaultCfg) + assert.Equal(t, configerror.ErrDataTypeIsNotSupported, err) + _, err = factory.CreateLogsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, defaultCfg) + assert.Equal(t, configerror.ErrDataTypeIsNotSupported, err) +} + +func TestNewFactory_WithConstructors(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig, + WithTraces(createTraceExporter), + WithMetrics(createMetricsExporter), + WithLogs(createLogsExporter), + WithCustomUnmarshaler(customUnmarshaler)) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + + fu, ok := factory.(component.ConfigUnmarshaler) + assert.True(t, ok) + assert.Equal(t, errors.New("my error"), fu.Unmarshal(nil, nil)) + + te, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, defaultCfg) + assert.NoError(t, err) + assert.Same(t, nopTracesExporter, te) + + me, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, defaultCfg) + assert.NoError(t, err) + assert.Same(t, nopMetricsExporter, me) + + le, err := factory.CreateLogsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, defaultCfg) + assert.NoError(t, err) + assert.Same(t, nopLogsExporter, le) +} + +func defaultConfig() configmodels.Exporter { + return defaultCfg +} + +func createTraceExporter(context.Context, component.ExporterCreateParams, configmodels.Exporter) (component.TracesExporter, error) { + return nopTracesExporter, nil +} + +func createMetricsExporter(context.Context, component.ExporterCreateParams, configmodels.Exporter) (component.MetricsExporter, error) { + return nopMetricsExporter, nil +} + +func createLogsExporter(context.Context, component.ExporterCreateParams, configmodels.Exporter) (component.LogsExporter, error) { + return nopLogsExporter, nil +} + +func 
customUnmarshaler(*viper.Viper, interface{}) error { + return errors.New("my error") +} diff --git a/internal/otel_collector/exporter/exporterhelper/logshelper.go b/internal/otel_collector/exporter/exporterhelper/logshelper.go new file mode 100644 index 00000000000..cb79d2b0fd2 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/logshelper.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporterhelper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +// PushLogs is a helper function that is similar to ConsumeLogs but also returns +// the number of dropped logs. +type PushLogs func(ctx context.Context, md pdata.Logs) (droppedTimeSeries int, err error) + +type logsRequest struct { + baseRequest + ld pdata.Logs + pusher PushLogs +} + +func newLogsRequest(ctx context.Context, ld pdata.Logs, pusher PushLogs) request { + return &logsRequest{ + baseRequest: baseRequest{ctx: ctx}, + ld: ld, + pusher: pusher, + } +} + +func (req *logsRequest) onPartialError(partialErr consumererror.PartialError) request { + return newLogsRequest(req.ctx, partialErr.GetLogs(), req.pusher) +} + +func (req *logsRequest) export(ctx context.Context) (int, error) { + return req.pusher(ctx, req.ld) +} + +func (req *logsRequest) count() int { + return req.ld.LogRecordCount() +} + +type logsExporter struct { + *baseExporter + pusher PushLogs +} + +func (lexp *logsExporter) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { + exporterCtx := obsreport.ExporterContext(ctx, lexp.cfg.Name()) + _, err := lexp.sender.send(newLogsRequest(exporterCtx, ld, lexp.pusher)) + return err +} + +// NewLogsExporter creates an LogsExporter that records observability metrics and wraps every request with a Span. +func NewLogsExporter( + cfg configmodels.Exporter, + logger *zap.Logger, + pusher PushLogs, + options ...Option, +) (component.LogsExporter, error) { + if cfg == nil { + return nil, errNilConfig + } + + if logger == nil { + return nil, errNilLogger + } + + if pusher == nil { + return nil, errNilPushLogsData + } + + be := newBaseExporter(cfg, logger, options...) 
+ be.wrapConsumerSender(func(nextSender requestSender) requestSender { + return &logsExporterWithObservability{ + obsrep: obsreport.NewExporterObsReport(configtelemetry.GetMetricsLevelFlagValue(), cfg.Name()), + nextSender: nextSender, + } + }) + + return &logsExporter{ + baseExporter: be, + pusher: pusher, + }, nil +} + +type logsExporterWithObservability struct { + obsrep *obsreport.ExporterObsReport + nextSender requestSender +} + +func (lewo *logsExporterWithObservability) send(req request) (int, error) { + req.setContext(lewo.obsrep.StartLogsExportOp(req.context())) + numDroppedLogs, err := lewo.nextSender.send(req) + lewo.obsrep.EndLogsExportOp(req.context(), req.count(), err) + return numDroppedLogs, err +} diff --git a/internal/otel_collector/exporter/exporterhelper/logshelper_test.go b/internal/otel_collector/exporter/exporterhelper/logshelper_test.go new file mode 100644 index 00000000000..6169225ca6d --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/logshelper_test.go @@ -0,0 +1,232 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package exporterhelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +const ( + fakeLogsExporterType = "fake_logs_exporter" + fakeLogsExporterName = "fake_logs_exporter/with_name" + fakeLogsParentSpanName = "fake_logs_parent_span_name" +) + +var ( + fakeLogsExporterConfig = &configmodels.ExporterSettings{ + TypeVal: fakeLogsExporterType, + NameVal: fakeLogsExporterName, + } +) + +func TestLogsRequest(t *testing.T) { + lr := newLogsRequest(context.Background(), testdata.GenerateLogDataOneLog(), nil) + + partialErr := consumererror.PartialLogsError(errors.New("some error"), testdata.GenerateLogDataEmpty()) + assert.EqualValues( + t, + newLogsRequest(context.Background(), testdata.GenerateLogDataEmpty(), nil), + lr.onPartialError(partialErr.(consumererror.PartialError)), + ) +} + +func TestLogsExporter_InvalidName(t *testing.T) { + le, err := NewLogsExporter(nil, zap.NewNop(), newPushLogsData(0, nil)) + require.Nil(t, le) + require.Equal(t, errNilConfig, err) +} + +func TestLogsExporter_NilLogger(t *testing.T) { + le, err := NewLogsExporter(fakeLogsExporterConfig, nil, newPushLogsData(0, nil)) + require.Nil(t, le) + require.Equal(t, errNilLogger, err) +} + +func TestLogsExporter_NilPushLogsData(t *testing.T) { + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), nil) + require.Nil(t, le) + require.Equal(t, errNilPushLogsData, err) +} + +func TestLogsExporter_Default(t *testing.T) { + ld := 
testdata.GenerateLogDataEmpty() + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, nil)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Nil(t, le.ConsumeLogs(context.Background(), ld)) + assert.Nil(t, le.Shutdown(context.Background())) +} + +func TestLogsExporter_Default_ReturnError(t *testing.T) { + ld := testdata.GenerateLogDataEmpty() + want := errors.New("my_error") + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, want)) + require.Nil(t, err) + require.NotNil(t, le) + require.Equal(t, want, le.ConsumeLogs(context.Background(), ld)) +} + +func TestLogsExporter_WithRecordLogs(t *testing.T) { + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, nil)) + require.Nil(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForLogsExporter(t, le, nil) +} + +func TestLogsExporter_WithRecordLogs_NonZeroDropped(t *testing.T) { + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(1, nil)) + require.Nil(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForLogsExporter(t, le, nil) +} + +func TestLogsExporter_WithRecordLogs_ReturnError(t *testing.T) { + want := errors.New("my_error") + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, want)) + require.Nil(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForLogsExporter(t, le, want) +} + +func TestLogsExporter_WithSpan(t *testing.T) { + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, nil)) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForLogsExporter(t, le, nil, 1) +} + +func TestLogsExporter_WithSpan_NonZeroDropped(t *testing.T) { + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(1, nil)) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForLogsExporter(t, le, nil, 1) +} + +func TestLogsExporter_WithSpan_ReturnError(t *testing.T) { + want := errors.New("my_error") + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, want)) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForLogsExporter(t, le, want, 1) +} + +func TestLogsExporter_WithShutdown(t *testing.T) { + shutdownCalled := false + shutdown := func(context.Context) error { shutdownCalled = true; return nil } + + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, nil), WithShutdown(shutdown)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Nil(t, le.Shutdown(context.Background())) + assert.True(t, shutdownCalled) +} + +func TestLogsExporter_WithShutdown_ReturnError(t *testing.T) { + want := errors.New("my_error") + shutdownErr := func(context.Context) error { return want } + + le, err := NewLogsExporter(fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(0, nil), WithShutdown(shutdownErr)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Equal(t, le.Shutdown(context.Background()), want) +} + +func newPushLogsData(droppedTimeSeries int, retError error) PushLogs { + return func(ctx context.Context, td pdata.Logs) (int, error) { + return droppedTimeSeries, retError + } +} + +func checkRecordedMetricsForLogsExporter(t *testing.T, le component.LogsExporter, wantError error) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ld := testdata.GenerateLogDataTwoLogsSameResource() + const numBatches = 7 + for i := 0; i < numBatches; i++ { + require.Equal(t, wantError, 
le.ConsumeLogs(context.Background(), ld)) + } + + // TODO: When the new metrics correctly count partial dropped fix this. + if wantError != nil { + obsreporttest.CheckExporterLogsViews(t, fakeLogsExporterName, 0, int64(numBatches*ld.LogRecordCount())) + } else { + obsreporttest.CheckExporterLogsViews(t, fakeLogsExporterName, int64(numBatches*ld.LogRecordCount()), 0) + } +} + +func generateLogsTraffic(t *testing.T, le component.LogsExporter, numRequests int, wantError error) { + ld := testdata.GenerateLogDataOneLog() + ctx, span := trace.StartSpan(context.Background(), fakeLogsParentSpanName, trace.WithSampler(trace.AlwaysSample())) + defer span.End() + for i := 0; i < numRequests; i++ { + require.Equal(t, wantError, le.ConsumeLogs(ctx, ld)) + } +} + +func checkWrapSpanForLogsExporter(t *testing.T, le component.LogsExporter, wantError error, numLogRecords int64) { + ocSpansSaver := new(testOCTraceExporter) + trace.RegisterExporter(ocSpansSaver) + defer trace.UnregisterExporter(ocSpansSaver) + + const numRequests = 5 + generateLogsTraffic(t, le, numRequests, wantError) + + // Inspection time! + ocSpansSaver.mu.Lock() + defer ocSpansSaver.mu.Unlock() + + require.NotEqual(t, 0, len(ocSpansSaver.spanData), "No exported span data") + + gotSpanData := ocSpansSaver.spanData + require.Equal(t, numRequests+1, len(gotSpanData)) + + parentSpan := gotSpanData[numRequests] + require.Equalf(t, fakeLogsParentSpanName, parentSpan.Name, "SpanData %v", parentSpan) + for _, sd := range gotSpanData[:numRequests] { + require.Equalf(t, parentSpan.SpanContext.SpanID, sd.ParentSpanID, "Exporter span not a child\nSpanData %v", sd) + require.Equalf(t, errToStatus(wantError), sd.Status, "SpanData %v", sd) + + sentLogRecords := numLogRecords + var failedToSendLogRecords int64 + if wantError != nil { + sentLogRecords = 0 + failedToSendLogRecords = numLogRecords + } + require.Equalf(t, sentLogRecords, sd.Attributes[obsreport.SentLogRecordsKey], "SpanData %v", sd) + require.Equalf(t, failedToSendLogRecords, sd.Attributes[obsreport.FailedToSendLogRecordsKey], "SpanData %v", sd) + } +} diff --git a/internal/otel_collector/exporter/exporterhelper/metricshelper.go b/internal/otel_collector/exporter/exporterhelper/metricshelper.go new file mode 100644 index 00000000000..013d4672ef7 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/metricshelper.go @@ -0,0 +1,126 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporterhelper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +// PushMetrics is a helper function that is similar to ConsumeMetrics but also returns +// the number of dropped metrics. 
+type PushMetrics func(ctx context.Context, md pdata.Metrics) (droppedTimeSeries int, err error) + +type metricsRequest struct { + baseRequest + md pdata.Metrics + pusher PushMetrics +} + +func newMetricsRequest(ctx context.Context, md pdata.Metrics, pusher PushMetrics) request { + return &metricsRequest{ + baseRequest: baseRequest{ctx: ctx}, + md: md, + pusher: pusher, + } +} + +func (req *metricsRequest) onPartialError(partialErr consumererror.PartialError) request { + return newMetricsRequest(req.ctx, partialErr.GetMetrics(), req.pusher) +} + +func (req *metricsRequest) export(ctx context.Context) (int, error) { + return req.pusher(ctx, req.md) +} + +func (req *metricsRequest) count() int { + _, numPoints := req.md.MetricAndDataPointCount() + return numPoints +} + +type metricsExporter struct { + *baseExporter + pusher PushMetrics +} + +func (mexp *metricsExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { + if mexp.baseExporter.convertResourceToTelemetry { + md = convertResourceToLabels(md) + } + exporterCtx := obsreport.ExporterContext(ctx, mexp.cfg.Name()) + req := newMetricsRequest(exporterCtx, md, mexp.pusher) + _, err := mexp.sender.send(req) + return err +} + +// NewMetricsExporter creates an MetricsExporter that records observability metrics and wraps every request with a Span. +func NewMetricsExporter( + cfg configmodels.Exporter, + logger *zap.Logger, + pusher PushMetrics, + options ...Option, +) (component.MetricsExporter, error) { + if cfg == nil { + return nil, errNilConfig + } + + if logger == nil { + return nil, errNilLogger + } + + if pusher == nil { + return nil, errNilPushMetricsData + } + + be := newBaseExporter(cfg, logger, options...) + be.wrapConsumerSender(func(nextSender requestSender) requestSender { + return &metricsSenderWithObservability{ + obsrep: obsreport.NewExporterObsReport(configtelemetry.GetMetricsLevelFlagValue(), cfg.Name()), + nextSender: nextSender, + } + }) + + return &metricsExporter{ + baseExporter: be, + pusher: pusher, + }, nil +} + +type metricsSenderWithObservability struct { + obsrep *obsreport.ExporterObsReport + nextSender requestSender +} + +func (mewo *metricsSenderWithObservability) send(req request) (int, error) { + req.setContext(mewo.obsrep.StartMetricsExportOp(req.context())) + _, err := mewo.nextSender.send(req) + + // TODO: this is not ideal: it should come from the next function itself. + // temporarily loading it from internal format. Once full switch is done + // to new metrics will remove this. + mReq := req.(*metricsRequest) + numReceivedMetrics, numPoints := mReq.md.MetricAndDataPointCount() + + mewo.obsrep.EndMetricsExportOp(req.context(), numPoints, err) + return numReceivedMetrics, err +} diff --git a/internal/otel_collector/exporter/exporterhelper/metricshelper_test.go b/internal/otel_collector/exporter/exporterhelper/metricshelper_test.go new file mode 100644 index 00000000000..deb37bfc60e --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/metricshelper_test.go @@ -0,0 +1,253 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +package exporterhelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +const ( + fakeMetricsExporterType = "fake_metrics_exporter" + fakeMetricsExporterName = "fake_metrics_exporter/with_name" + fakeMetricsParentSpanName = "fake_metrics_parent_span_name" +) + +var ( + fakeMetricsExporterConfig = &configmodels.ExporterSettings{ + TypeVal: fakeMetricsExporterType, + NameVal: fakeMetricsExporterName, + } +) + +func TestMetricsRequest(t *testing.T) { + mr := newMetricsRequest(context.Background(), testdata.GenerateMetricsOneMetric(), nil) + + partialErr := consumererror.PartialMetricsError(errors.New("some error"), testdata.GenerateMetricsEmpty()) + assert.EqualValues( + t, + newMetricsRequest(context.Background(), testdata.GenerateMetricsEmpty(), nil), + mr.onPartialError(partialErr.(consumererror.PartialError)), + ) +} + +func TestMetricsExporter_InvalidName(t *testing.T) { + me, err := NewMetricsExporter(nil, zap.NewNop(), newPushMetricsData(0, nil)) + require.Nil(t, me) + require.Equal(t, errNilConfig, err) +} + +func TestMetricsExporter_NilLogger(t *testing.T) { + me, err := NewMetricsExporter(fakeMetricsExporterConfig, nil, newPushMetricsData(0, nil)) + require.Nil(t, me) + require.Equal(t, errNilLogger, err) +} + +func TestMetricsExporter_NilPushMetricsData(t *testing.T) { + me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), nil) + require.Nil(t, me) + require.Equal(t, errNilPushMetricsData, err) +} + +func TestMetricsExporter_Default(t *testing.T) { + md := testdata.GenerateMetricsEmpty() + me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil)) + assert.NotNil(t, me) + assert.NoError(t, err) + + assert.Nil(t, me.ConsumeMetrics(context.Background(), md)) + assert.Nil(t, me.Shutdown(context.Background())) +} + +func TestMetricsExporter_Default_ReturnError(t *testing.T) { + md := testdata.GenerateMetricsEmpty() + want := errors.New("my_error") + me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, want)) + require.Nil(t, err) + require.NotNil(t, me) + require.Equal(t, want, me.ConsumeMetrics(context.Background(), md)) +} + +func TestMetricsExporter_WithRecordMetrics(t *testing.T) { + me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil)) + require.Nil(t, err) + require.NotNil(t, me) + + checkRecordedMetricsForMetricsExporter(t, me, nil) +} + +func TestMetricsExporter_WithRecordMetrics_NonZeroDropped(t *testing.T) { + me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(1, nil)) + require.Nil(t, err) + require.NotNil(t, me) + + checkRecordedMetricsForMetricsExporter(t, me, nil) +} + +func TestMetricsExporter_WithRecordMetrics_ReturnError(t *testing.T) { + want := errors.New("my_error") + me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, want)) + require.Nil(t, err) + 
require.NotNil(t, me)
+
+	checkRecordedMetricsForMetricsExporter(t, me, want)
+}
+
+func TestMetricsExporter_WithSpan(t *testing.T) {
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil))
+	require.Nil(t, err)
+	require.NotNil(t, me)
+	checkWrapSpanForMetricsExporter(t, me, nil, 1)
+}
+
+func TestMetricsExporter_WithSpan_NonZeroDropped(t *testing.T) {
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(1, nil))
+	require.Nil(t, err)
+	require.NotNil(t, me)
+	checkWrapSpanForMetricsExporter(t, me, nil, 1)
+}
+
+func TestMetricsExporter_WithSpan_ReturnError(t *testing.T) {
+	want := errors.New("my_error")
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, want))
+	require.Nil(t, err)
+	require.NotNil(t, me)
+	checkWrapSpanForMetricsExporter(t, me, want, 1)
+}
+
+func TestMetricsExporter_WithShutdown(t *testing.T) {
+	shutdownCalled := false
+	shutdown := func(context.Context) error { shutdownCalled = true; return nil }
+
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil), WithShutdown(shutdown))
+	assert.NotNil(t, me)
+	assert.NoError(t, err)
+
+	assert.Nil(t, me.Shutdown(context.Background()))
+	assert.True(t, shutdownCalled)
+}
+
+func TestMetricsExporter_WithResourceToTelemetryConversionDisabled(t *testing.T) {
+	md := testdata.GenerateMetricsTwoMetrics()
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil), WithResourceToTelemetryConversion(defaultResourceToTelemetrySettings()))
+	assert.NotNil(t, me)
+	assert.NoError(t, err)
+
+	assert.Nil(t, me.ConsumeMetrics(context.Background(), md))
+	assert.Nil(t, me.Shutdown(context.Background()))
+}
+
+func TestMetricsExporter_WithResourceToTelemetryConversionEnabled(t *testing.T) {
+	md := testdata.GenerateMetricsTwoMetrics()
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil), WithResourceToTelemetryConversion(ResourceToTelemetrySettings{Enabled: true}))
+	assert.NotNil(t, me)
+	assert.NoError(t, err)
+
+	assert.Nil(t, me.ConsumeMetrics(context.Background(), md))
+	assert.Nil(t, me.Shutdown(context.Background()))
+}
+
+func TestMetricsExporter_WithShutdown_ReturnError(t *testing.T) {
+	want := errors.New("my_error")
+	shutdownErr := func(context.Context) error { return want }
+
+	me, err := NewMetricsExporter(fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(0, nil), WithShutdown(shutdownErr))
+	assert.NotNil(t, me)
+	assert.NoError(t, err)
+
+	assert.Equal(t, me.Shutdown(context.Background()), want)
+}
+
+func newPushMetricsData(droppedTimeSeries int, retError error) PushMetrics {
+	return func(ctx context.Context, td pdata.Metrics) (int, error) {
+		return droppedTimeSeries, retError
+	}
+}
+
+func checkRecordedMetricsForMetricsExporter(t *testing.T, me component.MetricsExporter, wantError error) {
+	doneFn, err := obsreporttest.SetupRecordedMetricsTest()
+	require.NoError(t, err)
+	defer doneFn()
+
+	md := testdata.GenerateMetricsTwoMetrics()
+	const numBatches = 7
+	for i := 0; i < numBatches; i++ {
+		require.Equal(t, wantError, me.ConsumeMetrics(context.Background(), md))
+	}
+
+	// TODO: When the new metrics correctly count partial dropped fix this.
+	numPoints := int64(numBatches * md.MetricCount() * 2) /* 2 points per metric */
+	if wantError != nil {
+		obsreporttest.CheckExporterMetricsViews(t, fakeMetricsExporterName, 0, numPoints)
+	} else {
+		obsreporttest.CheckExporterMetricsViews(t, fakeMetricsExporterName, numPoints, 0)
+	}
+}
+
+func generateMetricsTraffic(t *testing.T, me component.MetricsExporter, numRequests int, wantError error) {
+	md := testdata.GenerateMetricsOneMetricOneDataPoint()
+	ctx, span := trace.StartSpan(context.Background(), fakeMetricsParentSpanName, trace.WithSampler(trace.AlwaysSample()))
+	defer span.End()
+	for i := 0; i < numRequests; i++ {
+		require.Equal(t, wantError, me.ConsumeMetrics(ctx, md))
+	}
+}
+
+func checkWrapSpanForMetricsExporter(t *testing.T, me component.MetricsExporter, wantError error, numMetricPoints int64) {
+	ocSpansSaver := new(testOCTraceExporter)
+	trace.RegisterExporter(ocSpansSaver)
+	defer trace.UnregisterExporter(ocSpansSaver)
+
+	const numRequests = 5
+	generateMetricsTraffic(t, me, numRequests, wantError)
+
+	// Inspection time!
+	ocSpansSaver.mu.Lock()
+	defer ocSpansSaver.mu.Unlock()
+
+	require.NotEqual(t, 0, len(ocSpansSaver.spanData), "No exported span data")
+
+	gotSpanData := ocSpansSaver.spanData
+	require.Equal(t, numRequests+1, len(gotSpanData))
+
+	parentSpan := gotSpanData[numRequests]
+	require.Equalf(t, fakeMetricsParentSpanName, parentSpan.Name, "SpanData %v", parentSpan)
+	for _, sd := range gotSpanData[:numRequests] {
+		require.Equalf(t, parentSpan.SpanContext.SpanID, sd.ParentSpanID, "Exporter span not a child\nSpanData %v", sd)
+		require.Equalf(t, errToStatus(wantError), sd.Status, "SpanData %v", sd)
+
+		sentMetricPoints := numMetricPoints
+		var failedToSendMetricPoints int64
+		if wantError != nil {
+			sentMetricPoints = 0
+			failedToSendMetricPoints = numMetricPoints
+		}
+		require.Equalf(t, sentMetricPoints, sd.Attributes[obsreport.SentMetricPointsKey], "SpanData %v", sd)
+		require.Equalf(t, failedToSendMetricPoints, sd.Attributes[obsreport.FailedToSendMetricPointsKey], "SpanData %v", sd)
+	}
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/queued_retry.go b/internal/otel_collector/exporter/exporterhelper/queued_retry.go
new file mode 100644
index 00000000000..4fe509d7ebb
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/queued_retry.go
@@ -0,0 +1,316 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff"
+	"github.com/jaegertracing/jaeger/pkg/queue"
+	"go.opencensus.io/trace"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/obsreport"
+)
+
+// QueueSettings defines configuration for queueing batches before sending to the consumerSender.
+type QueueSettings struct {
+	// Enabled indicates whether to enqueue batches before sending to the consumerSender.
+	Enabled bool `mapstructure:"enabled"`
+	// NumConsumers is the number of consumers from the queue.
+	NumConsumers int `mapstructure:"num_consumers"`
+	// QueueSize is the maximum number of batches allowed in queue at a given time.
+	QueueSize int `mapstructure:"queue_size"`
+}
+
+// DefaultQueueSettings returns the default settings for QueueSettings.
+func DefaultQueueSettings() QueueSettings {
+	return QueueSettings{
+		Enabled:      true,
+		NumConsumers: 10,
+		// A queue of 5000 elements at 100 requests/sec gives about 50 seconds of buffering
+		// during a destination outage, which is a pretty decent value for production.
+		// Users should calculate this as the number of seconds to buffer during a backend
+		// outage multiplied by the number of requests per second.
+		QueueSize: 5000,
+	}
+}
+
+// RetrySettings defines configuration for retrying batches in case of export failure.
+// The currently supported strategy is exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether to retry sending batches in case of export failure.
+	Enabled bool `mapstructure:"enabled"`
+	// InitialInterval is the time to wait after the first failure before retrying.
+	InitialInterval time.Duration `mapstructure:"initial_interval"`
+	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+	// consecutive retries will always be `MaxInterval`.
+	MaxInterval time.Duration `mapstructure:"max_interval"`
+	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+	// Once this value is reached, the data is discarded.
+	MaxElapsedTime time.Duration `mapstructure:"max_elapsed_time"`
+}
+
+// DefaultRetrySettings returns the default settings for RetrySettings.
+func DefaultRetrySettings() RetrySettings {
+	return RetrySettings{
+		Enabled:         true,
+		InitialInterval: 5 * time.Second,
+		MaxInterval:     30 * time.Second,
+		MaxElapsedTime:  5 * time.Minute,
+	}
+}
+
+type queuedRetrySender struct {
+	cfg             QueueSettings
+	consumerSender  requestSender
+	queue           *queue.BoundedQueue
+	retryStopCh     chan struct{}
+	traceAttributes []trace.Attribute
+	logger          *zap.Logger
+}
+
+func createSampledLogger(logger *zap.Logger) *zap.Logger {
+	if logger.Core().Enabled(zapcore.DebugLevel) {
+		// Debugging is enabled. Don't do any sampling.
+		return logger
+	}
+
+	// Create a logger that samples all messages to 1 per 10 seconds initially,
+	// and 1/100 of messages after that.
+	opts := zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+		return zapcore.NewSamplerWithOptions(
+			core,
+			10*time.Second,
+			1,
+			100,
+		)
+	})
+	return logger.WithOptions(opts)
+}
+
+func newQueuedRetrySender(fullName string, qCfg QueueSettings, rCfg RetrySettings, nextSender requestSender, logger *zap.Logger) *queuedRetrySender {
+	retryStopCh := make(chan struct{})
+	sampledLogger := createSampledLogger(logger)
+	traceAttr := trace.StringAttribute(obsreport.ExporterKey, fullName)
+	return &queuedRetrySender{
+		cfg: qCfg,
+		consumerSender: &retrySender{
+			traceAttribute: traceAttr,
+			cfg:            rCfg,
+			nextSender:     nextSender,
+			stopCh:         retryStopCh,
+			logger:         sampledLogger,
+		},
+		queue:           queue.NewBoundedQueue(qCfg.QueueSize, func(item interface{}) {}),
+		retryStopCh:     retryStopCh,
+		traceAttributes: []trace.Attribute{traceAttr},
+		logger:          sampledLogger,
+	}
+}
+
+// start is invoked during service startup.
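+// It spawns NumConsumers goroutines that drain the bounded queue and hand each
+// dequeued request to the consumerSender, whose retry logic handles send errors.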
+func (qrs *queuedRetrySender) start() {
+	qrs.queue.StartConsumers(qrs.cfg.NumConsumers, func(item interface{}) {
+		req := item.(request)
+		_, _ = qrs.consumerSender.send(req)
+	})
+}
+
+// send implements the requestSender interface.
+func (qrs *queuedRetrySender) send(req request) (int, error) {
+	if !qrs.cfg.Enabled {
+		n, err := qrs.consumerSender.send(req)
+		if err != nil {
+			qrs.logger.Error(
+				"Exporting failed. Dropping data. Try enabling sending_queue to survive temporary failures.",
+				zap.Int("dropped_items", n),
+			)
+		}
+		return n, err
+	}
+
+	// Prevent cancellation and deadline from propagating to the context stored in the queue.
+	// The grpc/http based receivers will cancel the request context after this function returns.
+	req.setContext(noCancellationContext{Context: req.context()})
+
+	span := trace.FromContext(req.context())
+	if !qrs.queue.Produce(req) {
+		qrs.logger.Error(
+			"Dropping data because sending_queue is full. Try increasing queue_size.",
+			zap.Int("dropped_items", req.count()),
+		)
+		span.Annotate(qrs.traceAttributes, "Dropped item, sending_queue is full.")
+		return req.count(), errors.New("sending_queue is full")
+	}
+
+	span.Annotate(qrs.traceAttributes, "Enqueued item.")
+	return 0, nil
+}
+
+// shutdown is invoked during service shutdown.
+func (qrs *queuedRetrySender) shutdown() {
+	// First, stop the retry goroutines so that the queue workers unblock.
+	close(qrs.retryStopCh)
+
+	// Stop the queued sender; this drains the queue and calls the (already stopped)
+	// retry sender, which will attempt each request only once.
+	qrs.queue.Stop()
+}
+
+// TODO: Clean this up by forcing all exporters to return an internal error type that always includes the information about retries.
+type throttleRetry struct {
+	error
+	delay time.Duration
+}
+
+func NewThrottleRetry(err error, delay time.Duration) error {
+	return &throttleRetry{
+		error: err,
+		delay: delay,
+	}
+}
+
+type retrySender struct {
+	traceAttribute trace.Attribute
+	cfg            RetrySettings
+	nextSender     requestSender
+	stopCh         chan struct{}
+	logger         *zap.Logger
+}
+
+// send implements the requestSender interface.
+func (rs *retrySender) send(req request) (int, error) {
+	if !rs.cfg.Enabled {
+		n, err := rs.nextSender.send(req)
+		if err != nil {
+			rs.logger.Error(
+				"Exporting failed. Try enabling retry_on_failure config option.",
+				zap.Error(err),
+			)
+		}
+		return n, err
+	}
+
+	// Do not use NewExponentialBackOff since it calls Reset and the code here must
+	// call Reset after changing the InitialInterval (this saves an unnecessary call to Now).
+	expBackoff := backoff.ExponentialBackOff{
+		InitialInterval:     rs.cfg.InitialInterval,
+		RandomizationFactor: backoff.DefaultRandomizationFactor,
+		Multiplier:          backoff.DefaultMultiplier,
+		MaxInterval:         rs.cfg.MaxInterval,
+		MaxElapsedTime:      rs.cfg.MaxElapsedTime,
+		Clock:               backoff.SystemClock,
+	}
+	expBackoff.Reset()
+	span := trace.FromContext(req.context())
+	retryNum := int64(0)
+	for {
+		span.Annotate(
+			[]trace.Attribute{
+				rs.traceAttribute,
+				trace.Int64Attribute("retry_num", retryNum)},
+			"Sending request.")
+		droppedItems, err := rs.nextSender.send(req)
+
+		if err == nil {
+			return droppedItems, nil
+		}
+
+		// Immediately drop data on permanent errors.
+		if consumererror.IsPermanent(err) {
+			rs.logger.Error(
+				"Exporting failed. The error is not retryable. Dropping data.",
+				zap.Error(err),
+				zap.Int("dropped_items", droppedItems),
+			)
+			return droppedItems, err
+		}
+
+		// If this is a partial error, update the request and stats with the non-exported data.
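+		// onPartialError returns a new request carrying only the data that failed
+		// to export, so items that were already delivered are not sent again.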
+ if partialErr, isPartial := err.(consumererror.PartialError); isPartial { + req = req.onPartialError(partialErr) + } + + backoffDelay := expBackoff.NextBackOff() + + if backoffDelay == backoff.Stop { + // throw away the batch + err = fmt.Errorf("max elapsed time expired %w", err) + rs.logger.Error( + "Exporting failed. No more retries left. Dropping data.", + zap.Error(err), + zap.Int("dropped_items", droppedItems), + ) + return req.count(), err + } + + if throttleErr, isThrottle := err.(*throttleRetry); isThrottle { + backoffDelay = max(backoffDelay, throttleErr.delay) + } + + backoffDelayStr := backoffDelay.String() + span.Annotate( + []trace.Attribute{ + rs.traceAttribute, + trace.StringAttribute("interval", backoffDelayStr), + trace.StringAttribute("error", err.Error())}, + "Exporting failed. Will retry the request after interval.") + rs.logger.Info( + "Exporting failed. Will retry the request after interval.", + zap.Error(err), + zap.String("interval", backoffDelayStr), + ) + retryNum++ + + // back-off, but get interrupted when shutting down or request is cancelled or timed out. + select { + case <-req.context().Done(): + return req.count(), fmt.Errorf("request is cancelled or timed out %w", err) + case <-rs.stopCh: + return req.count(), fmt.Errorf("interrupted due to shutdown %w", err) + case <-time.After(backoffDelay): + } + } +} + +// max returns the larger of x or y. +func max(x, y time.Duration) time.Duration { + if x < y { + return y + } + return x +} + +type noCancellationContext struct { + context.Context +} + +func (noCancellationContext) Deadline() (deadline time.Time, ok bool) { + return +} + +func (noCancellationContext) Done() <-chan struct{} { + return nil +} + +func (noCancellationContext) Err() error { + return nil +} diff --git a/internal/otel_collector/exporter/exporterhelper/queued_retry_test.go b/internal/otel_collector/exporter/exporterhelper/queued_retry_test.go new file mode 100644 index 00000000000..9df39d34119 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/queued_retry_test.go @@ -0,0 +1,462 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package exporterhelper + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +func TestQueuedRetry_DropOnPermanentError(t *testing.T) { + qCfg := DefaultQueueSettings() + rCfg := DefaultRetrySettings() + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + mockR := newMockRequest(context.Background(), 2, consumererror.Permanent(errors.New("bad data"))) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + // In the newMockConcurrentExporter we count requests and items even for failed requests + mockR.checkNumRequests(t, 1) + ocs.checkSendItemsCount(t, 0) + ocs.checkDroppedItemsCount(t, 2) +} + +func TestQueuedRetry_DropOnNoRetry(t *testing.T) { + qCfg := DefaultQueueSettings() + rCfg := DefaultRetrySettings() + rCfg.Enabled = false + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + mockR := newMockRequest(context.Background(), 2, errors.New("transient error")) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + // In the newMockConcurrentExporter we count requests and items even for failed requests + mockR.checkNumRequests(t, 1) + ocs.checkSendItemsCount(t, 0) + ocs.checkDroppedItemsCount(t, 2) +} + +func TestQueuedRetry_PartialError(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.NumConsumers = 1 + rCfg := DefaultRetrySettings() + rCfg.InitialInterval = 0 + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + partialErr := consumererror.PartialTracesError(errors.New("some error"), testdata.GenerateTraceDataOneSpan()) + mockR := newMockRequest(context.Background(), 2, partialErr) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. 
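+		// The first attempt fails with a partial error, so the retry sender
+		// resends only the remaining data; two export attempts are expected.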
+ droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + + // In the newMockConcurrentExporter we count requests and items even for failed requests + mockR.checkNumRequests(t, 2) + ocs.checkSendItemsCount(t, 2) + ocs.checkDroppedItemsCount(t, 0) +} + +func TestQueuedRetry_StopWhileWaiting(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.NumConsumers = 1 + rCfg := DefaultRetrySettings() + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + + firstMockR := newMockRequest(context.Background(), 2, errors.New("transient error")) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(firstMockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + + // Enqueue another request to ensure when calling shutdown we drain the queue. + secondMockR := newMockRequest(context.Background(), 3, nil) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(secondMockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + + assert.NoError(t, be.Shutdown(context.Background())) + + // TODO: Ensure that queue is drained, and uncomment the next 3 lines. + // https://github.com/jaegertracing/jaeger/pull/2349 + firstMockR.checkNumRequests(t, 1) + // secondMockR.checkNumRequests(t, 1) + // ocs.checkSendItemsCount(t, 3) + ocs.checkDroppedItemsCount(t, 2) + // require.Zero(t, be.qrSender.queue.Size()) +} + +func TestQueuedRetry_DoNotPreserveCancellation(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.NumConsumers = 1 + rCfg := DefaultRetrySettings() + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + ctx, cancelFunc := context.WithCancel(context.Background()) + cancelFunc() + mockR := newMockRequest(ctx, 2, nil) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + + mockR.checkNumRequests(t, 1) + ocs.checkSendItemsCount(t, 2) + ocs.checkDroppedItemsCount(t, 0) + require.Zero(t, be.qrSender.queue.Size()) +} + +func TestQueuedRetry_MaxElapsedTime(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.NumConsumers = 1 + rCfg := DefaultRetrySettings() + rCfg.InitialInterval = time.Millisecond + rCfg.MaxElapsedTime = 100 * time.Millisecond + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + ocs.run(func() { + // Add an item that will always fail. 
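+		// newErrorRequest reports a count of 7 items, which is what the
+		// dropped-items check below expects once MaxElapsedTime is exceeded.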
+ droppedItems, err := be.sender.send(newErrorRequest(context.Background())) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + + mockR := newMockRequest(context.Background(), 2, nil) + start := time.Now() + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + + // We should ensure that we wait for more than MaxElapsedTime, but not too much. + waitingTime := time.Since(start) + assert.True(t, 100*time.Millisecond < waitingTime) + assert.True(t, 5*time.Second > waitingTime) + + // In the newMockConcurrentExporter we count requests and items even for failed requests. + mockR.checkNumRequests(t, 1) + ocs.checkSendItemsCount(t, 2) + ocs.checkDroppedItemsCount(t, 7) + require.Zero(t, be.qrSender.queue.Size()) +} + +func TestQueuedRetry_ThrottleError(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.NumConsumers = 1 + rCfg := DefaultRetrySettings() + rCfg.InitialInterval = 10 * time.Millisecond + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + mockR := newMockRequest(context.Background(), 2, NewThrottleRetry(errors.New("throttle error"), 100*time.Millisecond)) + start := time.Now() + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + + // The initial backoff is 10ms, but because of the throttle this should wait at least 100ms. + assert.True(t, 100*time.Millisecond < time.Since(start)) + + mockR.checkNumRequests(t, 2) + ocs.checkSendItemsCount(t, 2) + ocs.checkDroppedItemsCount(t, 0) + require.Zero(t, be.qrSender.queue.Size()) +} + +func TestQueuedRetry_RetryOnError(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.NumConsumers = 1 + qCfg.QueueSize = 1 + rCfg := DefaultRetrySettings() + rCfg.InitialInterval = 0 + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + mockR := newMockRequest(context.Background(), 2, errors.New("transient error")) + ocs.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. 
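+		// mockRequest clears its error after the first export attempt, so the
+		// retry succeeds and both items end up counted as sent.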
+ droppedItems, err := be.sender.send(mockR) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + ocs.awaitAsyncProcessing() + + // In the newMockConcurrentExporter we count requests and items even for failed requests + mockR.checkNumRequests(t, 2) + ocs.checkSendItemsCount(t, 2) + ocs.checkDroppedItemsCount(t, 0) + require.Zero(t, be.qrSender.queue.Size()) +} + +func TestQueuedRetry_DropOnFull(t *testing.T) { + qCfg := DefaultQueueSettings() + qCfg.QueueSize = 0 + rCfg := DefaultRetrySettings() + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + droppedItems, err := be.sender.send(newMockRequest(context.Background(), 2, errors.New("transient error"))) + require.Error(t, err) + assert.Equal(t, 2, droppedItems) +} + +func TestQueuedRetryHappyPath(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + qCfg := DefaultQueueSettings() + rCfg := DefaultRetrySettings() + be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) + be.qrSender.consumerSender = ocs + require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, be.Shutdown(context.Background())) + }) + + wantRequests := 10 + reqs := make([]*mockRequest, 0, 10) + for i := 0; i < wantRequests; i++ { + ocs.run(func() { + req := newMockRequest(context.Background(), 2, nil) + reqs = append(reqs, req) + droppedItems, err := be.sender.send(req) + require.NoError(t, err) + assert.Equal(t, 0, droppedItems) + }) + } + + // Wait until all batches received + ocs.awaitAsyncProcessing() + + require.Len(t, reqs, wantRequests) + for _, req := range reqs { + req.checkNumRequests(t, 1) + } + + ocs.checkSendItemsCount(t, 2*wantRequests) + ocs.checkDroppedItemsCount(t, 0) +} + +func TestNoCancellationContext(t *testing.T) { + deadline := time.Now().Add(1 * time.Second) + ctx, cancelFunc := context.WithDeadline(context.Background(), deadline) + cancelFunc() + require.Error(t, ctx.Err()) + d, ok := ctx.Deadline() + require.True(t, ok) + require.Equal(t, deadline, d) + + nctx := noCancellationContext{Context: ctx} + assert.NoError(t, nctx.Err()) + d, ok = nctx.Deadline() + assert.False(t, ok) + assert.True(t, d.IsZero()) +} + +type mockErrorRequest struct { + baseRequest +} + +func (mer *mockErrorRequest) export(_ context.Context) (int, error) { + return 0, errors.New("transient error") +} + +func (mer *mockErrorRequest) onPartialError(consumererror.PartialError) request { + return mer +} + +func (mer *mockErrorRequest) count() int { + return 7 +} + +func newErrorRequest(ctx context.Context) request { + return &mockErrorRequest{ + baseRequest: baseRequest{ctx: ctx}, + } +} + +type mockRequest struct { + baseRequest + cnt int + mu sync.Mutex + consumeError error + requestCount *int64 +} + +func (m *mockRequest) export(ctx context.Context) (int, error) { + atomic.AddInt64(m.requestCount, 1) + m.mu.Lock() + defer m.mu.Unlock() + err := m.consumeError + m.consumeError = nil + if err != nil { + return m.cnt, err + } + // Respond like gRPC/HTTP, if context is cancelled, return error + return 0, ctx.Err() +} + +func (m 
*mockRequest) onPartialError(consumererror.PartialError) request {
+	return &mockRequest{
+		baseRequest:  m.baseRequest,
+		cnt:          1,
+		consumeError: nil,
+		requestCount: m.requestCount,
+	}
+}
+
+func (m *mockRequest) checkNumRequests(t *testing.T, want int) {
+	assert.Eventually(t, func() bool {
+		return int64(want) == atomic.LoadInt64(m.requestCount)
+	}, time.Second, 1*time.Millisecond)
+}
+
+func (m *mockRequest) count() int {
+	return m.cnt
+}
+
+func newMockRequest(ctx context.Context, cnt int, consumeError error) *mockRequest {
+	return &mockRequest{
+		baseRequest:  baseRequest{ctx: ctx},
+		cnt:          cnt,
+		consumeError: consumeError,
+		requestCount: new(int64),
+	}
+}
+
+type observabilityConsumerSender struct {
+	waitGroup         *sync.WaitGroup
+	sentItemsCount    int64
+	droppedItemsCount int64
+	nextSender        requestSender
+}
+
+func newObservabilityConsumerSender(nextSender requestSender) *observabilityConsumerSender {
+	return &observabilityConsumerSender{waitGroup: new(sync.WaitGroup), nextSender: nextSender}
+}
+
+func (ocs *observabilityConsumerSender) send(req request) (int, error) {
+	dic, err := ocs.nextSender.send(req)
+	atomic.AddInt64(&ocs.sentItemsCount, int64(req.count()-dic))
+	atomic.AddInt64(&ocs.droppedItemsCount, int64(dic))
+	ocs.waitGroup.Done()
+	return dic, err
+}
+
+func (ocs *observabilityConsumerSender) run(fn func()) {
+	ocs.waitGroup.Add(1)
+	fn()
+}
+
+func (ocs *observabilityConsumerSender) awaitAsyncProcessing() {
+	ocs.waitGroup.Wait()
+}
+
+func (ocs *observabilityConsumerSender) checkSendItemsCount(t *testing.T, want int) {
+	assert.EqualValues(t, want, atomic.LoadInt64(&ocs.sentItemsCount))
+}
+
+func (ocs *observabilityConsumerSender) checkDroppedItemsCount(t *testing.T, want int) {
+	assert.EqualValues(t, want, atomic.LoadInt64(&ocs.droppedItemsCount))
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/resource_to_label.go b/internal/otel_collector/exporter/exporterhelper/resource_to_label.go
new file mode 100644
index 00000000000..53c139e2f7f
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/resource_to_label.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"go.opentelemetry.io/collector/consumer/pdata"
+	tracetranslator "go.opentelemetry.io/collector/translator/trace"
+)
+
+// ResourceToTelemetrySettings defines configuration for converting resource attributes to metric labels.
+type ResourceToTelemetrySettings struct {
+	// Enabled indicates whether to convert resource attributes to metric labels.
+	Enabled bool `mapstructure:"enabled"`
+}
+
+// defaultResourceToTelemetrySettings returns the default settings for ResourceToTelemetrySettings.
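+// Conversion is disabled by default. As a sketch, an exporter that embeds these
+// settings could enable it through configuration such as (exporter name is
+// illustrative only):
+//
+//  exporters:
+//    someexporter:
+//      resource_to_telemetry_conversion:
+//        enabled: true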
+func defaultResourceToTelemetrySettings() ResourceToTelemetrySettings {
+	return ResourceToTelemetrySettings{
+		Enabled: false,
+	}
+}
+
+// convertResourceToLabels converts all resource attributes to metric labels.
+func convertResourceToLabels(md pdata.Metrics) pdata.Metrics {
+	cloneMd := md.Clone()
+	rms := cloneMd.ResourceMetrics()
+	for i := 0; i < rms.Len(); i++ {
+		resource := rms.At(i).Resource()
+
+		labelMap := extractLabelsFromResource(&resource)
+
+		ilms := rms.At(i).InstrumentationLibraryMetrics()
+		for j := 0; j < ilms.Len(); j++ {
+			ilm := ilms.At(j)
+			metricSlice := ilm.Metrics()
+			for k := 0; k < metricSlice.Len(); k++ {
+				metric := metricSlice.At(k)
+				addLabelsToMetric(&metric, labelMap)
+			}
+		}
+	}
+	return cloneMd
+}
+
+// extractLabelsFromResource extracts the attributes from a given resource and
+// returns them as a StringMap.
+func extractLabelsFromResource(resource *pdata.Resource) pdata.StringMap {
+	labelMap := pdata.NewStringMap()
+
+	attrMap := resource.Attributes()
+	attrMap.ForEach(func(k string, av pdata.AttributeValue) {
+		stringLabel := tracetranslator.AttributeValueToString(av, false)
+		labelMap.Upsert(k, stringLabel)
+	})
+	return labelMap
+}
+
+// addLabelsToMetric adds additional labels to the given metric.
+func addLabelsToMetric(metric *pdata.Metric, labelMap pdata.StringMap) {
+	switch metric.DataType() {
+	case pdata.MetricDataTypeIntGauge:
+		addLabelsToIntDataPoints(metric.IntGauge().DataPoints(), labelMap)
+	case pdata.MetricDataTypeDoubleGauge:
+		addLabelsToDoubleDataPoints(metric.DoubleGauge().DataPoints(), labelMap)
+	case pdata.MetricDataTypeIntSum:
+		addLabelsToIntDataPoints(metric.IntSum().DataPoints(), labelMap)
+	case pdata.MetricDataTypeDoubleSum:
+		addLabelsToDoubleDataPoints(metric.DoubleSum().DataPoints(), labelMap)
+	case pdata.MetricDataTypeIntHistogram:
+		addLabelsToIntHistogramDataPoints(metric.IntHistogram().DataPoints(), labelMap)
+	case pdata.MetricDataTypeDoubleHistogram:
+		addLabelsToDoubleHistogramDataPoints(metric.DoubleHistogram().DataPoints(), labelMap)
+	}
+}
+
+func addLabelsToIntDataPoints(ps pdata.IntDataPointSlice, newLabelMap pdata.StringMap) {
+	for i := 0; i < ps.Len(); i++ {
+		joinStringMaps(newLabelMap, ps.At(i).LabelsMap())
+	}
+}
+
+func addLabelsToDoubleDataPoints(ps pdata.DoubleDataPointSlice, newLabelMap pdata.StringMap) {
+	for i := 0; i < ps.Len(); i++ {
+		joinStringMaps(newLabelMap, ps.At(i).LabelsMap())
+	}
+}
+
+func addLabelsToIntHistogramDataPoints(ps pdata.IntHistogramDataPointSlice, newLabelMap pdata.StringMap) {
+	for i := 0; i < ps.Len(); i++ {
+		joinStringMaps(newLabelMap, ps.At(i).LabelsMap())
+	}
+}
+
+func addLabelsToDoubleHistogramDataPoints(ps pdata.DoubleHistogramDataPointSlice, newLabelMap pdata.StringMap) {
+	for i := 0; i < ps.Len(); i++ {
+		joinStringMaps(newLabelMap, ps.At(i).LabelsMap())
+	}
+}
+
+func joinStringMaps(from, to pdata.StringMap) {
+	from.ForEach(func(k, v string) {
+		to.Upsert(k, v)
+	})
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/resource_to_label_test.go b/internal/otel_collector/exporter/exporterhelper/resource_to_label_test.go
new file mode 100644
index 00000000000..88facb1134b
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/resource_to_label_test.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package exporterhelper + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestConvertResourceToLabels(t *testing.T) { + md := testdata.GenerateMetricsOneMetric() + assert.NotNil(t, md) + + // Before converting resource to labels + assert.Equal(t, 1, md.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 1, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).IntSum().DataPoints().At(0).LabelsMap().Len()) + + cloneMd := convertResourceToLabels(md) + + // After converting resource to labels + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 2, cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).IntSum().DataPoints().At(0).LabelsMap().Len()) + + assert.Equal(t, 1, md.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 1, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).IntSum().DataPoints().At(0).LabelsMap().Len()) + +} + +func TestConvertResourceToLabelsAllDataTypesEmptyDataPoint(t *testing.T) { + md := testdata.GenerateMetricsAllTypesEmptyDataPoint() + assert.NotNil(t, md) + + // Before converting resource to labels + assert.Equal(t, 1, md.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).DoubleGauge().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(1).IntGauge().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(2).DoubleSum().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(3).IntSum().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(4).DoubleHistogram().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(5).IntHistogram().DataPoints().At(0).LabelsMap().Len()) + + cloneMd := convertResourceToLabels(md) + + // After converting resource to labels + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).DoubleGauge().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(1).IntGauge().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(2).DoubleSum().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(3).IntSum().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 1, 
cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(4).DoubleHistogram().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 1, cloneMd.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(5).IntHistogram().DataPoints().At(0).LabelsMap().Len()) + + assert.Equal(t, 1, md.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).DoubleGauge().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(1).IntGauge().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(2).DoubleSum().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(3).IntSum().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(4).DoubleHistogram().DataPoints().At(0).LabelsMap().Len()) + assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(5).IntHistogram().DataPoints().At(0).LabelsMap().Len()) + +} diff --git a/internal/otel_collector/exporter/exporterhelper/tracehelper.go b/internal/otel_collector/exporter/exporterhelper/tracehelper.go new file mode 100644 index 00000000000..9e6f30a3334 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/tracehelper.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporterhelper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +// PushTraces is a helper function that is similar to ConsumeTraces but also +// returns the number of dropped spans. 
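+//
+// A minimal sketch of a pusher passed to NewTraceExporter (all names here are
+// illustrative only):
+//
+//  pusher := func(ctx context.Context, td pdata.Traces) (int, error) {
+//      // Forward td to a backend; report zero dropped spans on success.
+//      return 0, nil
+//  }
+//  exporter, err := NewTraceExporter(cfg, logger, pusher)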
+type PushTraces func(ctx context.Context, td pdata.Traces) (droppedSpans int, err error)
+
+type tracesRequest struct {
+	baseRequest
+	td     pdata.Traces
+	pusher PushTraces
+}
+
+func newTracesRequest(ctx context.Context, td pdata.Traces, pusher PushTraces) request {
+	return &tracesRequest{
+		baseRequest: baseRequest{ctx: ctx},
+		td:          td,
+		pusher:      pusher,
+	}
+}
+
+func (req *tracesRequest) onPartialError(partialErr consumererror.PartialError) request {
+	return newTracesRequest(req.ctx, partialErr.GetTraces(), req.pusher)
+}
+
+func (req *tracesRequest) export(ctx context.Context) (int, error) {
+	return req.pusher(ctx, req.td)
+}
+
+func (req *tracesRequest) count() int {
+	return req.td.SpanCount()
+}
+
+type traceExporter struct {
+	*baseExporter
+	pusher PushTraces
+}
+
+func (texp *traceExporter) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	exporterCtx := obsreport.ExporterContext(ctx, texp.cfg.Name())
+	req := newTracesRequest(exporterCtx, td, texp.pusher)
+	_, err := texp.sender.send(req)
+	return err
+}
+
+// NewTraceExporter creates a TracesExporter that records observability metrics and wraps every request with a Span.
+func NewTraceExporter(
+	cfg configmodels.Exporter,
+	logger *zap.Logger,
+	pusher PushTraces,
+	options ...Option,
+) (component.TracesExporter, error) {
+
+	if cfg == nil {
+		return nil, errNilConfig
+	}
+
+	if logger == nil {
+		return nil, errNilLogger
+	}
+
+	if pusher == nil {
+		return nil, errNilPushTraceData
+	}
+
+	be := newBaseExporter(cfg, logger, options...)
+	be.wrapConsumerSender(func(nextSender requestSender) requestSender {
+		return &tracesExporterWithObservability{
+			obsrep:     obsreport.NewExporterObsReport(configtelemetry.GetMetricsLevelFlagValue(), cfg.Name()),
+			nextSender: nextSender,
+		}
+	})
+
+	return &traceExporter{
+		baseExporter: be,
+		pusher:       pusher,
+	}, nil
+}
+
+type tracesExporterWithObservability struct {
+	obsrep     *obsreport.ExporterObsReport
+	nextSender requestSender
+}
+
+func (tewo *tracesExporterWithObservability) send(req request) (int, error) {
+	req.setContext(tewo.obsrep.StartTracesExportOp(req.context()))
+	// Forward the data to the next consumer (this pusher is the next).
+	droppedSpans, err := tewo.nextSender.send(req)
+
+	// TODO: This is not ideal: the count should come from the next function itself.
+	// It is temporarily loaded from the internal format; once the full switch to
+	// the new metrics is done, this will be removed.
+	tewo.obsrep.EndTracesExportOp(req.context(), req.count(), err)
+	return droppedSpans, err
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/tracehelper_test.go b/internal/otel_collector/exporter/exporterhelper/tracehelper_test.go
new file mode 100644
index 00000000000..057c0e03d30
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/tracehelper_test.go
@@ -0,0 +1,252 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper + +import ( + "context" + "errors" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +const ( + fakeTraceExporterType = "fake_trace_exporter" + fakeTraceExporterName = "fake_trace_exporter/with_name" + fakeTraceParentSpanName = "fake_trace_parent_span_name" +) + +var ( + fakeTraceExporterConfig = &configmodels.ExporterSettings{ + TypeVal: fakeTraceExporterType, + NameVal: fakeTraceExporterName, + } +) + +func TestTracesRequest(t *testing.T) { + mr := newTracesRequest(context.Background(), testdata.GenerateTraceDataOneSpan(), nil) + + partialErr := consumererror.PartialTracesError(errors.New("some error"), testdata.GenerateTraceDataEmpty()) + assert.EqualValues(t, newTracesRequest(context.Background(), testdata.GenerateTraceDataEmpty(), nil), mr.onPartialError(partialErr.(consumererror.PartialError))) +} + +type testOCTraceExporter struct { + mu sync.Mutex + spanData []*trace.SpanData +} + +func (tote *testOCTraceExporter) ExportSpan(sd *trace.SpanData) { + tote.mu.Lock() + defer tote.mu.Unlock() + + tote.spanData = append(tote.spanData, sd) +} + +func TestTraceExporter_InvalidName(t *testing.T) { + te, err := NewTraceExporter(nil, zap.NewNop(), newTraceDataPusher(0, nil)) + require.Nil(t, te) + require.Equal(t, errNilConfig, err) +} + +func TestTraceExporter_NilLogger(t *testing.T) { + te, err := NewTraceExporter(fakeTraceExporterConfig, nil, newTraceDataPusher(0, nil)) + require.Nil(t, te) + require.Equal(t, errNilLogger, err) +} + +func TestTraceExporter_NilPushTraceData(t *testing.T) { + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), nil) + require.Nil(t, te) + require.Equal(t, errNilPushTraceData, err) +} + +func TestTraceExporter_Default(t *testing.T) { + td := pdata.NewTraces() + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, nil)) + assert.NotNil(t, te) + assert.NoError(t, err) + + assert.Nil(t, te.ConsumeTraces(context.Background(), td)) + assert.Nil(t, te.Shutdown(context.Background())) +} + +func TestTraceExporter_Default_ReturnError(t *testing.T) { + td := pdata.NewTraces() + want := errors.New("my_error") + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, want)) + require.Nil(t, err) + require.NotNil(t, te) + + err = te.ConsumeTraces(context.Background(), td) + require.Equalf(t, want, err, "ConsumeTraceData returns: Want %v Got %v", want, err) +} + +func TestTraceExporter_WithRecordMetrics(t *testing.T) { + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, nil)) + require.Nil(t, err) + require.NotNil(t, te) + + checkRecordedMetricsForTraceExporter(t, te, nil) +} + +func TestTraceExporter_WithRecordMetrics_NonZeroDropped(t *testing.T) { + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(1, nil)) + require.Nil(t, err) + require.NotNil(t, te) + + checkRecordedMetricsForTraceExporter(t, te, nil) +} + +func TestTraceExporter_WithRecordMetrics_ReturnError(t *testing.T) { + want := errors.New("my_error") + te, err := 
NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, want)) + require.Nil(t, err) + require.NotNil(t, te) + + checkRecordedMetricsForTraceExporter(t, te, want) +} + +func TestTraceExporter_WithSpan(t *testing.T) { + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, nil)) + require.Nil(t, err) + require.NotNil(t, te) + + checkWrapSpanForTraceExporter(t, te, nil, 1) +} + +func TestTraceExporter_WithSpan_NonZeroDropped(t *testing.T) { + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(1, nil)) + require.Nil(t, err) + require.NotNil(t, te) + + checkWrapSpanForTraceExporter(t, te, nil, 1) +} + +func TestTraceExporter_WithSpan_ReturnError(t *testing.T) { + want := errors.New("my_error") + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, want)) + require.Nil(t, err) + require.NotNil(t, te) + + checkWrapSpanForTraceExporter(t, te, want, 1) +} + +func TestTraceExporter_WithShutdown(t *testing.T) { + shutdownCalled := false + shutdown := func(context.Context) error { shutdownCalled = true; return nil } + + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, nil), WithShutdown(shutdown)) + assert.NotNil(t, te) + assert.NoError(t, err) + + assert.Nil(t, te.Shutdown(context.Background())) + assert.True(t, shutdownCalled) +} + +func TestTraceExporter_WithShutdown_ReturnError(t *testing.T) { + want := errors.New("my_error") + shutdownErr := func(context.Context) error { return want } + + te, err := NewTraceExporter(fakeTraceExporterConfig, zap.NewNop(), newTraceDataPusher(0, nil), WithShutdown(shutdownErr)) + assert.NotNil(t, te) + assert.NoError(t, err) + + assert.Equal(t, te.Shutdown(context.Background()), want) +} + +func newTraceDataPusher(droppedSpans int, retError error) PushTraces { + return func(ctx context.Context, td pdata.Traces) (int, error) { + return droppedSpans, retError + } +} + +func checkRecordedMetricsForTraceExporter(t *testing.T, te component.TracesExporter, wantError error) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + td := testdata.GenerateTraceDataTwoSpansSameResource() + const numBatches = 7 + for i := 0; i < numBatches; i++ { + require.Equal(t, wantError, te.ConsumeTraces(context.Background(), td)) + } + + // TODO: When the new metrics correctly count partial dropped fix this. 
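+	// On error, every span in every batch is counted as failed to send;
+	// otherwise all of them are counted as sent.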
+	if wantError != nil {
+		obsreporttest.CheckExporterTracesViews(t, fakeTraceExporterName, 0, int64(numBatches*td.SpanCount()))
+	} else {
+		obsreporttest.CheckExporterTracesViews(t, fakeTraceExporterName, int64(numBatches*td.SpanCount()), 0)
+	}
+}
+
+func generateTraceTraffic(t *testing.T, te component.TracesExporter, numRequests int, wantError error) {
+	td := pdata.NewTraces()
+	rs := td.ResourceSpans()
+	rs.Resize(1)
+	rs.At(0).InstrumentationLibrarySpans().Resize(1)
+	rs.At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1)
+	ctx, span := trace.StartSpan(context.Background(), fakeTraceParentSpanName, trace.WithSampler(trace.AlwaysSample()))
+	defer span.End()
+	for i := 0; i < numRequests; i++ {
+		require.Equal(t, wantError, te.ConsumeTraces(ctx, td))
+	}
+}
+
+func checkWrapSpanForTraceExporter(t *testing.T, te component.TracesExporter, wantError error, numSpans int64) {
+	ocSpansSaver := new(testOCTraceExporter)
+	trace.RegisterExporter(ocSpansSaver)
+	defer trace.UnregisterExporter(ocSpansSaver)
+
+	const numRequests = 5
+	generateTraceTraffic(t, te, numRequests, wantError)
+
+	// Inspection time!
+	ocSpansSaver.mu.Lock()
+	defer ocSpansSaver.mu.Unlock()
+
+	require.NotEqual(t, 0, len(ocSpansSaver.spanData), "No exported span data.")
+
+	gotSpanData := ocSpansSaver.spanData
+	require.Equal(t, numRequests+1, len(gotSpanData))
+
+	parentSpan := gotSpanData[numRequests]
+	require.Equalf(t, fakeTraceParentSpanName, parentSpan.Name, "SpanData %v", parentSpan)
+
+	for _, sd := range gotSpanData[:numRequests] {
+		require.Equalf(t, parentSpan.SpanContext.SpanID, sd.ParentSpanID, "Exporter span not a child\nSpanData %v", sd)
+		require.Equalf(t, errToStatus(wantError), sd.Status, "SpanData %v", sd)
+
+		sentSpans := numSpans
+		var failedToSendSpans int64
+		if wantError != nil {
+			sentSpans = 0
+			failedToSendSpans = numSpans
+		}
+
+		require.Equalf(t, sentSpans, sd.Attributes[obsreport.SentSpansKey], "SpanData %v", sd)
+		require.Equalf(t, failedToSendSpans, sd.Attributes[obsreport.FailedToSendSpansKey], "SpanData %v", sd)
+	}
+}
diff --git a/internal/otel_collector/exporter/fileexporter/README.md b/internal/otel_collector/exporter/fileexporter/README.md
new file mode 100644
index 00000000000..0ae26b4d128
--- /dev/null
+++ b/internal/otel_collector/exporter/fileexporter/README.md
@@ -0,0 +1,26 @@
+# File Exporter
+
+This exporter will write pipeline data to a JSON file. The data is written in
+[Protobuf JSON
+encoding](https://developers.google.com/protocol-buffers/docs/proto3#json)
+using [OpenTelemetry
+protocol](https://github.com/open-telemetry/opentelemetry-proto).
+
+Please note that there is no guarantee that exact field names will remain stable.
+This exporter is intended primarily for debugging the Collector without setting up backends.
+
+Supported pipeline types: traces, metrics, logs
+
+## Getting Started
+
+The following settings are required:
+
+- `path` (no default): the path of the file to write to.
+
+Example:
+
+```yaml
+exporters:
+  file:
+    path: ./filename.json
+```
diff --git a/internal/otel_collector/exporter/fileexporter/config.go b/internal/otel_collector/exporter/fileexporter/config.go
new file mode 100644
index 00000000000..e1086657960
--- /dev/null
+++ b/internal/otel_collector/exporter/fileexporter/config.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileexporter + +import ( + "go.opentelemetry.io/collector/config/configmodels" +) + +// Config defines configuration for file exporter. +type Config struct { + configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + + // Path of the file to write to. Path is relative to current directory. + Path string `mapstructure:"path"` +} diff --git a/internal/otel_collector/exporter/fileexporter/config_test.go b/internal/otel_collector/exporter/fileexporter/config_test.go new file mode 100644 index 00000000000..89d42d0aab6 --- /dev/null +++ b/internal/otel_collector/exporter/fileexporter/config_test.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileexporter + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + e0 := cfg.Exporters["file"] + assert.Equal(t, e0, factory.CreateDefaultConfig()) + + e1 := cfg.Exporters["file/2"] + assert.Equal(t, e1, + &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "file/2", + TypeVal: "file", + }, + Path: "./filename.json", + }) +} diff --git a/internal/otel_collector/exporter/fileexporter/factory.go b/internal/otel_collector/exporter/fileexporter/factory.go new file mode 100644 index 00000000000..4f72ccd6ee3 --- /dev/null +++ b/internal/otel_collector/exporter/fileexporter/factory.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package fileexporter
+
+import (
+	"context"
+	"os"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr = "file"
+)
+
+// NewFactory creates a factory for the file exporter.
+func NewFactory() component.ExporterFactory {
+	return exporterhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		exporterhelper.WithTraces(createTraceExporter),
+		exporterhelper.WithMetrics(createMetricsExporter),
+		exporterhelper.WithLogs(createLogsExporter))
+}
+
+func createDefaultConfig() configmodels.Exporter {
+	return &Config{
+		ExporterSettings: configmodels.ExporterSettings{
+			TypeVal: typeStr,
+			NameVal: typeStr,
+		},
+	}
+}
+
+func createTraceExporter(
+	_ context.Context,
+	_ component.ExporterCreateParams,
+	cfg configmodels.Exporter,
+) (component.TracesExporter, error) {
+	return createExporter(cfg)
+}
+
+func createMetricsExporter(
+	_ context.Context,
+	_ component.ExporterCreateParams,
+	cfg configmodels.Exporter,
+) (component.MetricsExporter, error) {
+	return createExporter(cfg)
+}
+
+func createLogsExporter(
+	_ context.Context,
+	_ component.ExporterCreateParams,
+	cfg configmodels.Exporter,
+) (component.LogsExporter, error) {
+	return createExporter(cfg)
+}
+
+func createExporter(config configmodels.Exporter) (*fileExporter, error) {
+	cfg := config.(*Config)
+
+	// There must be one exporter for metrics, traces, and logs. We maintain a
+	// map of exporters per config.
+
+	// Check to see if there is already an exporter for this config.
+	exporter, ok := exporters[cfg]
+
+	if !ok {
+		file, err := os.OpenFile(cfg.Path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+		if err != nil {
+			return nil, err
+		}
+		exporter = &fileExporter{file: file}
+
+		// Remember the exporter in the map.
+		exporters[cfg] = exporter
+	}
+	return exporter, nil
+}
+
+// This is the map of already created file exporters for particular configurations.
+// We maintain this map because the factory is asked for trace, metric, and log
+// exporters separately, but they must not create separate objects; they must
+// share one exporter object per configuration.
+var exporters = map[*Config]*fileExporter{}
diff --git a/internal/otel_collector/exporter/fileexporter/factory_test.go b/internal/otel_collector/exporter/fileexporter/factory_test.go
new file mode 100644
index 00000000000..a0e08a84418
--- /dev/null
+++ b/internal/otel_collector/exporter/fileexporter/factory_test.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileexporter
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configcheck"
+)
+
+func TestCreateDefaultConfig(t *testing.T) {
+	cfg := createDefaultConfig()
+	assert.NotNil(t, cfg, "failed to create default config")
+	assert.NoError(t, configcheck.ValidateConfig(cfg))
+}
+
+func TestCreateMetricsExporter(t *testing.T) {
+	cfg := createDefaultConfig()
+	exp, err := createMetricsExporter(
+		context.Background(),
+		component.ExporterCreateParams{Logger: zap.NewNop()},
+		cfg)
+	assert.Error(t, err)
+	require.Nil(t, exp)
+}
+
+func TestCreateTraceExporter(t *testing.T) {
+	cfg := createDefaultConfig()
+	exp, err := createTraceExporter(
+		context.Background(),
+		component.ExporterCreateParams{Logger: zap.NewNop()},
+		cfg)
+	assert.Error(t, err)
+	require.Nil(t, exp)
+}
+
+func TestCreateLogsExporter(t *testing.T) {
+	cfg := createDefaultConfig()
+
+	exp, err := createLogsExporter(
+		context.Background(),
+		component.ExporterCreateParams{Logger: zap.NewNop()},
+		cfg)
+	assert.Error(t, err)
+	require.Nil(t, exp)
+}
diff --git a/internal/otel_collector/exporter/fileexporter/file_exporter.go b/internal/otel_collector/exporter/fileexporter/file_exporter.go
new file mode 100644
index 00000000000..a6edfc0d2c1
--- /dev/null
+++ b/internal/otel_collector/exporter/fileexporter/file_exporter.go
@@ -0,0 +1,84 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileexporter
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	"github.com/gogo/protobuf/jsonpb"
+	"github.com/gogo/protobuf/proto"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal"
+	otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1"
+	otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1"
+	otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1"
+)
+
+// Marshaler configuration used for marshaling Protobuf to JSON. Uses the default config.
+var marshaler = &jsonpb.Marshaler{}
+
+// fileExporter is the implementation of the file exporter that writes telemetry data to a file
+// in Protobuf-JSON format.
+type fileExporter struct { + file io.WriteCloser + mutex sync.Mutex +} + +func (e *fileExporter) ConsumeTraces(_ context.Context, td pdata.Traces) error { + request := otlptrace.ExportTraceServiceRequest{ + ResourceSpans: pdata.TracesToOtlp(td), + } + return exportMessageAsLine(e, &request) +} + +func (e *fileExporter) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { + request := otlpmetrics.ExportMetricsServiceRequest{ + ResourceMetrics: pdata.MetricsToOtlp(md), + } + return exportMessageAsLine(e, &request) +} + +func (e *fileExporter) ConsumeLogs(_ context.Context, ld pdata.Logs) error { + request := otlplogs.ExportLogsServiceRequest{ + ResourceLogs: internal.LogsToOtlp(ld.InternalRep()), + } + return exportMessageAsLine(e, &request) +} + +func exportMessageAsLine(e *fileExporter, message proto.Message) error { + // Ensure only one write operation happens at a time. + e.mutex.Lock() + defer e.mutex.Unlock() + if err := marshaler.Marshal(e.file, message); err != nil { + return err + } + if _, err := io.WriteString(e.file, "\n"); err != nil { + return err + } + return nil +} + +func (e *fileExporter) Start(ctx context.Context, host component.Host) error { + return nil +} + +// Shutdown stops the exporter and is invoked during shutdown. +func (e *fileExporter) Shutdown(context.Context) error { + return e.file.Close() +} diff --git a/internal/otel_collector/exporter/fileexporter/file_exporter_test.go b/internal/otel_collector/exporter/fileexporter/file_exporter_test.go new file mode 100644 index 00000000000..a5093147613 --- /dev/null +++ b/internal/otel_collector/exporter/fileexporter/file_exporter_test.go @@ -0,0 +1,217 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
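Since `exportMessageAsLine` writes one jsonpb-marshaled request per line, the output file is ordinary JSON-lines and can be post-processed with standard tooling. A hedged reader sketch (it assumes a file produced by this exporter; for strict Protobuf-JSON round-tripping you would unmarshal into the generated request types with `jsonpb` rather than generic maps):

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := os.Open("filename.json") // the exporter's configured path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 64*1024), 16*1024*1024) // requests can be large
	for sc.Scan() {
		// Each line is one Export*ServiceRequest in Protobuf-JSON encoding.
		var req map[string]interface{}
		if err := json.Unmarshal(sc.Bytes(), &req); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("request with %d top-level fields\n", len(req))
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```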
+package fileexporter + +import ( + "context" + "testing" + "time" + + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal" + collectorlogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1" + collectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + logspb "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" + otresourcepb "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/testutil" +) + +func TestFileTraceExporterNoErrors(t *testing.T) { + mf := &testutil.LimitedWriter{} + lte := &fileExporter{file: mf} + require.NotNil(t, lte) + + td := testdata.GenerateTraceDataTwoSpansSameResource() + + assert.NoError(t, lte.ConsumeTraces(context.Background(), td)) + assert.NoError(t, lte.Shutdown(context.Background())) + + var unmarshaler = &jsonpb.Unmarshaler{} + var j collectortrace.ExportTraceServiceRequest + assert.NoError(t, unmarshaler.Unmarshal(mf, &j)) + + assert.EqualValues(t, pdata.TracesToOtlp(td), j.ResourceSpans) +} + +func TestFileMetricsExporterNoErrors(t *testing.T) { + mf := &testutil.LimitedWriter{} + lme := &fileExporter{file: mf} + require.NotNil(t, lme) + + md := testdata.GenerateMetricsTwoMetrics() + assert.NoError(t, lme.ConsumeMetrics(context.Background(), md)) + assert.NoError(t, lme.Shutdown(context.Background())) + + var unmarshaler = &jsonpb.Unmarshaler{} + var j collectormetrics.ExportMetricsServiceRequest + assert.NoError(t, unmarshaler.Unmarshal(mf, &j)) + + assert.EqualValues(t, pdata.MetricsToOtlp(md), j.ResourceMetrics) +} + +func TestFileLogsExporterNoErrors(t *testing.T) { + mf := &testutil.LimitedWriter{} + exporter := &fileExporter{file: mf} + require.NotNil(t, exporter) + + now := time.Now() + ld := []*logspb.ResourceLogs{ + { + Resource: otresourcepb.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "attr1", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value1"}}, + }, + }, + }, + InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{ + { + Logs: []*logspb.LogRecord{ + { + TimeUnixNano: uint64(now.UnixNano()), + Name: "logA", + }, + { + TimeUnixNano: uint64(now.UnixNano()), + Name: "logB", + }, + }, + }, + }, + }, + { + Resource: otresourcepb.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "attr2", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value2"}}, + }, + }, + }, + InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{ + { + Logs: []*logspb.LogRecord{ + { + TimeUnixNano: uint64(now.UnixNano()), + Name: "logC", + }, + }, + }, + }, + }, + } + assert.NoError(t, exporter.ConsumeLogs(context.Background(), pdata.LogsFromInternalRep(internal.LogsFromOtlp(ld)))) + assert.NoError(t, exporter.Shutdown(context.Background())) + + var unmarshaler = &jsonpb.Unmarshaler{} + var j collectorlogs.ExportLogsServiceRequest + + assert.NoError(t, unmarshaler.Unmarshal(mf, &j)) + assert.EqualValues(t, ld, j.ResourceLogs) +} + +func TestFileLogsExporterErrors(t *testing.T) { + + now := 
time.Now()
+	ld := []*logspb.ResourceLogs{
+		{
+			Resource: otresourcepb.Resource{
+				Attributes: []otlpcommon.KeyValue{
+					{
+						Key:   "attr1",
+						Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value1"}},
+					},
+				},
+			},
+			InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{
+				{
+					Logs: []*logspb.LogRecord{
+						{
+							TimeUnixNano: uint64(now.UnixNano()),
+							Name:         "logA",
+						},
+						{
+							TimeUnixNano: uint64(now.UnixNano()),
+							Name:         "logB",
+						},
+					},
+				},
+			},
+		},
+		{
+			Resource: otresourcepb.Resource{
+				Attributes: []otlpcommon.KeyValue{
+					{
+						Key:   "attr2",
+						Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value2"}},
+					},
+				},
+			},
+			InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{
+				{
+					Logs: []*logspb.LogRecord{
+						{
+							TimeUnixNano: uint64(now.UnixNano()),
+							Name:         "logC",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	cases := []struct {
+		Name   string
+		MaxLen int
+	}{
+		{
+			Name:   "opening",
+			MaxLen: 1,
+		},
+		{
+			Name:   "resource",
+			MaxLen: 16,
+		},
+		{
+			Name:   "log_start",
+			MaxLen: 78,
+		},
+		{
+			Name:   "logs",
+			MaxLen: 128,
+		},
+	}
+
+	for i := range cases {
+		maxLen := cases[i].MaxLen
+		t.Run(cases[i].Name, func(t *testing.T) {
+			mf := &testutil.LimitedWriter{
+				MaxLen: maxLen,
+			}
+			exporter := &fileExporter{file: mf}
+			require.NotNil(t, exporter)
+
+			assert.Error(t, exporter.ConsumeLogs(context.Background(), pdata.LogsFromInternalRep(internal.LogsFromOtlp(ld))))
+			assert.NoError(t, exporter.Shutdown(context.Background()))
+		})
+	}
+}
diff --git a/internal/otel_collector/exporter/fileexporter/testdata/config.yaml b/internal/otel_collector/exporter/fileexporter/testdata/config.yaml
new file mode 100644
index 00000000000..f3d046a1692
--- /dev/null
+++ b/internal/otel_collector/exporter/fileexporter/testdata/config.yaml
@@ -0,0 +1,26 @@
+receivers:
+  examplereceiver:
+
+processors:
+  exampleprocessor:
+
+exporters:
+  file:
+  file/2:
+    # This will write the pipeline data to a JSON file.
+    # The data is written in Protobuf JSON encoding
+    # (https://developers.google.com/protocol-buffers/docs/proto3#json).
+    # Note that there are no compatibility guarantees for this format, since it
+    # is just a dump of internal structures, which can change over time.
+    # This is intended primarily for debugging the Collector without setting up backends.
+    path: ./filename.json
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [file]
+    metrics:
+      receivers: [examplereceiver]
+      exporters: [file, file/2]
diff --git a/internal/otel_collector/exporter/jaegerexporter/README.md b/internal/otel_collector/exporter/jaegerexporter/README.md
new file mode 100644
index 00000000000..1cca7c0e285
--- /dev/null
+++ b/internal/otel_collector/exporter/jaegerexporter/README.md
@@ -0,0 +1,49 @@
+# Jaeger gRPC Exporter
+
+Exports data via gRPC to [Jaeger](https://www.jaegertracing.io/) destinations.
+By default, this exporter requires TLS and offers queued retry capabilities. A
+Jaeger Thrift HTTP exporter is available in the [contrib
+repository](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/master/exporter/jaegerthrifthttpexporter).
+
+Supported pipeline types: traces
+
+## Getting Started
+
+The following settings are required:
+
+- `endpoint` (no default): host:port to which the exporter is going to send Jaeger trace data,
+using the gRPC protocol.
The valid syntax is described
+[here](https://github.com/grpc/grpc/blob/master/doc/naming.md)
+
+By default, TLS is enabled:
+
+- `insecure` (default = `false`): whether to disable client transport security for
+  the exporter's connection.
+
+As a result, the following parameters are also required:
+
+- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+Example:
+
+```yaml
+exporters:
+  jaeger:
+    endpoint: jaeger-all-in-one:14250
+    cert_file: file.cert
+    key_file: file.key
+  jaeger/2:
+    endpoint: jaeger-all-in-one:14250
+    insecure: true
+```
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md)
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md)
diff --git a/internal/otel_collector/exporter/jaegerexporter/config.go b/internal/otel_collector/exporter/jaegerexporter/config.go
new file mode 100644
index 00000000000..d0bf6fb1e32
--- /dev/null
+++ b/internal/otel_collector/exporter/jaegerexporter/config.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaegerexporter
+
+import (
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// Config defines configuration for the Jaeger gRPC exporter.
+type Config struct {
+	configmodels.ExporterSettings  `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.QueueSettings   `mapstructure:"sending_queue"`
+	exporterhelper.RetrySettings   `mapstructure:"retry_on_failure"`
+
+	configgrpc.GRPCClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+}
diff --git a/internal/otel_collector/exporter/jaegerexporter/config_test.go b/internal/otel_collector/exporter/jaegerexporter/config_test.go
new file mode 100644
index 00000000000..4aee77bdcfa
--- /dev/null
+++ b/internal/otel_collector/exporter/jaegerexporter/config_test.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaegerexporter + +import ( + "context" + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + e0 := cfg.Exporters["jaeger"] + assert.Equal(t, e0, factory.CreateDefaultConfig()) + + e1 := cfg.Exporters["jaeger/2"] + assert.Equal(t, e1, + &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "jaeger/2", + TypeVal: "jaeger", + }, + TimeoutSettings: exporterhelper.TimeoutSettings{ + Timeout: 10 * time.Second, + }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: "a.new.target:1234", + WriteBufferSize: 512 * 1024, + BalancerName: "round_robin", + }, + }) + + params := component.ExporterCreateParams{Logger: zap.NewNop()} + te, err := factory.CreateTracesExporter(context.Background(), params, e1) + require.NoError(t, err) + require.NotNil(t, te) +} diff --git a/internal/otel_collector/exporter/jaegerexporter/doc.go b/internal/otel_collector/exporter/jaegerexporter/doc.go new file mode 100644 index 00000000000..e66a34822ad --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jaegerexporter implements an exporter that sends trace data to +// a Jaeger collector gRPC endpoint. 
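The `,squash` tags in the Config above are what flatten the embedded settings into the exporter's top-level YAML keys (`endpoint`, `timeout`, and so on). A self-contained sketch of the mechanism using github.com/mitchellh/mapstructure with simplified stand-in types (not the collector's loader):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type ExporterSettings struct {
	Name string `mapstructure:"name"`
}

type GRPCClientSettings struct {
	Endpoint string `mapstructure:"endpoint"`
}

// Squashed embeds contribute their keys at the parent level instead of
// under a nested key, mirroring how the jaeger exporter Config decodes.
type Config struct {
	ExporterSettings   `mapstructure:",squash"`
	GRPCClientSettings `mapstructure:",squash"`
}

func main() {
	raw := map[string]interface{}{
		"name":     "jaeger/2",
		"endpoint": "a.new.target:1234",
	}
	var cfg Config
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // both keys land in the embedded structs
}
```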
+package jaegerexporter diff --git a/internal/otel_collector/exporter/jaegerexporter/exporter.go b/internal/otel_collector/exporter/jaegerexporter/exporter.go new file mode 100644 index 00000000000..188d4d3d138 --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/exporter.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaegerexporter + +import ( + "context" + "fmt" + + jaegerproto "github.com/jaegertracing/jaeger/proto-gen/api_v2" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/exporterhelper" + jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +// newTraceExporter returns a new Jaeger gRPC exporter. +// The exporter name is the name to be used in the observability of the exporter. +// The collectorEndpoint should be of the form "hostname:14250" (a gRPC target). +func newTraceExporter(cfg *Config, logger *zap.Logger) (component.TracesExporter, error) { + + opts, err := cfg.GRPCClientSettings.ToDialOptions() + if err != nil { + return nil, err + } + + client, err := grpc.Dial(cfg.GRPCClientSettings.Endpoint, opts...) + if err != nil { + return nil, err + } + + collectorServiceClient := jaegerproto.NewCollectorServiceClient(client) + s := &protoGRPCSender{ + logger: logger, + client: collectorServiceClient, + metadata: metadata.New(cfg.GRPCClientSettings.Headers), + waitForReady: cfg.WaitForReady, + } + + exp, err := exporterhelper.NewTraceExporter( + cfg, logger, s.pushTraceData, + exporterhelper.WithTimeout(cfg.TimeoutSettings), + exporterhelper.WithRetry(cfg.RetrySettings), + exporterhelper.WithQueue(cfg.QueueSettings), + ) + + return exp, err +} + +// protoGRPCSender forwards spans encoded in the jaeger proto +// format, to a grpc server. 
+type protoGRPCSender struct { + logger *zap.Logger + client jaegerproto.CollectorServiceClient + metadata metadata.MD + waitForReady bool +} + +func (s *protoGRPCSender) pushTraceData( + ctx context.Context, + td pdata.Traces, +) (droppedSpans int, err error) { + + batches, err := jaegertranslator.InternalTracesToJaegerProto(td) + if err != nil { + return td.SpanCount(), consumererror.Permanent(fmt.Errorf("failed to push trace data via Jaeger exporter: %w", err)) + } + + if s.metadata.Len() > 0 { + ctx = metadata.NewOutgoingContext(ctx, s.metadata) + } + + var sentSpans int + for _, batch := range batches { + _, err = s.client.PostSpans( + ctx, + &jaegerproto.PostSpansRequest{Batch: *batch}, grpc.WaitForReady(s.waitForReady)) + if err != nil { + s.logger.Debug("failed to push trace data to Jaeger", zap.Error(err)) + return td.SpanCount() - sentSpans, fmt.Errorf("failed to push trace data via Jaeger exporter: %w", err) + } + sentSpans += len(batch.Spans) + } + + return 0, nil +} diff --git a/internal/otel_collector/exporter/jaegerexporter/exporter_test.go b/internal/otel_collector/exporter/jaegerexporter/exporter_test.go new file mode 100644 index 00000000000..ff2fcc04ef2 --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/exporter_test.go @@ -0,0 +1,278 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
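Putting the pieces together, a minimal out-of-tree usage sketch based on the factory tests below (the endpoint value is illustrative, and `Insecure: true` mirrors the plaintext cases in `TestNew`):

```go
package main

import (
	"context"
	"log"

	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter/jaegerexporter"
)

func main() {
	factory := jaegerexporter.NewFactory()
	cfg := factory.CreateDefaultConfig().(*jaegerexporter.Config)
	cfg.Endpoint = "localhost:14250" // required; there is no default endpoint
	cfg.TLSSetting.Insecure = true   // plaintext gRPC for a local collector

	params := component.ExporterCreateParams{Logger: zap.NewNop()}
	exp, err := factory.CreateTracesExporter(context.Background(), params, cfg)
	if err != nil {
		log.Fatal(err)
	}
	if err := exp.Start(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
	defer exp.Shutdown(context.Background())
	// exp.ConsumeTraces(ctx, td) now pushes span batches over gRPC.
}
```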
+ +package jaegerexporter + +import ( + "context" + "net" + "path" + "sync" + "testing" + + "github.com/jaegertracing/jaeger/model" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data" + tracev1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestNew(t *testing.T) { + type args struct { + config Config + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "createExporter", + args: args{ + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: nil, + Endpoint: "foo.bar", + Compression: "", + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + Keepalive: nil, + }, + }, + }, + }, + { + name: "createExporterWithHeaders", + args: args{ + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: map[string]string{"extra-header": "header-value"}, + Endpoint: "foo.bar", + Compression: "", + Keepalive: nil, + }, + }, + }, + }, + { + name: "createBasicSecureExporter", + args: args{ + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: nil, + Endpoint: "foo.bar", + Compression: "", + Keepalive: nil, + }, + }, + }, + }, + { + name: "createSecureExporterWithClientTLS", + args: args{ + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: nil, + Endpoint: "foo.bar", + Compression: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "testdata/test_cert.pem", + }, + Insecure: false, + }, + Keepalive: nil, + }, + }, + }, + }, + { + name: "createSecureExporterWithKeepAlive", + args: args{ + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: nil, + Endpoint: "foo.bar", + Compression: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "testdata/test_cert.pem", + }, + Insecure: false, + ServerName: "", + }, + Keepalive: &configgrpc.KeepaliveClientConfig{ + Time: 0, + Timeout: 0, + PermitWithoutStream: false, + }, + }, + }, + }, + }, + { + name: "createSecureExporterWithMissingFile", + args: args{ + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: nil, + Endpoint: "foo.bar", + Compression: "", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "testdata/test_cert_missing.pem", + }, + Insecure: false, + }, + Keepalive: nil, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newTraceExporter(&tt.args.config, zap.NewNop()) + if (err != nil) != tt.wantErr { + t.Errorf("newTraceExporter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got == nil { + return + } + + // This is expected to fail. 
+ err = got.ConsumeTraces(context.Background(), testdata.GenerateTraceDataNoLibraries()) + assert.Error(t, err) + }) + } +} + +// CA key and cert +// openssl req -new -nodes -x509 -days 9650 -keyout ca.key -out ca.crt -subj "/C=US/ST=California/L=Mountain View/O=Your Organization/OU=Your Unit/CN=localhost" +// Server key and cert +// openssl genrsa -des3 -out server.key 1024 +// openssl req -new -key server.key -out server.csr -subj "/C=US/ST=California/L=Mountain View/O=Your Organization/OU=Your Unit/CN=localhost" +// openssl x509 -req -days 9650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt +// Client key and cert +// openssl genrsa -des3 -out client.key 1024 +// openssl req -new -key client.key -out client.csr -subj "/C=US/ST=California/L=Mountain View/O=Your Organization/OU=Your Unit/CN=localhost" +// openssl x509 -req -days 9650 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out client.crt +// Remove passphrase +// openssl rsa -in server.key -out temp.key && rm server.key && mv temp.key server.key +// openssl rsa -in client.key -out temp.key && rm client.key && mv temp.key client.key +func TestMutualTLS(t *testing.T) { + caPath := path.Join(".", "testdata", "ca.crt") + serverCertPath := path.Join(".", "testdata", "server.crt") + serverKeyPath := path.Join(".", "testdata", "server.key") + clientCertPath := path.Join(".", "testdata", "client.crt") + clientKeyPath := path.Join(".", "testdata", "client.key") + + // start gRPC Jaeger server + tlsCfgOpts := configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: serverCertPath, + KeyFile: serverKeyPath, + }, + ClientCAFile: caPath, + } + tlsCfg, err := tlsCfgOpts.LoadTLSConfig() + require.NoError(t, err) + spanHandler := &mockSpanHandler{} + server, serverAddr := initializeGRPCTestServer(t, func(server *grpc.Server) { + api_v2.RegisterCollectorServiceServer(server, spanHandler) + }, grpc.Creds(credentials.NewTLS(tlsCfg))) + defer server.GracefulStop() + + // Create gRPC trace exporter + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + // Disable queuing to ensure that we execute the request when calling ConsumeTraces + // otherwise we will have to wait. 
+	cfg.QueueSettings.Enabled = false
+	cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{
+		Endpoint: serverAddr.String(),
+		TLSSetting: configtls.TLSClientSetting{
+			TLSSetting: configtls.TLSSetting{
+				CAFile:   caPath,
+				CertFile: clientCertPath,
+				KeyFile:  clientKeyPath,
+			},
+			Insecure:   false,
+			ServerName: "localhost",
+		},
+	}
+	exporter, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg)
+	require.NoError(t, err)
+	err = exporter.Start(context.Background(), nil)
+	require.NoError(t, err)
+	defer exporter.Shutdown(context.Background())
+
+	traceID := data.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
+	spanID := data.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7})
+	traces := pdata.TracesFromOtlp([]*tracev1.ResourceSpans{
+		{InstrumentationLibrarySpans: []*tracev1.InstrumentationLibrarySpans{{Spans: []*tracev1.Span{{TraceId: traceID, SpanId: spanID}}}}},
+	})
+	err = exporter.ConsumeTraces(context.Background(), traces)
+	require.NoError(t, err)
+	requests := spanHandler.getRequests()
+	assert.Equal(t, 1, len(requests))
+	tidBytes := traceID.Bytes()
+	jTraceID, err := model.TraceIDFromBytes(tidBytes[:])
+	require.NoError(t, err)
+	require.Len(t, requests, 1)
+	require.Len(t, requests[0].GetBatch().Spans, 1)
+	assert.Equal(t, jTraceID, requests[0].GetBatch().Spans[0].TraceID)
+}
+
+func initializeGRPCTestServer(t *testing.T, beforeServe func(server *grpc.Server), opts ...grpc.ServerOption) (*grpc.Server, net.Addr) {
+	server := grpc.NewServer(opts...)
+	lis, err := net.Listen("tcp", "localhost:0")
+	require.NoError(t, err)
+	beforeServe(server)
+	go func() {
+		require.NoError(t, server.Serve(lis))
+	}()
+	return server, lis.Addr()
+}
+
+type mockSpanHandler struct {
+	mux      sync.Mutex
+	requests []*api_v2.PostSpansRequest
+}
+
+func (h *mockSpanHandler) getRequests() []*api_v2.PostSpansRequest {
+	h.mux.Lock()
+	defer h.mux.Unlock()
+	return h.requests
+}
+
+func (h *mockSpanHandler) PostSpans(_ context.Context, r *api_v2.PostSpansRequest) (*api_v2.PostSpansResponse, error) {
+	h.mux.Lock()
+	defer h.mux.Unlock()
+	h.requests = append(h.requests, r)
+	return &api_v2.PostSpansResponse{}, nil
+}
diff --git a/internal/otel_collector/exporter/jaegerexporter/factory.go b/internal/otel_collector/exporter/jaegerexporter/factory.go
new file mode 100644
index 00000000000..ac6e287b0ed
--- /dev/null
+++ b/internal/otel_collector/exporter/jaegerexporter/factory.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaegerexporter
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+ typeStr = "jaeger" +) + +// NewFactory creates a factory for Jaeger exporter +func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTraceExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + GRPCClientSettings: configgrpc.GRPCClientSettings{ + // We almost read 0 bytes, so no need to tune ReadBufferSize. + WriteBufferSize: 512 * 1024, + }, + } +} + +func createTraceExporter( + _ context.Context, + params component.ExporterCreateParams, + config configmodels.Exporter, +) (component.TracesExporter, error) { + + expCfg := config.(*Config) + if expCfg.Endpoint == "" { + // TODO: Improve error message, see #215 + err := fmt.Errorf( + "%q config requires a non-empty \"endpoint\"", + expCfg.Name()) + return nil, err + } + + exp, err := newTraceExporter(expCfg, params.Logger) + if err != nil { + return nil, err + } + + return exp, nil +} diff --git a/internal/otel_collector/exporter/jaegerexporter/factory_test.go b/internal/otel_collector/exporter/jaegerexporter/factory_test.go new file mode 100644 index 00000000000..2d4233c2395 --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/factory_test.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaegerexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configerror" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateMetricsExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + params := component.ExporterCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateMetricsExporter(context.Background(), params, cfg) + assert.Error(t, err, configerror.ErrDataTypeIsNotSupported) +} + +func TestCreateInstanceViaFactory(t *testing.T) { + factory := NewFactory() + + cfg := factory.CreateDefaultConfig() + + // Default config doesn't have default endpoint so creating from it should + // fail. + params := component.ExporterCreateParams{Logger: zap.NewNop()} + exp, err := factory.CreateTracesExporter(context.Background(), params, cfg) + assert.NotNil(t, err) + assert.Equal(t, "\"jaeger\" config requires a non-empty \"endpoint\"", err.Error()) + assert.Nil(t, exp) + + // Endpoint doesn't have a default value so set it directly. 
+ expCfg := cfg.(*Config) + expCfg.Endpoint = "some.target.org:12345" + exp, err = factory.CreateTracesExporter(context.Background(), params, cfg) + assert.NoError(t, err) + assert.NotNil(t, exp) + + assert.NoError(t, exp.Shutdown(context.Background())) +} diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/ca.crt b/internal/otel_collector/exporter/jaegerexporter/testdata/ca.crt new file mode 100644 index 00000000000..afe1effdf3a --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/testdata/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQCGaM7CuADemjANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB +VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM +CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIwMDkyMjA1MjIx +MVoXDTMwMDkyMDA1MjIxMVowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry +YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV +BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AMfzhzDBS/8Pt4nQL25ZgNXqdpAs+mpnOQLc0ep7+HI4zmWyP+3OvKxF4dyCIk5w +wTYZo5ln79hshIU+OGRBBmIrMvP09ekhjuvACYK/Sn4MLj9lihq9wV3+hqrlQlqG +YaZULoQ3+7cj0S964JQu5wfD3IIeSAzaW7EU4x5CBwFlpe9aSqgiENicUmDQRem7 +mXpCKGz0QZ8OPhidSpYYzltOYqEUYVjNwB7oQj5awpuT5twqF3Tzy1UQ8EsT6K8a +k4TdnZAwZoSZmpBP7GNEZAac8YcgAdk4llhB/pc+wd62MODGYznvP/TtU4BiY3K+ +wdf3nPPYd7P2PelqXWfRxp0CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAr5tOtnsm +g4Co6vG8lb788+nHMoGCzNG1GxEoluAXfbKvM2zF8m3nt9WY7A16sqBuDrBlAruM +R28hLtVA6nDbpRo2K1bvfI6pBA5DBDxx/bCBPHG49v/MSmpeXrgpX2/8ZQyDqrLe +gmN+NZ55yv95JUHi4rBeI72q+kEXFJ97yNFGbirR22VOjk+L28rA8qDCGcGF1NJf +u/va6BcXnXwKJhtpjzqqdeQJjigDEmpzVJcPj7PtuKcqjmoeLb46+SRUtSPDGqlS +cqz1Mp0eM6CxS5hfCMZN19K6kTwyy7j2PjnnFWfm4Vx/j24bu7dRMTIgBafFnz8/ +bX5JU6zb1ksWuQ== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/client.crt b/internal/otel_collector/exporter/jaegerexporter/testdata/client.crt new file mode 100644 index 00000000000..9a34430b39b --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/testdata/client.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdfMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjExWhcNMzAwOTIwMDUyMjExWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEApZ17aiCiS105m2PajvPyh993oe2PG2FsReihmffWj8kF2YFeDo4weJ7o +SUylnBOwSF0c7VAnQgn6NON+57vVmFm/xVBYvLPqElJq98fJI/AlZTWsFwVfnhWk +T6o9C1L6LmrDe7aduj/CP11GKQsioK1NkFxLYBUTs4k4+kFxT0jhnLpbI1Ib/m6P +VhbRDOcP0lHkF80YrcJbuVop6gobdxq0slCdOjRBHwcIIgZnDZzwVoxEI2zGfX+h +YgP7zcAW8QrGHgsT0xbVC5OA/Re/t3lzJkCNJPLDKQCuUuXsYtLjV0dxLW8sruTT +xQrwG0Rp6iYLAUfGwVFDqNmknP+3vQIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQB+ +1Qp4ny36KgSCfao8NwfvNSDz8VfzaGJQLvJLrq2+YDmUB+i8M4BvivYsz1Q5WQYN +A9dWnMAjgYFItUMBPGoCMlg8panGYS/7XIHXhexENzfqXA0l4bTQt804mz+QIbs4 +oqVTtQdLt1OGb8IGWvI+jJGcV08aJ87kdNdd6sTfzwYR0w5uR7ceB5iB6Im5tNgz +7ASqHZYQNpID7mnVK8es2wLoGkh1SI4jRHxZAGBS4uDNy2H2eC1Jj9Y1m5nzl9nA +9IXxwac7f1WfPjpI000Hed3kY7hBV+lIJlF0A29l7DqnVsXKx6FAtEHMwIazAyFF +dTuYHlKIgx2Fno1b35qA +-----END CERTIFICATE----- diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/client.key b/internal/otel_collector/exporter/jaegerexporter/testdata/client.key new file mode 100644 index 00000000000..4e6369105ec --- /dev/null +++ 
b/internal/otel_collector/exporter/jaegerexporter/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApZ17aiCiS105m2PajvPyh993oe2PG2FsReihmffWj8kF2YFe +Do4weJ7oSUylnBOwSF0c7VAnQgn6NON+57vVmFm/xVBYvLPqElJq98fJI/AlZTWs +FwVfnhWkT6o9C1L6LmrDe7aduj/CP11GKQsioK1NkFxLYBUTs4k4+kFxT0jhnLpb +I1Ib/m6PVhbRDOcP0lHkF80YrcJbuVop6gobdxq0slCdOjRBHwcIIgZnDZzwVoxE +I2zGfX+hYgP7zcAW8QrGHgsT0xbVC5OA/Re/t3lzJkCNJPLDKQCuUuXsYtLjV0dx +LW8sruTTxQrwG0Rp6iYLAUfGwVFDqNmknP+3vQIDAQABAoIBAFCAbCz6D+lyNz5B +G0vBaHKDPTOItbcpc+fHXEXrInBh1mYTbBTHKOh41ZBLoXRsXZPPLvBrtal0EBsF +OfqKYxjbB0xx8bBIA89EIJqwkiTV5ld771qBUikVe/j9Vw7aFqHZY4wPCDwwIcuL +Gz94GizgEIPLWRgrJphuazO5+8n9atgrsQkv6iyjYa4b/RiqjJ/cV0NHfdf1rhjA +a67WPunXqRmdthY6znJ5FviVHwyRT3R2rLZ7aeoHwdBCpaqOUtmqF6m3lgR1gTV6 +I8CqenbS7TQNGJJbnBaiHJ4mHkZ7T8raVxbXoEcODx4C5o/XqV8LarTwvEhxoxZf +gU6I3AECgYEA1jXQrN7hFYlUv2f61GMTD/soH6XvusV0Osp8vzt39nnCPFykjygU +Q1HLK5Q2y6IyUrSpRB+ESlY2VRQguLGmPIFYJ7/QMLPbdEmjhzG9pmOwkGPz3Pzo +HDclUk0tVW/Y0yowamGxjwQ2fqYQkDo/x7oJatonLBo/a0d96rpT7rECgYEAxey1 +6wu7Dzp9FqLo2ejmFiEnAo0iJbkWqZ/t970Vss+luN7wdg8pFTmQpcQaP/yULbj7 +Sr4GT+DwvaVp9wOCqmmM7zaiRDsKePh9+x8zDNg93CU46gVrnOXI8lQhy7GU8zJH +vgLIW/lBgWI1WviP8E9RZJ46T1xA8N/b23XP1M0CgYB2IoGuBNDfXriFQuP7I5SM +uLd9FLdsKp8aGTfJAxP6s1WiknkSlkjug3pn0zqQ/SeBoy7Cahs8H+Wg8BPHp/mE +3tRuP4OiiF5b+Iyd75mm3M2wI9+GnVeoz+TeSb/ZoXBTrK28zZntxKYFHLJ/yBAK +pxewfRUjZmpZULkprY7CUQKBgHSS3vL5jcuCX3qdqRfvnNUb4aYXCi29viNaFwb7 +T3Rp6OniJS7j7waZSSDZR7y4P25OrXNwSLdzfVe1vZvHbYaBdIjQXPJi8+AcO9dU +oPTHyGeJZOQxHRjkHl7cPquChIBY9PhtGyVQwcw608Io9F5N2Vqx9xYW+AjnUM8J +rMdhAoGBAMVWRDgVNYyVy/ZBom9xcFOiuDG+U+osE9NZirC+vOhB+hK2NZ+rq7CI +Cf7qVsAYW49Vt0Sk/F+s0FcM//y84rE+hiXOcqFIwXV77mIHcv6QNb4mjdG/kg3+ +5Wx8EjYemS949kbxng8eqoCqyjU8s0v63k5DeCSxpJwX7EzxCOG3 +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/config.yaml b/internal/otel_collector/exporter/jaegerexporter/testdata/config.yaml new file mode 100644 index 00000000000..679c40d3b02 --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/testdata/config.yaml @@ -0,0 +1,28 @@ +receivers: + examplereceiver: + +processors: + exampleprocessor: + +exporters: + jaeger: + jaeger/2: + endpoint: "a.new.target:1234" + balancer_name: "round_robin" + timeout: 10s + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [jaeger, jaeger/2] diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/server.crt b/internal/otel_collector/exporter/jaegerexporter/testdata/server.crt new file mode 100644 index 00000000000..ad9472180ff --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/testdata/server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdeMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjExWhcNMzAwOTIwMDUyMjExWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAri5mBuqnshebSyRY4eyDi5Q3OuUFgekmWd4IXQMH2QC8QSXW5sl7zxcU +cCccjRyBcfCiOhAphD/w3XsuZhVVdVMj5KZErh0gKFyZCJQrkTma6/7L4NsY7R9k +p0YzEaAHCx7DsEpzGfGhstv9CJDRNd7H2ydF27vvc28KvOQn2FSoIGVVYKAQGCiZ 
+jzq+eaA/KqByeKpU612KJHi0v7fc8VA0XLpnFXH5PqlMV18YqnEJRw8honhPNOEP +cbdDTh06ycChWVEOhZSz8yJWz6M5JdxmMZmgU8QaJhJ8h6n/JsauftH9R8jxRqhe +Xoy113z2+IqEYDm+jzd6+vLwnZLZdQIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQCk +XhMNKQe1BqVpuOIFSsAg48VK44tWX/xn9fgeRtNL0b/LWPAuNk17GIu85WmCaiVF +0wha6+fh6cKDw9PNFK3ktXEESa9y0Wnf6jjYA/oj6jAnGI84F0YS+MSm/x5etnW1 +YhCuyFnPTNVoHP7qawQHXP7LC6t/y4EafYohoaGgNhxvCp0n8itfLPHfcZfCQio+ +8RCGM2OukZXihaRGKfEGLYlrAfr0l53n8mxF4Qzp8PiJfB/5FPTnS9teIzhq29nM +62ITmyWvJeHdtqpdOZsEiUo6E1idwtHf3E5XKPuzPA6/Llhrr+JS6kPdq1B8EtN3 +n8FE3N6bXsGCZgoy2S60 +-----END CERTIFICATE----- diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/server.key b/internal/otel_collector/exporter/jaegerexporter/testdata/server.key new file mode 100644 index 00000000000..32767297214 --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAri5mBuqnshebSyRY4eyDi5Q3OuUFgekmWd4IXQMH2QC8QSXW +5sl7zxcUcCccjRyBcfCiOhAphD/w3XsuZhVVdVMj5KZErh0gKFyZCJQrkTma6/7L +4NsY7R9kp0YzEaAHCx7DsEpzGfGhstv9CJDRNd7H2ydF27vvc28KvOQn2FSoIGVV +YKAQGCiZjzq+eaA/KqByeKpU612KJHi0v7fc8VA0XLpnFXH5PqlMV18YqnEJRw8h +onhPNOEPcbdDTh06ycChWVEOhZSz8yJWz6M5JdxmMZmgU8QaJhJ8h6n/JsauftH9 +R8jxRqheXoy113z2+IqEYDm+jzd6+vLwnZLZdQIDAQABAoIBAB9lY6SZhXK3iJlp +ys+mVyvAopfuEikRgoUuXWmOk0qNNGG11V+yL6vraBazJhVVbwu/qS44HZOmBt+V +RY7HB6lnPBAJ3FVaEKLV1gvS0WuS65bgTZWRSJPtVbJFGA10P/DEMdfEA61IfbIE +rz5tBBjmuDWLha8O2CGBgXj80yXOCIJYYcTZizLo2njxbZ6RrcJZxtKoYVxSCwjI ++HY+p5yKl9+P6O1uWf1gRTZzBS6j/IcSncp04zl0QZ05NTlh334AM+9TdLRdBNvW +aGC3gD5SttwBzjeQQUoao2KfTyEL4OZUtDlgGCxFTAr8JmikooOSGr50iz6raPok +oYiXB90CgYEA1d6WO5PDzSp78VxE3CWzvDHK/4mTrBHJzLfSQ/ppAWeyT93Yqo1X +AZrhbTB4qeT/hP/d8tX5lTUXCmxly3RhKGqINtax9NX9nnT3lW/KLJy+fCJlJhGb +/e4zN9TjmHHd1s0F7X0sX2kCT3CgMRZAuRM8HIbUisJuGkxn40vIR68CgYEA0H5W +rB46duLASF/HQes5gG+UWGu8ZjfLRm61dARIE+DF9A1UifzGs4MNRXIBxJMC4eVN +CHYOZAtfaxS67nF+rjCBFJhDK86K208oIOXMlsdsCNEPSQOJ773SiW7obhxHvb/v +arMijMlw6gR6sZjAvDCqLPqAn2novknpTm+p1hsCgYBV7vsUczogjOqCP53Xizqd +6q/zX3c7k2YvYMkW0V4x11W5a51sYiBMn8vmUKybL01QRnMK6NlBD7TzVjzMtDV5 +xNx4lGmqw/UFH2B/5gwpQs3zGOrlkfvI21YvH9ZXYaFOdtDj7QDQUWRBQTLMpnFZ +LBZiK/bozljpLjwsAz41NwKBgD3FgLZkClxY2DFZNzb/kzzLUj9URDBgzXgjqt0h +u50wFlY1cNulKdXbuR3fJkjwaYU2I7mjCKZ5fB7EuJGWzLqdIlFQv24GNru9Wx1M +GG5/zTFbh1TRmJeQPQV8955QAK1mZW/OfSkLMcoT46okoM3g+Tp0SZFxloRyb45O +pRoLAoGASL4xGPKJBTMFgSDmSK+Uyeax+ckn4i0pck9pIi8fahPSe0wEMFAazVPS +4izjuyIH3J5nA0zaDzJccZGvmtL1d+u5irtTPvuwUcpGW7uCupG2kh7rQGd94OoZ +z3S8vRpta6EiyS/qmLl1PBvPzOJltm2+LtqwRxLTQPr+Xz7HL9g= +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/exporter/jaegerexporter/testdata/test_cert.pem b/internal/otel_collector/exporter/jaegerexporter/testdata/test_cert.pem new file mode 100644 index 00000000000..b2e77b89d49 --- /dev/null +++ b/internal/otel_collector/exporter/jaegerexporter/testdata/test_cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE6jCCAtICCQDVU4PtqpqADTANBgkqhkiG9w0BAQsFADA3MQswCQYDVQQGEwJV +UzETMBEGA1UECAwKY2FsaWZvcm5pYTETMBEGA1UECgwKb3BlbmNlbnN1czAeFw0x +OTAzMDQxODA3MjZaFw0yMDAzMDMxODA3MjZaMDcxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIDApjYWxpZm9ybmlhMRMwEQYDVQQKDApvcGVuY2Vuc3VzMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAy9JQiAOMzArcdiS4szbTuzg5yYijSSY6SvGj +XMs4/LEFLxgGmFfyHXxoVQzV26lTu/AiUFlZi4JY2qlkZyPwmmmSg4fmzikpVPiC +Vv9pvSIojs8gs0sHaOt40Q8ym43bNt3Mh8rYrs+XMERi6Ol9//j4LnfePkNU5uEo +qC8KQamckaMR6UEHFNunyOwvNBsipgTPldQUPGVnCsNKk8olYGAXS7DR25bgbPli 
+4T9VCSElsSPAODmyo+2MEDagVXa1vVYxKyO2k6oeBS0lsvdRqRTmGggcg0B/dk+a
+H1CL9ful0cu9P3dQif+hfGay8udPkwDLPEq1+WnjJFut3Pmbk3SqUCas5iWt76kK
+eKFh4k8fCy4yiaZxzvSbm9+bEBHAl0ZXd8pjvAsBfCKe6G9SBzE1DK4FjWiiEGCb
+5dGsyTKr33q3DekLvT3LF8ZeON/13d9toucX9PqG2HDwMP/Fb4WjQIzOc/H9wIak
+pf7u6QBDGUiCMmoDrp1d8RsI1RPbEhoywH0YlLmwgf+cr1dU7vlISf576EsGxFz4
++/sZjIBvZBHn/x0MH+bs4J8V3vMujfDoRdhL07bK7q/AkEALUxljKEfoWeqiuVzK
+F9BVv3xNhiua2kgPVbMNWPrQ5uotkNp8IykJ3QOuQ3p5pzxdGfpLd6f8gmJDmcbi
+AI9dWTcCAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAVVi4t/Sumre+AGTaU7np9dl2
+tpllbES5ixe6m2uezt5wAzYNNyuQ2mMG2XrSkMy5gvBZRT9nRNSmLV8VEcxZihG0
+YHS5soXnLL3Jdlwxp98WTDPvM1ntxcHyEyqrrg9YDfKn4sOrr5vo2yZzoKwtxtc7
+lue9JormVx7GxMi7NwaUtCbnwAIcqJJpFjt1EhmJOxGqTJPgUvTBdeGvRj30c6fk
+pqpUdPbZ7RKPEtbLoMoCBujKnErv+H0G6Vp9WyCHN+Mi9uTMsGwH14cmJjmfwGDC
+8/WF4LdlawFnf/arIp9YcVwcP91d4ywyvbuuo2M7qdosQ7k4uRZ3tyggLYShS3RW
+BMEhMRDz9dM0oKGF+HnaS824BIh6O6Hn82Vt8uCKS7IbEX99/kkN1KcqqQe6Lwjq
+tG/lm4K5yf+FJVDivpZ9mYTvqTBjhTaOp6m3HYSNJfS0hLQVvEuBNXd8bHiXkcLp
+rmFOYUWsjxV1Qku3U5Rner0UpB2Fuw9nJcXuDgWG0gjwzAZ83y3du1VIZp0Ad8Vv
+IYpaucbImGJszMtNXn3l72K1wvQVIhm9eRwYc3QteJzweHaDsbytZEoS/GhTrZIT
+wRe5ZGrjJBJngRANRSm1BH8j6PjLem9mzPb2eytwJJA0lLhUk4vYproVvXcx0vow
+5F+5VB1YB8/tbWePmpo=
+-----END CERTIFICATE-----
diff --git a/internal/otel_collector/exporter/kafkaexporter/README.md b/internal/otel_collector/exporter/kafkaexporter/README.md
new file mode 100644
index 00000000000..b75fcbfe63a
--- /dev/null
+++ b/internal/otel_collector/exporter/kafkaexporter/README.md
@@ -0,0 +1,69 @@
+# Kafka Exporter
+
+The Kafka exporter exports traces and metrics to Kafka. This exporter uses a synchronous producer
+that blocks and does not batch messages, so it should be used with the batch and queued retry
+processors for higher throughput and resiliency. Message payload encoding is configurable.
+
+The following settings are required:
+- `protocol_version` (no default): Kafka protocol version, e.g. 2.0.0.
+
+The following settings can be optionally configured:
+- `brokers` (default = localhost:9092): The list of kafka brokers.
+- `topic` (default = otlp_spans for traces, otlp_metrics for metrics): The name of the kafka topic to export to.
+- `encoding` (default = otlp_proto): The encoding of the payloads sent to kafka. All available encodings:
+  - `otlp_proto`: payload is Protobuf serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics.
+  - The following encodings are valid *only* for **traces**.
+    - `jaeger_proto`: the payload is serialized to a single Jaeger proto `Span`.
+    - `jaeger_json`: the payload is serialized to a single Jaeger JSON Span using `jsonpb`.
+- `auth`
+  - `plain_text`
+    - `username`: The username to use.
+    - `password`: The password to use.
+  - `tls`
+    - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should
+      only be used if `insecure` is set to false.
+    - `cert_file`: path to the TLS cert to use for TLS required connections. Should
+      only be used if `insecure` is set to false.
+    - `key_file`: path to the TLS key to use for TLS required connections. Should
+      only be used if `insecure` is set to false.
+    - `insecure` (default = false): Disable verifying the server's certificate chain and host
+      name (`InsecureSkipVerify` in the tls config).
+    - `server_name_override`: ServerName indicates the name of the server requested by the client
+      in order to support virtual hosting.
+  - `kerberos`
+    - `service_name`: Kerberos service name.
+    - `realm`: Kerberos realm.
+    - `use_keytab`: If true, the keytab file is used for authentication instead of the password.
+    - `username`: The Kerberos username used to authenticate with the KDC.
+    - `password`: The Kerberos password used to authenticate with the KDC.
+    - `config_file`: Path to the Kerberos configuration, e.g. /etc/krb5.conf.
+    - `keytab_file`: Path to the keytab file, e.g. /etc/security/kafka.keytab.
+- `metadata`
+  - `full` (default = true): Whether to maintain a full set of metadata.
+    When disabled, the client does not make the initial metadata request to the broker at startup.
+  - `retry`
+    - `max` (default = 3): The number of retries to get metadata.
+    - `backoff` (default = 250ms): How long to wait between metadata retries.
+- `timeout` (default = 5s): The timeout for every attempt to send data to the backend.
+- `retry_on_failure`
+  - `enabled` (default = true)
+  - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`.
+  - `max_interval` (default = 30s): The upper bound on backoff; ignored if `enabled` is `false`.
+  - `max_elapsed_time` (default = 120s): The maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`.
+- `sending_queue`
+  - `enabled` (default = true)
+  - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`.
+  - `queue_size` (default = 5000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`.
+    Users should calculate this as `num_seconds * requests_per_second` where:
+    - `num_seconds` is the number of seconds to buffer in case of a backend outage.
+    - `requests_per_second` is the average number of requests per second.
+
+Example configuration:
+
+```yaml
+exporters:
+  kafka:
+    brokers:
+      - localhost:9092
+    protocol_version: 2.0.0
+```
diff --git a/internal/otel_collector/exporter/kafkaexporter/authentication.go b/internal/otel_collector/exporter/kafkaexporter/authentication.go
new file mode 100644
index 00000000000..ad723b4696b
--- /dev/null
+++ b/internal/otel_collector/exporter/kafkaexporter/authentication.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkaexporter
+
+import (
+	"fmt"
+
+	"github.com/Shopify/sarama"
+
+	"go.opentelemetry.io/collector/config/configtls"
+)
+
+// Authentication defines authentication.
+type Authentication struct {
+	PlainText *PlainTextConfig            `mapstructure:"plain_text"`
+	TLS       *configtls.TLSClientSetting `mapstructure:"tls"`
+	Kerberos  *KerberosConfig             `mapstructure:"kerberos"`
+}
+
+// PlainTextConfig defines plaintext authentication.
+type PlainTextConfig struct {
+	Username string `mapstructure:"username"`
+	Password string `mapstructure:"password"`
+}
+
+// KerberosConfig defines kerberos configuration.
+type KerberosConfig struct { + ServiceName string `mapstructure:"service_name"` + Realm string `mapstructure:"realm"` + UseKeyTab bool `mapstructure:"use_keytab"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password" json:"-"` + ConfigPath string `mapstructure:"config_file"` + KeyTabPath string `mapstructure:"keytab_file"` +} + +// ConfigureAuthentication configures authentication in sarama.Config. +func ConfigureAuthentication(config Authentication, saramaConfig *sarama.Config) error { + if config.PlainText != nil { + configurePlaintext(*config.PlainText, saramaConfig) + } + if config.TLS != nil { + if err := configureTLS(*config.TLS, saramaConfig); err != nil { + return err + } + } + if config.Kerberos != nil { + configureKerberos(*config.Kerberos, saramaConfig) + } + return nil +} + +func configurePlaintext(config PlainTextConfig, saramaConfig *sarama.Config) { + saramaConfig.Net.SASL.Enable = true + saramaConfig.Net.SASL.User = config.Username + saramaConfig.Net.SASL.Password = config.Password +} + +func configureTLS(config configtls.TLSClientSetting, saramaConfig *sarama.Config) error { + tlsConfig, err := config.LoadTLSConfig() + if err != nil { + return fmt.Errorf("error loading tls config: %w", err) + } + saramaConfig.Net.TLS.Enable = true + saramaConfig.Net.TLS.Config = tlsConfig + return nil +} + +func configureKerberos(config KerberosConfig, saramaConfig *sarama.Config) { + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI + saramaConfig.Net.SASL.Enable = true + if config.UseKeyTab { + saramaConfig.Net.SASL.GSSAPI.KeyTabPath = config.KeyTabPath + saramaConfig.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH + } else { + saramaConfig.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH + saramaConfig.Net.SASL.GSSAPI.Password = config.Password + } + saramaConfig.Net.SASL.GSSAPI.KerberosConfigPath = config.ConfigPath + saramaConfig.Net.SASL.GSSAPI.Username = config.Username + saramaConfig.Net.SASL.GSSAPI.Realm = config.Realm + saramaConfig.Net.SASL.GSSAPI.ServiceName = config.ServiceName +} diff --git a/internal/otel_collector/exporter/kafkaexporter/authentication_test.go b/internal/otel_collector/exporter/kafkaexporter/authentication_test.go new file mode 100644 index 00000000000..2dfa0746884 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/authentication_test.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
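`ConfigureAuthentication` above activates whichever of the three `auth` blocks is present in the configuration. In YAML terms, a hedged example combining SASL/PLAIN with TLS (broker names, paths, and credentials are placeholders):

```yaml
exporters:
  kafka:
    protocol_version: 2.0.0
    brokers:
      - broker-1:9092
      - broker-2:9092
    auth:
      plain_text:
        username: jdoe
        password: pass
      tls:
        ca_file: /etc/ssl/ca.crt
        insecure: false
```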
+ +package kafkaexporter + +import ( + "testing" + + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configtls" +) + +func TestAuthentication(t *testing.T) { + saramaPlaintext := &sarama.Config{} + saramaPlaintext.Net.SASL.Enable = true + saramaPlaintext.Net.SASL.User = "jdoe" + saramaPlaintext.Net.SASL.Password = "pass" + + saramaTLSCfg := &sarama.Config{} + saramaTLSCfg.Net.TLS.Enable = true + tlsClient := configtls.TLSClientSetting{} + tlscfg, err := tlsClient.LoadTLSConfig() + require.NoError(t, err) + saramaTLSCfg.Net.TLS.Config = tlscfg + + saramaKerberosCfg := &sarama.Config{} + saramaKerberosCfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI + saramaKerberosCfg.Net.SASL.Enable = true + saramaKerberosCfg.Net.SASL.GSSAPI.ServiceName = "foobar" + saramaKerberosCfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH + + saramaKerberosKeyTabCfg := &sarama.Config{} + saramaKerberosKeyTabCfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI + saramaKerberosKeyTabCfg.Net.SASL.Enable = true + saramaKerberosKeyTabCfg.Net.SASL.GSSAPI.KeyTabPath = "/path" + saramaKerberosKeyTabCfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH + + tests := []struct { + auth Authentication + saramaConfig *sarama.Config + err string + }{ + { + auth: Authentication{PlainText: &PlainTextConfig{Username: "jdoe", Password: "pass"}}, + saramaConfig: saramaPlaintext, + }, + { + auth: Authentication{TLS: &configtls.TLSClientSetting{}}, + saramaConfig: saramaTLSCfg, + }, + { + auth: Authentication{TLS: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{CAFile: "/doesnotexists"}, + }}, + saramaConfig: saramaTLSCfg, + err: "failed to load TLS config", + }, + { + auth: Authentication{Kerberos: &KerberosConfig{ServiceName: "foobar"}}, + saramaConfig: saramaKerberosCfg, + }, + { + auth: Authentication{Kerberos: &KerberosConfig{UseKeyTab: true, KeyTabPath: "/path"}}, + saramaConfig: saramaKerberosKeyTabCfg, + }, + } + for _, test := range tests { + t.Run("", func(t *testing.T) { + config := &sarama.Config{} + err := ConfigureAuthentication(test.auth, config) + if test.err != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.err) + } else { + assert.Equal(t, test.saramaConfig, config) + } + }) + } +} diff --git a/internal/otel_collector/exporter/kafkaexporter/config.go b/internal/otel_collector/exporter/kafkaexporter/config.go new file mode 100644 index 00000000000..3cd98f28428 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/config.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter + +import ( + "time" + + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +// Config defines configuration for Kafka exporter. 
+type Config struct {
+	configmodels.ExporterSettings  `mapstructure:",squash"`
+	exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.QueueSettings   `mapstructure:"sending_queue"`
+	exporterhelper.RetrySettings   `mapstructure:"retry_on_failure"`
+
+	// The list of Kafka brokers (default localhost:9092).
+	Brokers []string `mapstructure:"brokers"`
+	// Kafka protocol version
+	ProtocolVersion string `mapstructure:"protocol_version"`
+	// The name of the Kafka topic to export to (default otlp_spans for traces, otlp_metrics for metrics).
+	Topic string `mapstructure:"topic"`
+
+	// Encoding of messages (default "otlp_proto").
+	Encoding string `mapstructure:"encoding"`
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata Metadata `mapstructure:"metadata"`
+
+	// Authentication defines the authentication mechanism to use.
+	Authentication Authentication `mapstructure:"auth"`
+}
+
+// Metadata defines configuration for retrieving metadata from the broker.
+type Metadata struct {
+	// Whether to maintain a full set of metadata for all topics, or just
+	// the minimal set that has been necessary so far. The full set is simpler
+	// and usually more convenient, but can take up a substantial amount of
+	// memory if you have many topics and partitions. Defaults to true.
+	Full bool `mapstructure:"full"`
+
+	// Retry configuration for metadata.
+	// This configuration is useful for avoiding race conditions when the
+	// broker is starting up at the same time as the collector.
+	Retry MetadataRetry `mapstructure:"retry"`
+}
+
+// MetadataRetry defines retry configuration for Metadata.
+type MetadataRetry struct {
+	// The total number of times to retry a metadata request when the
+	// cluster is in the middle of a leader election or at startup (default 3).
+	Max int `mapstructure:"max"`
+	// How long to wait for leader election to occur before retrying
+	// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+	Backoff time.Duration `mapstructure:"backoff"`
+}
diff --git a/internal/otel_collector/exporter/kafkaexporter/config_test.go b/internal/otel_collector/exporter/kafkaexporter/config_test.go
new file mode 100644
index 00000000000..4382e6cdf37
--- /dev/null
+++ b/internal/otel_collector/exporter/kafkaexporter/config_test.go
@@ -0,0 +1,78 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
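For illustration, a sketch (not taken from the patch itself) of assembling this Config programmatically rather than via YAML. The broker addresses and topic are placeholders, and the embedded exporterhelper settings are omitted for brevity.

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/exporter/kafkaexporter"
)

// newExampleConfig mirrors the mapstructure layout above with placeholder values.
func newExampleConfig() kafkaexporter.Config {
	return kafkaexporter.Config{
		Brokers:  []string{"broker-1:9092", "broker-2:9092"},
		Topic:    "otlp_spans",
		Encoding: "otlp_proto",
		Metadata: kafkaexporter.Metadata{
			Full: true, // keep metadata for all topics
			Retry: kafkaexporter.MetadataRetry{
				Max:     3,
				Backoff: 250 * time.Millisecond,
			},
		},
	}
}
```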
+ +package kafkaexporter + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + require.NoError(t, err) + require.Equal(t, 1, len(cfg.Receivers)) + + c := cfg.Exporters[typeStr].(*Config) + assert.Equal(t, &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: typeStr, + TypeVal: typeStr, + }, + TimeoutSettings: exporterhelper.TimeoutSettings{ + Timeout: 10 * time.Second, + }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + Topic: "spans", + Encoding: "otlp_proto", + Brokers: []string{"foo:123", "bar:456"}, + Authentication: Authentication{ + PlainText: &PlainTextConfig{ + Username: "jdoe", + Password: "pass", + }, + }, + Metadata: Metadata{ + Full: false, + Retry: MetadataRetry{ + Max: 15, + Backoff: defaultMetadataRetryBackoff, + }, + }, + }, c) +} diff --git a/internal/otel_collector/exporter/kafkaexporter/factory.go b/internal/otel_collector/exporter/kafkaexporter/factory.go new file mode 100644 index 00000000000..1064b7715f9 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/factory.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter + +import ( + "context" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + typeStr = "kafka" + defaultTracesTopic = "otlp_spans" + defaultMetricsTopic = "otlp_metrics" + defaultEncoding = "otlp_proto" + defaultBroker = "localhost:9092" + // default from sarama.NewConfig() + defaultMetadataRetryMax = 3 + // default from sarama.NewConfig() + defaultMetadataRetryBackoff = time.Millisecond * 250 + // default from sarama.NewConfig() + defaultMetadataFull = true +) + +// FactoryOption applies changes to kafkaExporterFactory. +type FactoryOption func(factory *kafkaExporterFactory) + +// WithAddTracesMarshallers adds tracesMarshallers. 
+func WithAddTracesMarshallers(encodingMarshaller map[string]TracesMarshaller) FactoryOption { + return func(factory *kafkaExporterFactory) { + for encoding, marshaller := range encodingMarshaller { + factory.tracesMarshallers[encoding] = marshaller + } + } +} + +// NewFactory creates Kafka exporter factory. +func NewFactory(options ...FactoryOption) component.ExporterFactory { + f := &kafkaExporterFactory{ + tracesMarshallers: tracesMarshallers(), + metricsMarshallers: metricsMarshallers(), + } + for _, o := range options { + o(f) + } + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(f.createTraceExporter), + exporterhelper.WithMetrics(f.createMetricsExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + Brokers: []string{defaultBroker}, + // using an empty topic to track when it has not been set by user, default is based on traces or metrics. + Topic: "", + Encoding: defaultEncoding, + Metadata: Metadata{ + Full: defaultMetadataFull, + Retry: MetadataRetry{ + Max: defaultMetadataRetryMax, + Backoff: defaultMetadataRetryBackoff, + }, + }, + } +} + +type kafkaExporterFactory struct { + tracesMarshallers map[string]TracesMarshaller + metricsMarshallers map[string]MetricsMarshaller +} + +func (f *kafkaExporterFactory) createTraceExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.TracesExporter, error) { + oCfg := cfg.(*Config) + if oCfg.Topic == "" { + oCfg.Topic = defaultTracesTopic + } + exp, err := newTracesExporter(*oCfg, params, f.tracesMarshallers) + if err != nil { + return nil, err + } + return exporterhelper.NewTraceExporter( + cfg, + params.Logger, + exp.traceDataPusher, + // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, + // and will rely on the sarama Producer Timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(exp.Close)) +} + +func (f *kafkaExporterFactory) createMetricsExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.MetricsExporter, error) { + oCfg := cfg.(*Config) + if oCfg.Topic == "" { + oCfg.Topic = defaultMetricsTopic + } + exp, err := newMetricsExporter(*oCfg, params, f.metricsMarshallers) + if err != nil { + return nil, err + } + return exporterhelper.NewMetricsExporter( + cfg, + params.Logger, + exp.metricsDataPusher, + // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, + // and will rely on the sarama Producer Timeout logic. 
+ exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(exp.Close)) +} diff --git a/internal/otel_collector/exporter/kafkaexporter/factory_test.go b/internal/otel_collector/exporter/kafkaexporter/factory_test.go new file mode 100644 index 00000000000..fdc2138968e --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/factory_test.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig().(*Config) + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) + assert.Equal(t, []string{defaultBroker}, cfg.Brokers) + assert.Equal(t, "", cfg.Topic) +} + +func TestCreateTracesExporter(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + // this disables contacting the broker so we can successfully create the exporter + cfg.Metadata.Full = false + f := kafkaExporterFactory{tracesMarshallers: tracesMarshallers()} + r, err := f.createTraceExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + assert.NotNil(t, r) +} + +func TestCreateMetricsExport(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + // this disables contacting the broker so we can successfully create the exporter + cfg.Metadata.Full = false + mf := kafkaExporterFactory{metricsMarshallers: metricsMarshallers()} + mr, err := mf.createMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + assert.NotNil(t, mr) +} + +func TestCreateTracesExporter_err(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + f := kafkaExporterFactory{tracesMarshallers: tracesMarshallers()} + r, err := f.createTraceExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + // no available broker + require.Error(t, err) + assert.Nil(t, r) +} + +func TestCreateMetricsExporter_err(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Brokers = []string{"invalid:9092"} + cfg.ProtocolVersion = "2.0.0" + mf := kafkaExporterFactory{metricsMarshallers: metricsMarshallers()} + mr, err := mf.createMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.Error(t, err) + 
assert.Nil(t, mr) +} + +func TestWithMarshallers(t *testing.T) { + cm := &customMarshaller{} + f := NewFactory(WithAddTracesMarshallers(map[string]TracesMarshaller{cm.Encoding(): cm})) + cfg := createDefaultConfig().(*Config) + // disable contacting broker + cfg.Metadata.Full = false + + t.Run("custom_encoding", func(t *testing.T) { + cfg.Encoding = cm.Encoding() + exporter, err := f.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exporter) + }) + t.Run("default_encoding", func(t *testing.T) { + cfg.Encoding = new(otlpTracesPbMarshaller).Encoding() + exporter, err := f.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + assert.NotNil(t, exporter) + }) +} + +type customMarshaller struct { +} + +var _ TracesMarshaller = (*customMarshaller)(nil) + +func (c customMarshaller) Marshal(_ pdata.Traces) ([]Message, error) { + panic("implement me") +} + +func (c customMarshaller) Encoding() string { + return "custom" +} diff --git a/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaller.go b/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaller.go new file mode 100644 index 00000000000..10d2fc1dad3 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaller.go @@ -0,0 +1,98 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkaexporter + +import ( + "bytes" + + "github.com/gogo/protobuf/jsonpb" + jaegerproto "github.com/jaegertracing/jaeger/model" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer/pdata" + jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +type jaegerMarshaller struct { + marshaller jaegerSpanMarshaller +} + +var _ TracesMarshaller = (*jaegerMarshaller)(nil) + +func (j jaegerMarshaller) Marshal(traces pdata.Traces) ([]Message, error) { + batches, err := jaegertranslator.InternalTracesToJaegerProto(traces) + if err != nil { + return nil, err + } + var messages []Message + var errs []error + for _, batch := range batches { + for _, span := range batch.Spans { + span.Process = batch.Process + bts, err := j.marshaller.marshall(span) + // continue to process spans that can be serialized + if err != nil { + errs = append(errs, err) + continue + } + messages = append(messages, Message{Value: bts}) + } + } + return messages, componenterror.CombineErrors(errs) +} + +func (j jaegerMarshaller) Encoding() string { + return j.marshaller.encoding() +} + +type jaegerSpanMarshaller interface { + marshall(span *jaegerproto.Span) ([]byte, error) + encoding() string +} + +type jaegerProtoSpanMarshaller struct { +} + +var _ jaegerSpanMarshaller = (*jaegerProtoSpanMarshaller)(nil) + +func (p jaegerProtoSpanMarshaller) marshall(span *jaegerproto.Span) ([]byte, error) { + return span.Marshal() +} + +func (p jaegerProtoSpanMarshaller) encoding() string { + return "jaeger_proto" +} + +type jaegerJSONSpanMarshaller struct { + pbMarshaller *jsonpb.Marshaler +} + +var _ jaegerSpanMarshaller = (*jaegerJSONSpanMarshaller)(nil) + +func newJaegerJSONMarshaller() *jaegerJSONSpanMarshaller { + return &jaegerJSONSpanMarshaller{ + pbMarshaller: &jsonpb.Marshaler{}, + } +} + +func (p jaegerJSONSpanMarshaller) marshall(span *jaegerproto.Span) ([]byte, error) { + out := new(bytes.Buffer) + err := p.pbMarshaller.Marshal(out, span) + return out.Bytes(), err +} + +func (p jaegerJSONSpanMarshaller) encoding() string { + return "jaeger_json" +} diff --git a/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaller_test.go b/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaller_test.go new file mode 100644 index 00000000000..af1a7e27e61 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaller_test.go @@ -0,0 +1,95 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
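To make the fan-out explicit: `Marshal` above produces one Kafka message per span (not per batch), attaching each batch's `Process` to every span first and folding per-span serialization failures into one combined error. A sketch written as if inside the package, since the span marshaller types are unexported; `marshalAsJaegerProto` is a hypothetical helper for illustration only.

```go
package kafkaexporter

import "go.opentelemetry.io/collector/consumer/pdata"

// marshalAsJaegerProto illustrates the fan-out: for N spans across all
// resource batches, Marshal yields up to N messages plus a combined error
// for any spans that failed to serialize.
func marshalAsJaegerProto(td pdata.Traces) ([]Message, error) {
	m := jaegerMarshaller{marshaller: jaegerProtoSpanMarshaller{}}
	return m.Marshal(td)
}
```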
+
+package kafkaexporter
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/gogo/protobuf/jsonpb"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger"
+)
+
+func TestJaegerMarshaller(t *testing.T) {
+	td := pdata.NewTraces()
+	td.ResourceSpans().Resize(1)
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1)
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1)
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetName("foo")
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetStartTime(pdata.TimestampUnixNano(10))
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetEndTime(pdata.TimestampUnixNano(20))
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
+	batches, err := jaegertranslator.InternalTracesToJaegerProto(td)
+	require.NoError(t, err)
+
+	batches[0].Spans[0].Process = batches[0].Process
+	jaegerProtoBytes, err := batches[0].Spans[0].Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, jaegerProtoBytes)
+
+	jsonMarshaller := &jsonpb.Marshaler{}
+	jsonByteBuffer := new(bytes.Buffer)
+	require.NoError(t, jsonMarshaller.Marshal(jsonByteBuffer, batches[0].Spans[0]))
+
+	tests := []struct {
+		marshaller TracesMarshaller
+		encoding   string
+		messages   []Message
+	}{
+		{
+			marshaller: jaegerMarshaller{
+				marshaller: jaegerProtoSpanMarshaller{},
+			},
+			encoding: "jaeger_proto",
+			messages: []Message{{Value: jaegerProtoBytes}},
+		},
+		{
+			marshaller: jaegerMarshaller{
+				marshaller: jaegerJSONSpanMarshaller{
+					pbMarshaller: &jsonpb.Marshaler{},
+				},
+			},
+			encoding: "jaeger_json",
+			messages: []Message{{Value: jsonByteBuffer.Bytes()}},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.encoding, func(t *testing.T) {
+			messages, err := test.marshaller.Marshal(td)
+			require.NoError(t, err)
+			assert.Equal(t, test.messages, messages)
+			assert.Equal(t, test.encoding, test.marshaller.Encoding())
+		})
+	}
+}
+
+func TestJaegerMarshaller_error_convert_traceID(t *testing.T) {
+	marshaller := jaegerMarshaller{
+		marshaller: jaegerProtoSpanMarshaller{},
+	}
+	td := pdata.NewTraces()
+	td.ResourceSpans().Resize(1)
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1)
+	td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1)
+	// fails on the zero trace ID
+	messages, err := marshaller.Marshal(td)
+	require.Error(t, err)
+	assert.Nil(t, messages)
+}
diff --git a/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go b/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go
new file mode 100644
index 00000000000..fff86bdb695
--- /dev/null
+++ b/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkaexporter
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Shopify/sarama"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding")
+
+// kafkaTracesProducer uses sarama to produce trace messages to Kafka.
+type kafkaTracesProducer struct {
+	producer   sarama.SyncProducer
+	topic      string
+	marshaller TracesMarshaller
+	logger     *zap.Logger
+}
+
+func (e *kafkaTracesProducer) traceDataPusher(_ context.Context, td pdata.Traces) (int, error) {
+	messages, err := e.marshaller.Marshal(td)
+	if err != nil {
+		return td.SpanCount(), consumererror.Permanent(err)
+	}
+	err = e.producer.SendMessages(producerMessages(messages, e.topic))
+	if err != nil {
+		return td.SpanCount(), err
+	}
+	return 0, nil
+}
+
+func (e *kafkaTracesProducer) Close(context.Context) error {
+	return e.producer.Close()
+}
+
+// kafkaMetricsProducer uses sarama to produce metrics messages to Kafka.
+type kafkaMetricsProducer struct {
+	producer   sarama.SyncProducer
+	topic      string
+	marshaller MetricsMarshaller
+	logger     *zap.Logger
+}
+
+func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pdata.Metrics) (int, error) {
+	messages, err := e.marshaller.Marshal(md)
+	if err != nil {
+		return md.MetricCount(), consumererror.Permanent(err)
+	}
+	err = e.producer.SendMessages(producerMessages(messages, e.topic))
+	if err != nil {
+		return md.MetricCount(), err
+	}
+	return 0, nil
+}
+
+func (e *kafkaMetricsProducer) Close(context.Context) error {
+	return e.producer.Close()
+}
+
+func newSaramaProducer(config Config) (sarama.SyncProducer, error) {
+	c := sarama.NewConfig()
+	// These settings are required by the sarama.SyncProducer implementation.
+	c.Producer.Return.Successes = true
+	c.Producer.Return.Errors = true
+	// Wait only for the local commit to succeed before responding.
+	c.Producer.RequiredAcks = sarama.WaitForLocal
+	// Because sarama does not accept a Context for every message, set the Timeout here.
+ c.Producer.Timeout = config.Timeout + c.Metadata.Full = config.Metadata.Full + c.Metadata.Retry.Max = config.Metadata.Retry.Max + c.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff + if config.ProtocolVersion != "" { + version, err := sarama.ParseKafkaVersion(config.ProtocolVersion) + if err != nil { + return nil, err + } + c.Version = version + } + if err := ConfigureAuthentication(config.Authentication, c); err != nil { + return nil, err + } + producer, err := sarama.NewSyncProducer(config.Brokers, c) + if err != nil { + return nil, err + } + return producer, nil +} + +func newMetricsExporter(config Config, params component.ExporterCreateParams, marshallers map[string]MetricsMarshaller) (*kafkaMetricsProducer, error) { + marshaller := marshallers[config.Encoding] + if marshaller == nil { + return nil, errUnrecognizedEncoding + } + producer, err := newSaramaProducer(config) + if err != nil { + return nil, err + } + + return &kafkaMetricsProducer{ + producer: producer, + topic: config.Topic, + marshaller: marshaller, + logger: params.Logger, + }, nil + +} + +// newTracesExporter creates Kafka exporter. +func newTracesExporter(config Config, params component.ExporterCreateParams, marshallers map[string]TracesMarshaller) (*kafkaTracesProducer, error) { + marshaller := marshallers[config.Encoding] + if marshaller == nil { + return nil, errUnrecognizedEncoding + } + producer, err := newSaramaProducer(config) + if err != nil { + return nil, err + } + return &kafkaTracesProducer{ + producer: producer, + topic: config.Topic, + marshaller: marshaller, + logger: params.Logger, + }, nil +} + +func producerMessages(messages []Message, topic string) []*sarama.ProducerMessage { + producerMessages := make([]*sarama.ProducerMessage, len(messages)) + for i := range messages { + producerMessages[i] = &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(messages[i].Value), + } + } + return producerMessages +} diff --git a/internal/otel_collector/exporter/kafkaexporter/kafka_exporter_test.go b/internal/otel_collector/exporter/kafkaexporter/kafka_exporter_test.go new file mode 100644 index 00000000000..6630699a657 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/kafka_exporter_test.go @@ -0,0 +1,218 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
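Note the contract of the pushers above: on any failure they report the full `SpanCount()`/`MetricCount()` as dropped and on success they report zero, while `producerMessages` only wraps each payload with the configured topic. A small sketch of the latter, written as if inside the package; `exampleWrap` is a hypothetical helper for illustration only.

```go
package kafkaexporter

import "github.com/Shopify/sarama"

// exampleWrap illustrates producerMessages: each payload becomes a
// *sarama.ProducerMessage carrying the configured topic; nothing else is set.
func exampleWrap() []*sarama.ProducerMessage {
	msgs := producerMessages([]Message{{Value: []byte("serialized-payload")}}, "otlp_spans")
	// msgs[0].Topic == "otlp_spans"; msgs[0].Value is a sarama.ByteEncoder.
	return msgs
}
```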
+ +package kafkaexporter + +import ( + "context" + "fmt" + "testing" + + "github.com/Shopify/sarama" + "github.com/Shopify/sarama/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestNewExporter_err_version(t *testing.T) { + c := Config{ProtocolVersion: "0.0.0", Encoding: defaultEncoding} + texp, err := newTracesExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, tracesMarshallers()) + assert.Error(t, err) + assert.Nil(t, texp) +} + +func TestNewExporter_err_encoding(t *testing.T) { + c := Config{Encoding: "foo"} + texp, err := newTracesExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, tracesMarshallers()) + assert.EqualError(t, err, errUnrecognizedEncoding.Error()) + assert.Nil(t, texp) +} + +func TestNewMetricsExporter_err_version(t *testing.T) { + c := Config{ProtocolVersion: "0.0.0", Encoding: defaultEncoding} + mexp, err := newMetricsExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, metricsMarshallers()) + assert.Error(t, err) + assert.Nil(t, mexp) +} + +func TestNewMetricsExporter_err_encoding(t *testing.T) { + c := Config{Encoding: "bar"} + mexp, err := newMetricsExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, metricsMarshallers()) + assert.EqualError(t, err, errUnrecognizedEncoding.Error()) + assert.Nil(t, mexp) +} + +func TestNewMetricsExporter_err_traces_encoding(t *testing.T) { + c := Config{Encoding: "jaeger_proto"} + mexp, err := newMetricsExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, metricsMarshallers()) + assert.EqualError(t, err, errUnrecognizedEncoding.Error()) + assert.Nil(t, mexp) +} + +func TestNewExporter_err_auth_type(t *testing.T) { + c := Config{ + ProtocolVersion: "2.0.0", + Authentication: Authentication{ + TLS: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/doesnotexist", + }, + }, + }, + Encoding: defaultEncoding, + Metadata: Metadata{ + Full: false, + }, + } + texp, err := newTracesExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, tracesMarshallers()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to load TLS config") + assert.Nil(t, texp) + mexp, err := newMetricsExporter(c, component.ExporterCreateParams{Logger: zap.NewNop()}, metricsMarshallers()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to load TLS config") + assert.Nil(t, mexp) +} + +func TestTraceDataPusher(t *testing.T) { + c := sarama.NewConfig() + producer := mocks.NewSyncProducer(t, c) + producer.ExpectSendMessageAndSucceed() + + p := kafkaTracesProducer{ + producer: producer, + marshaller: &otlpTracesPbMarshaller{}, + } + t.Cleanup(func() { + require.NoError(t, p.Close(context.Background())) + }) + droppedSpans, err := p.traceDataPusher(context.Background(), testdata.GenerateTraceDataTwoSpansSameResource()) + require.NoError(t, err) + assert.Equal(t, 0, droppedSpans) +} + +func TestTraceDataPusher_err(t *testing.T) { + c := sarama.NewConfig() + producer := mocks.NewSyncProducer(t, c) + expErr := fmt.Errorf("failed to send") + producer.ExpectSendMessageAndFail(expErr) + + p := kafkaTracesProducer{ + producer: producer, + marshaller: &otlpTracesPbMarshaller{}, + logger: zap.NewNop(), + } + t.Cleanup(func() { + require.NoError(t, p.Close(context.Background())) + }) + td := 
testdata.GenerateTraceDataTwoSpansSameResource() + droppedSpans, err := p.traceDataPusher(context.Background(), td) + assert.EqualError(t, err, expErr.Error()) + assert.Equal(t, td.SpanCount(), droppedSpans) +} + +func TestTraceDataPusher_marshall_error(t *testing.T) { + expErr := fmt.Errorf("failed to marshall") + p := kafkaTracesProducer{ + marshaller: &tracesErrorMarshaller{err: expErr}, + logger: zap.NewNop(), + } + td := testdata.GenerateTraceDataTwoSpansSameResource() + droppedSpans, err := p.traceDataPusher(context.Background(), td) + require.Error(t, err) + assert.Contains(t, err.Error(), expErr.Error()) + assert.Equal(t, td.SpanCount(), droppedSpans) +} + +func TestMetricsDataPusher(t *testing.T) { + c := sarama.NewConfig() + producer := mocks.NewSyncProducer(t, c) + producer.ExpectSendMessageAndSucceed() + + p := kafkaMetricsProducer{ + producer: producer, + marshaller: &otlpMetricsPbMarshaller{}, + } + t.Cleanup(func() { + require.NoError(t, p.Close(context.Background())) + }) + dropped, err := p.metricsDataPusher(context.Background(), testdata.GenerateMetricsTwoMetrics()) + require.NoError(t, err) + assert.Equal(t, 0, dropped) +} + +func TestMetricsDataPusher_err(t *testing.T) { + c := sarama.NewConfig() + producer := mocks.NewSyncProducer(t, c) + expErr := fmt.Errorf("failed to send") + producer.ExpectSendMessageAndFail(expErr) + + p := kafkaMetricsProducer{ + producer: producer, + marshaller: &otlpMetricsPbMarshaller{}, + logger: zap.NewNop(), + } + t.Cleanup(func() { + require.NoError(t, p.Close(context.Background())) + }) + md := testdata.GenerateMetricsTwoMetrics() + dropped, err := p.metricsDataPusher(context.Background(), md) + assert.EqualError(t, err, expErr.Error()) + assert.Equal(t, md.MetricCount(), dropped) +} + +func TestMetricsDataPusher_marshal_error(t *testing.T) { + expErr := fmt.Errorf("failed to marshall") + p := kafkaMetricsProducer{ + marshaller: &metricsErrorMarshaller{err: expErr}, + logger: zap.NewNop(), + } + md := testdata.GenerateMetricsTwoMetrics() + dropped, err := p.metricsDataPusher(context.Background(), md) + require.Error(t, err) + assert.Contains(t, err.Error(), expErr.Error()) + assert.Equal(t, md.MetricCount(), dropped) +} + +type tracesErrorMarshaller struct { + err error +} + +type metricsErrorMarshaller struct { + err error +} + +func (e metricsErrorMarshaller) Marshal(_ pdata.Metrics) ([]Message, error) { + return nil, e.err +} + +func (e metricsErrorMarshaller) Encoding() string { + panic("implement me") +} + +var _ TracesMarshaller = (*tracesErrorMarshaller)(nil) + +func (e tracesErrorMarshaller) Marshal(_ pdata.Traces) ([]Message, error) { + return nil, e.err +} + +func (e tracesErrorMarshaller) Encoding() string { + panic("implement me") +} diff --git a/internal/otel_collector/exporter/kafkaexporter/marshaller.go b/internal/otel_collector/exporter/kafkaexporter/marshaller.go new file mode 100644 index 00000000000..676dc69969f --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/marshaller.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkaexporter + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// TracesMarshaller marshals traces into Message array. +type TracesMarshaller interface { + // Marshal serializes spans into Messages + Marshal(traces pdata.Traces) ([]Message, error) + + // Encoding returns encoding name + Encoding() string +} + +// MetricsMarshaller marshals metrics into Message array +type MetricsMarshaller interface { + // Marshal serializes metrics into Messages + Marshal(metrics pdata.Metrics) ([]Message, error) + + // Encoding returns encoding name + Encoding() string +} + +// Message encapsulates Kafka's message payload. +type Message struct { + Value []byte +} + +// tracesMarshallers returns map of supported encodings with TracesMarshaller. +func tracesMarshallers() map[string]TracesMarshaller { + otlppb := &otlpTracesPbMarshaller{} + jaegerProto := jaegerMarshaller{marshaller: jaegerProtoSpanMarshaller{}} + jaegerJSON := jaegerMarshaller{marshaller: newJaegerJSONMarshaller()} + return map[string]TracesMarshaller{ + otlppb.Encoding(): otlppb, + jaegerProto.Encoding(): jaegerProto, + jaegerJSON.Encoding(): jaegerJSON, + } +} + +// metricsMarshallers returns map of supported encodings and MetricsMarshaller +func metricsMarshallers() map[string]MetricsMarshaller { + otlppb := &otlpMetricsPbMarshaller{} + return map[string]MetricsMarshaller{ + otlppb.Encoding(): otlppb, + } +} diff --git a/internal/otel_collector/exporter/kafkaexporter/marshaller_test.go b/internal/otel_collector/exporter/kafkaexporter/marshaller_test.go new file mode 100644 index 00000000000..8c428211317 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/marshaller_test.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
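For illustration, a sketch (not taken from the patch itself) of adding a custom encoding on top of these tables using the `WithAddTracesMarshallers` factory option defined earlier; `textMarshaller` is a hypothetical type.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/exporter/kafkaexporter"
)

// textMarshaller is a hypothetical encoding producing one summary line per batch.
type textMarshaller struct{}

func (textMarshaller) Marshal(td pdata.Traces) ([]kafkaexporter.Message, error) {
	summary := fmt.Sprintf("batch with %d spans", td.SpanCount())
	return []kafkaexporter.Message{{Value: []byte(summary)}}, nil
}

func (textMarshaller) Encoding() string { return "text" }

func main() {
	// Registers "text" alongside the default otlp_proto/jaeger encodings.
	_ = kafkaexporter.NewFactory(kafkaexporter.WithAddTracesMarshallers(
		map[string]kafkaexporter.TracesMarshaller{"text": textMarshaller{}},
	))
}
```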
+ +package kafkaexporter + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDefaultTracesMarshallers(t *testing.T) { + expectedEncodings := []string{ + "otlp_proto", + "jaeger_proto", + "jaeger_json", + } + marshallers := tracesMarshallers() + assert.Equal(t, len(expectedEncodings), len(marshallers)) + for _, e := range expectedEncodings { + t.Run(e, func(t *testing.T) { + m, ok := marshallers[e] + require.True(t, ok) + assert.NotNil(t, m) + }) + } +} + +func TestDefaultMetricsMarshallers(t *testing.T) { + expectedEncodings := []string{ + "otlp_proto", + } + marshallers := metricsMarshallers() + assert.Equal(t, len(expectedEncodings), len(marshallers)) + for _, e := range expectedEncodings { + t.Run(e, func(t *testing.T) { + m, ok := marshallers[e] + require.True(t, ok) + assert.NotNil(t, m) + }) + } +} diff --git a/internal/otel_collector/exporter/kafkaexporter/otlp_marshaller.go b/internal/otel_collector/exporter/kafkaexporter/otlp_marshaller.go new file mode 100644 index 00000000000..29232fa2946 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/otlp_marshaller.go @@ -0,0 +1,60 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkaexporter + +import ( + "go.opentelemetry.io/collector/consumer/pdata" + otlpmetric "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" +) + +var _ TracesMarshaller = (*otlpTracesPbMarshaller)(nil) +var _ MetricsMarshaller = (*otlpMetricsPbMarshaller)(nil) + +type otlpTracesPbMarshaller struct { +} + +func (m *otlpTracesPbMarshaller) Encoding() string { + return defaultEncoding +} + +func (m *otlpTracesPbMarshaller) Marshal(traces pdata.Traces) ([]Message, error) { + request := otlptrace.ExportTraceServiceRequest{ + ResourceSpans: pdata.TracesToOtlp(traces), + } + bts, err := request.Marshal() + if err != nil { + return nil, err + } + return []Message{{Value: bts}}, nil +} + +type otlpMetricsPbMarshaller struct { +} + +func (m *otlpMetricsPbMarshaller) Encoding() string { + return defaultEncoding +} + +func (m *otlpMetricsPbMarshaller) Marshal(metrics pdata.Metrics) ([]Message, error) { + request := otlpmetric.ExportMetricsServiceRequest{ + ResourceMetrics: pdata.MetricsToOtlp(metrics), + } + bts, err := request.Marshal() + if err != nil { + return nil, err + } + return []Message{{Value: bts}}, nil +} diff --git a/internal/otel_collector/exporter/kafkaexporter/otlp_marshaller_test.go b/internal/otel_collector/exporter/kafkaexporter/otlp_marshaller_test.go new file mode 100644 index 00000000000..1b70acdf6a5 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/otlp_marshaller_test.go @@ -0,0 +1,59 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
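Since the default `otlp_proto` payload is a marshalled `ExportTraceServiceRequest`, a downstream consumer can decode it with the same generated types. A sketch written as if inside the package; it assumes `pdata.TracesFromOtlp` (the inverse of `pdata.TracesToOtlp`) is available in this vendored revision, and `decodeOTLP` is a hypothetical helper.

```go
package kafkaexporter

import (
	"go.opentelemetry.io/collector/consumer/pdata"
	otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1"
)

// decodeOTLP reverses the marshaller above: value is one Kafka message
// payload produced with the default otlp_proto encoding.
func decodeOTLP(value []byte) (pdata.Traces, error) {
	var req otlptrace.ExportTraceServiceRequest
	if err := req.Unmarshal(value); err != nil {
		return pdata.Traces{}, err
	}
	// Assumption: pdata.TracesFromOtlp exists in this vendored revision.
	return pdata.TracesFromOtlp(req.ResourceSpans), nil
}
```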
+
+package kafkaexporter
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	otlpmetric "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1"
+	otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1"
+	"go.opentelemetry.io/collector/internal/testdata"
+)
+
+func TestOTLPTracesPbMarshaller(t *testing.T) {
+	td := testdata.GenerateTraceDataTwoSpansSameResource()
+	request := &otlptrace.ExportTraceServiceRequest{
+		ResourceSpans: pdata.TracesToOtlp(td),
+	}
+	expected, err := request.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, expected)
+
+	m := otlpTracesPbMarshaller{}
+	assert.Equal(t, "otlp_proto", m.Encoding())
+	messages, err := m.Marshal(td)
+	require.NoError(t, err)
+	assert.Equal(t, []Message{{Value: expected}}, messages)
+}
+
+func TestOTLPMetricsPbMarshaller(t *testing.T) {
+	md := testdata.GenerateMetricsTwoMetrics()
+	request := &otlpmetric.ExportMetricsServiceRequest{
+		ResourceMetrics: pdata.MetricsToOtlp(md),
+	}
+	expected, err := request.Marshal()
+	require.NoError(t, err)
+	require.NotNil(t, expected)
+
+	m := otlpMetricsPbMarshaller{}
+	assert.Equal(t, "otlp_proto", m.Encoding())
+	messages, err := m.Marshal(md)
+	require.NoError(t, err)
+	assert.Equal(t, []Message{{Value: expected}}, messages)
+}
diff --git a/internal/otel_collector/exporter/kafkaexporter/testdata/config.yaml b/internal/otel_collector/exporter/kafkaexporter/testdata/config.yaml
new file mode 100644
index 00000000000..3c8843b496f
--- /dev/null
+++ b/internal/otel_collector/exporter/kafkaexporter/testdata/config.yaml
@@ -0,0 +1,37 @@
+exporters:
+  kafka:
+    topic: spans
+    brokers:
+      - "foo:123"
+      - "bar:456"
+    metadata:
+      full: false
+      retry:
+        max: 15
+    timeout: 10s
+    auth:
+      plain_text:
+        username: jdoe
+        password: pass
+    sending_queue:
+      enabled: true
+      num_consumers: 2
+      queue_size: 10
+    retry_on_failure:
+      enabled: true
+      initial_interval: 10s
+      max_interval: 60s
+      max_elapsed_time: 10m
+
+processors:
+  exampleprocessor:
+
+receivers:
+  examplereceiver:
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [kafka]
diff --git a/internal/otel_collector/exporter/loggingexporter/README.md b/internal/otel_collector/exporter/loggingexporter/README.md
new file mode 100644
index 00000000000..1ad42f63daa
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/README.md
@@ -0,0 +1,29 @@
+# Logging Exporter
+
+Exports data to the console via zap.Logger.
+
+Supported pipeline types: traces, metrics, logs
+
+## Getting Started
+
+The following settings are optional:
+
+- `loglevel` (default = `info`): the log level of the logging exporter
+  (debug|info|warn|error). When set to `debug`, pipeline data is verbosely
+  logged.
+- `sampling_initial` (default = `2`): number of messages initially logged each
+  second.
+- `sampling_thereafter` (default = `500`): sampling rate after the initial
+  messages are logged (every Mth message is logged). Refer to [Zap
+  docs](https://godoc.org/go.uber.org/zap/zapcore#NewSampler) for more details
+  on how sampling parameters impact the number of messages.
+ +Example: + +```yaml +exporters: + logging: + loglevel: debug + sampling_initial: 5 + sampling_thereafter: 200 +``` diff --git a/internal/otel_collector/exporter/loggingexporter/config.go b/internal/otel_collector/exporter/loggingexporter/config.go new file mode 100644 index 00000000000..204c81d5145 --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/config.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loggingexporter + +import ( + "go.opentelemetry.io/collector/config/configmodels" +) + +// Config defines configuration for logging exporter. +type Config struct { + configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + + // LogLevel defines log level of the logging exporter; options are debug, info, warn, error. + LogLevel string `mapstructure:"loglevel"` + + // SamplingInitial defines how many samples are initially logged during each second. + SamplingInitial int `mapstructure:"sampling_initial"` + + // SamplingThereafter defines the sampling rate after the initial samples are logged. + SamplingThereafter int `mapstructure:"sampling_thereafter"` +} diff --git a/internal/otel_collector/exporter/loggingexporter/config_test.go b/internal/otel_collector/exporter/loggingexporter/config_test.go new file mode 100644 index 00000000000..6213b1dff22 --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/config_test.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package loggingexporter + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + e0 := cfg.Exporters["logging"] + assert.Equal(t, e0, factory.CreateDefaultConfig()) + + e1 := cfg.Exporters["logging/2"] + assert.Equal(t, e1, + &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "logging/2", + TypeVal: "logging", + }, + LogLevel: "debug", + SamplingInitial: 10, + SamplingThereafter: 50, + }) +} diff --git a/internal/otel_collector/exporter/loggingexporter/factory.go b/internal/otel_collector/exporter/loggingexporter/factory.go new file mode 100644 index 00000000000..36c203e35c9 --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/factory.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loggingexporter + +import ( + "context" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. 
+ typeStr = "logging" + defaultSamplingInitial = 2 + defaultSamplingThereafter = 500 +) + +// NewFactory creates a factory for Logging exporter +func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTraceExporter), + exporterhelper.WithMetrics(createMetricsExporter), + exporterhelper.WithLogs(createLogsExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + LogLevel: "info", + SamplingInitial: defaultSamplingInitial, + SamplingThereafter: defaultSamplingThereafter, + } +} + +func createTraceExporter(_ context.Context, _ component.ExporterCreateParams, config configmodels.Exporter) (component.TracesExporter, error) { + cfg := config.(*Config) + + exporterLogger, err := createLogger(cfg) + if err != nil { + return nil, err + } + + return newTraceExporter(config, cfg.LogLevel, exporterLogger) +} + +func createMetricsExporter(_ context.Context, _ component.ExporterCreateParams, config configmodels.Exporter) (component.MetricsExporter, error) { + cfg := config.(*Config) + + exporterLogger, err := createLogger(cfg) + if err != nil { + return nil, err + } + + return newMetricsExporter(config, cfg.LogLevel, exporterLogger) +} + +func createLogsExporter(_ context.Context, _ component.ExporterCreateParams, config configmodels.Exporter) (component.LogsExporter, error) { + cfg := config.(*Config) + + exporterLogger, err := createLogger(cfg) + if err != nil { + return nil, err + } + + return newLogsExporter(config, cfg.LogLevel, exporterLogger) +} + +func createLogger(cfg *Config) (*zap.Logger, error) { + var level zapcore.Level + err := (&level).UnmarshalText([]byte(cfg.LogLevel)) + if err != nil { + return nil, err + } + + // We take development config as the base since it matches the purpose + // of logging exporter being used for debugging reasons (so e.g. console encoder) + conf := zap.NewDevelopmentConfig() + conf.Level = zap.NewAtomicLevelAt(level) + conf.Sampling = &zap.SamplingConfig{ + Initial: cfg.SamplingInitial, + Thereafter: cfg.SamplingThereafter, + } + + logginglogger, err := conf.Build() + if err != nil { + return nil, err + } + return logginglogger, nil +} diff --git a/internal/otel_collector/exporter/loggingexporter/factory_test.go b/internal/otel_collector/exporter/loggingexporter/factory_test.go new file mode 100644 index 00000000000..bf04ef5d544 --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/factory_test.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
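For illustration, a sketch (not taken from the patch itself) of driving this factory directly, mirroring the path the tests below exercise; the nop logger and debug level are placeholder choices.

```go
package main

import (
	"context"
	"log"

	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter/loggingexporter"
)

func main() {
	f := loggingexporter.NewFactory()
	cfg := f.CreateDefaultConfig().(*loggingexporter.Config)
	cfg.LogLevel = "debug" // verbose per-item output

	exp, err := f.CreateTracesExporter(context.Background(),
		component.ExporterCreateParams{Logger: zap.NewNop()}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = exp // ready to be wired into a pipeline
}
```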
+ +package loggingexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateMetricsExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + me, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + assert.NoError(t, err) + assert.NotNil(t, me) +} + +func TestCreateTraceExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + te, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + assert.NoError(t, err) + assert.NotNil(t, te) +} + +func TestCreateLogsExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + te, err := factory.CreateLogsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + assert.NoError(t, err) + assert.NotNil(t, te) +} diff --git a/internal/otel_collector/exporter/loggingexporter/logging_exporter.go b/internal/otel_collector/exporter/loggingexporter/logging_exporter.go new file mode 100644 index 00000000000..fbc1dd7e8cd --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/logging_exporter.go @@ -0,0 +1,508 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package loggingexporter + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +type logDataBuffer struct { + str strings.Builder +} + +func (b *logDataBuffer) logEntry(format string, a ...interface{}) { + b.str.WriteString(fmt.Sprintf(format, a...)) + b.str.WriteString("\n") +} + +func (b *logDataBuffer) logAttr(label string, value string) { + b.logEntry(" %-15s: %s", label, value) +} + +func (b *logDataBuffer) logAttributeMap(label string, am pdata.AttributeMap) { + if am.Len() == 0 { + return + } + + b.logEntry("%s:", label) + am.ForEach(func(k string, v pdata.AttributeValue) { + b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) + }) +} + +func (b *logDataBuffer) logStringMap(description string, sm pdata.StringMap) { + if sm.Len() == 0 { + return + } + + b.logEntry("%s:", description) + sm.ForEach(func(k string, v string) { + b.logEntry(" -> %s: %s", k, v) + }) +} + +func (b *logDataBuffer) logInstrumentationLibrary(il pdata.InstrumentationLibrary) { + b.logEntry( + "InstrumentationLibrary %s %s", + il.Name(), + il.Version()) +} + +func (b *logDataBuffer) logMetricDescriptor(md pdata.Metric) { + b.logEntry("Descriptor:") + b.logEntry(" -> Name: %s", md.Name()) + b.logEntry(" -> Description: %s", md.Description()) + b.logEntry(" -> Unit: %s", md.Unit()) + b.logEntry(" -> DataType: %s", md.DataType().String()) +} + +func (b *logDataBuffer) logMetricDataPoints(m pdata.Metric) { + switch m.DataType() { + case pdata.MetricDataTypeNone: + return + case pdata.MetricDataTypeIntGauge: + b.logIntDataPoints(m.IntGauge().DataPoints()) + case pdata.MetricDataTypeDoubleGauge: + b.logDoubleDataPoints(m.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeIntSum: + data := m.IntSum() + b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logIntDataPoints(data.DataPoints()) + case pdata.MetricDataTypeDoubleSum: + data := m.DoubleSum() + b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logDoubleDataPoints(data.DataPoints()) + case pdata.MetricDataTypeIntHistogram: + data := m.IntHistogram() + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logIntHistogramDataPoints(data.DataPoints()) + case pdata.MetricDataTypeDoubleHistogram: + data := m.DoubleHistogram() + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logDoubleHistogramDataPoints(data.DataPoints()) + case pdata.MetricDataTypeDoubleSummary: + data := m.DoubleSummary() + b.logDoubleSummaryDataPoints(data.DataPoints()) + } +} + +func (b *logDataBuffer) logIntDataPoints(ps pdata.IntDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("IntDataPoints #%d", i) + b.logDataPointLabels(p.LabelsMap()) + + b.logEntry("StartTime: %d", p.StartTime()) + b.logEntry("Timestamp: %d", p.Timestamp()) + b.logEntry("Value: %d", p.Value()) + } +} + +func (b *logDataBuffer) logDoubleDataPoints(ps pdata.DoubleDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("DoubleDataPoints #%d", i) + b.logDataPointLabels(p.LabelsMap()) + + b.logEntry("StartTime: %d", p.StartTime()) + 
b.logEntry("Timestamp: %d", p.Timestamp()) + b.logEntry("Value: %f", p.Value()) + } +} + +func (b *logDataBuffer) logDoubleHistogramDataPoints(ps pdata.DoubleHistogramDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("HistogramDataPoints #%d", i) + b.logDataPointLabels(p.LabelsMap()) + + b.logEntry("StartTime: %d", p.StartTime()) + b.logEntry("Timestamp: %d", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + b.logEntry("Sum: %f", p.Sum()) + + bounds := p.ExplicitBounds() + if len(bounds) != 0 { + for i, bound := range bounds { + b.logEntry("ExplicitBounds #%d: %f", i, bound) + } + } + + buckets := p.BucketCounts() + if len(buckets) != 0 { + for j, bucket := range buckets { + b.logEntry("Buckets #%d, Count: %d", j, bucket) + } + } + } +} + +func (b *logDataBuffer) logIntHistogramDataPoints(ps pdata.IntHistogramDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("HistogramDataPoints #%d", i) + b.logDataPointLabels(p.LabelsMap()) + + b.logEntry("StartTime: %d", p.StartTime()) + b.logEntry("Timestamp: %d", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + b.logEntry("Sum: %d", p.Sum()) + + bounds := p.ExplicitBounds() + if len(bounds) != 0 { + for i, bound := range bounds { + b.logEntry("ExplicitBounds #%d: %f", i, bound) + } + } + + buckets := p.BucketCounts() + if len(buckets) != 0 { + for j, bucket := range buckets { + b.logEntry("Buckets #%d, Count: %d", j, bucket) + } + } + } +} + +func (b *logDataBuffer) logDoubleSummaryDataPoints(ps pdata.DoubleSummaryDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("SummaryDataPoints #%d", i) + b.logDataPointLabels(p.LabelsMap()) + + b.logEntry("StartTime: %d", p.StartTime()) + b.logEntry("Timestamp: %d", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + b.logEntry("Sum: %f", p.Sum()) + + quantiles := p.QuantileValues() + for i := 0; i < quantiles.Len(); i++ { + quantile := quantiles.At(i) + b.logEntry("QuantileValue #%d: Quantile %f, Value %f", i, quantile.Quantile(), quantile.Value()) + } + } +} + +func (b *logDataBuffer) logDataPointLabels(labels pdata.StringMap) { + b.logStringMap("Data point labels", labels) +} + +func (b *logDataBuffer) logLogRecord(lr pdata.LogRecord) { + b.logEntry("Timestamp: %d", lr.Timestamp()) + b.logEntry("Severity: %s", lr.SeverityText()) + b.logEntry("ShortName: %s", lr.Name()) + b.logEntry("Body: %s", attributeValueToString(lr.Body())) + b.logAttributeMap("Attributes", lr.Attributes()) +} + +func (b *logDataBuffer) logEvents(description string, se pdata.SpanEventSlice) { + if se.Len() == 0 { + return + } + + b.logEntry("%s:", description) + for i := 0; i < se.Len(); i++ { + e := se.At(i) + b.logEntry("SpanEvent #%d", i) + b.logEntry(" -> Name: %s", e.Name()) + b.logEntry(" -> Timestamp: %d", e.Timestamp()) + b.logEntry(" -> DroppedAttributesCount: %d", e.DroppedAttributesCount()) + + if e.Attributes().Len() == 0 { + return + } + b.logEntry(" -> Attributes:") + e.Attributes().ForEach(func(k string, v pdata.AttributeValue) { + b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) + }) + } +} + +func (b *logDataBuffer) logLinks(description string, sl pdata.SpanLinkSlice) { + if sl.Len() == 0 { + return + } + + b.logEntry("%s:", description) + + for i := 0; i < sl.Len(); i++ { + l := sl.At(i) + b.logEntry("SpanLink #%d", i) + b.logEntry(" -> Trace ID: %s", l.TraceID().HexString()) + b.logEntry(" -> ID: %s", l.SpanID().HexString()) + b.logEntry(" -> TraceState: %s", l.TraceState()) + b.logEntry(" -> 
DroppedAttributesCount: %d", l.DroppedAttributesCount()) + if l.Attributes().Len() == 0 { + return + } + b.logEntry(" -> Attributes:") + l.Attributes().ForEach(func(k string, v pdata.AttributeValue) { + b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) + }) + } +} + +func attributeValueToString(av pdata.AttributeValue) string { + switch av.Type() { + case pdata.AttributeValueSTRING: + return av.StringVal() + case pdata.AttributeValueBOOL: + return strconv.FormatBool(av.BoolVal()) + case pdata.AttributeValueDOUBLE: + return strconv.FormatFloat(av.DoubleVal(), 'f', -1, 64) + case pdata.AttributeValueINT: + return strconv.FormatInt(av.IntVal(), 10) + case pdata.AttributeValueARRAY: + return attributeValueArrayToString(av.ArrayVal()) + default: + return fmt.Sprintf("", av.Type()) + } +} + +func attributeValueArrayToString(av pdata.AnyValueArray) string { + var b strings.Builder + b.WriteByte('[') + for i := 0; i < av.Len(); i++ { + if i < av.Len()-1 { + fmt.Fprintf(&b, "%s, ", attributeValueToString(av.At(i))) + } else { + b.WriteString(attributeValueToString(av.At(i))) + } + } + + b.WriteByte(']') + return b.String() +} + +type loggingExporter struct { + logger *zap.Logger + debug bool +} + +func (s *loggingExporter) pushTraceData( + _ context.Context, + td pdata.Traces, +) (int, error) { + + s.logger.Info("TracesExporter", zap.Int("#spans", td.SpanCount())) + + if !s.debug { + return 0, nil + } + + buf := logDataBuffer{} + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + buf.logEntry("ResourceSpans #%d", i) + rs := rss.At(i) + buf.logAttributeMap("Resource labels", rs.Resource().Attributes()) + ilss := rs.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + buf.logEntry("InstrumentationLibrarySpans #%d", j) + ils := ilss.At(j) + buf.logInstrumentationLibrary(ils.InstrumentationLibrary()) + + spans := ils.Spans() + for k := 0; k < spans.Len(); k++ { + buf.logEntry("Span #%d", k) + span := spans.At(k) + buf.logAttr("Trace ID", span.TraceID().HexString()) + buf.logAttr("Parent ID", span.ParentSpanID().HexString()) + buf.logAttr("ID", span.SpanID().HexString()) + buf.logAttr("Name", span.Name()) + buf.logAttr("Kind", span.Kind().String()) + buf.logAttr("Start time", span.StartTime().String()) + buf.logAttr("End time", span.EndTime().String()) + + buf.logAttr("Status code", span.Status().Code().String()) + buf.logAttr("Status message", span.Status().Message()) + + buf.logAttributeMap("Attributes", span.Attributes()) + buf.logEvents("Events", span.Events()) + buf.logLinks("Links", span.Links()) + } + } + } + s.logger.Debug(buf.str.String()) + + return 0, nil +} + +func (s *loggingExporter) pushMetricsData( + _ context.Context, + md pdata.Metrics, +) (int, error) { + s.logger.Info("MetricsExporter", zap.Int("#metrics", md.MetricCount())) + + if !s.debug { + return 0, nil + } + + buf := logDataBuffer{} + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + buf.logEntry("ResourceMetrics #%d", i) + rm := rms.At(i) + buf.logAttributeMap("Resource labels", rm.Resource().Attributes()) + ilms := rm.InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + buf.logEntry("InstrumentationLibraryMetrics #%d", j) + ilm := ilms.At(j) + buf.logInstrumentationLibrary(ilm.InstrumentationLibrary()) + metrics := ilm.Metrics() + for k := 0; k < metrics.Len(); k++ { + buf.logEntry("Metric #%d", k) + metric := metrics.At(k) + buf.logMetricDescriptor(metric) + buf.logMetricDataPoints(metric) + } + } + } + + s.logger.Debug(buf.str.String()) + 
+ return 0, nil +} + +// newTraceExporter creates an exporter.TracesExporter that just drops the +// received data and logs debugging messages. +func newTraceExporter(config configmodels.Exporter, level string, logger *zap.Logger) (component.TracesExporter, error) { + s := &loggingExporter{ + debug: strings.ToLower(level) == "debug", + logger: logger, + } + + return exporterhelper.NewTraceExporter( + config, + logger, + s.pushTraceData, + // Disable Timeout/RetryOnFailure and SendingQueue + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}), + exporterhelper.WithQueue(exporterhelper.QueueSettings{Enabled: false}), + exporterhelper.WithShutdown(loggerSync(logger)), + ) +} + +// newMetricsExporter creates an exporter.MetricsExporter that just drops the +// received data and logs debugging messages. +func newMetricsExporter(config configmodels.Exporter, level string, logger *zap.Logger) (component.MetricsExporter, error) { + s := &loggingExporter{ + debug: strings.ToLower(level) == "debug", + logger: logger, + } + + return exporterhelper.NewMetricsExporter( + config, + logger, + s.pushMetricsData, + // Disable Timeout/RetryOnFailure and SendingQueue + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}), + exporterhelper.WithQueue(exporterhelper.QueueSettings{Enabled: false}), + exporterhelper.WithShutdown(loggerSync(logger)), + ) +} + +// newLogsExporter creates an exporter.LogsExporter that just drops the +// received data and logs debugging messages. +func newLogsExporter(config configmodels.Exporter, level string, logger *zap.Logger) (component.LogsExporter, error) { + s := &loggingExporter{ + debug: strings.ToLower(level) == "debug", + logger: logger, + } + + return exporterhelper.NewLogsExporter( + config, + logger, + s.pushLogData, + // Disable Timeout/RetryOnFailure and SendingQueue + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}), + exporterhelper.WithQueue(exporterhelper.QueueSettings{Enabled: false}), + exporterhelper.WithShutdown(loggerSync(logger)), + ) +} + +func (s *loggingExporter) pushLogData( + _ context.Context, + ld pdata.Logs, +) (int, error) { + s.logger.Info("LogsExporter", zap.Int("#logs", ld.LogRecordCount())) + + if !s.debug { + return 0, nil + } + + buf := logDataBuffer{} + rls := ld.ResourceLogs() + for i := 0; i < rls.Len(); i++ { + buf.logEntry("ResourceLog #%d", i) + rl := rls.At(i) + buf.logAttributeMap("Resource labels", rl.Resource().Attributes()) + ills := rl.InstrumentationLibraryLogs() + for j := 0; j < ills.Len(); j++ { + buf.logEntry("InstrumentationLibraryLogs #%d", j) + ils := ills.At(j) + buf.logInstrumentationLibrary(ils.InstrumentationLibrary()) + + logs := ils.Logs() + for k := 0; k < logs.Len(); k++ { + buf.logEntry("LogRecord #%d", k) + lr := logs.At(k) + buf.logLogRecord(lr) + } + } + } + + s.logger.Debug(buf.str.String()) + + return 0, nil +} + +func loggerSync(logger *zap.Logger) func(context.Context) error { + return func(context.Context) error { + // Currently Sync() on stdout and stderr return errors on Linux and macOS, + // respectively: + // + // - sync /dev/stdout: invalid argument + // - sync /dev/stdout: inappropriate ioctl for device + // + // Since these are not actionable ignore them. 
+ err := logger.Sync() + if osErr, ok := err.(*os.PathError); ok { + wrappedErr := osErr.Unwrap() + switch wrappedErr { + case syscall.EINVAL, syscall.ENOTSUP, syscall.ENOTTY: + err = nil + } + } + return err + } +} diff --git a/internal/otel_collector/exporter/loggingexporter/logging_exporter_test.go b/internal/otel_collector/exporter/loggingexporter/logging_exporter_test.go new file mode 100644 index 00000000000..0f839264471 --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/logging_exporter_test.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package loggingexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestLoggingTraceExporterNoErrors(t *testing.T) { + lte, err := newTraceExporter(&configmodels.ExporterSettings{}, "Debug", zap.NewNop()) + require.NotNil(t, lte) + assert.NoError(t, err) + + assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent())) + + assert.NoError(t, lte.Shutdown(context.Background())) +} + +func TestLoggingMetricsExporterNoErrors(t *testing.T) { + lme, err := newMetricsExporter(&configmodels.ExporterSettings{}, "DEBUG", zap.NewNop()) + require.NotNil(t, lme) + assert.NoError(t, err) + + assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GeneratMetricsAllTypesWithSampleDatapoints())) + assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsAllTypesEmptyDataPoint())) + assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsMetricTypeInvalid())) + + assert.NoError(t, lme.Shutdown(context.Background())) +} + +func TestLoggingLogsExporterNoErrors(t *testing.T) { + lle, err := newLogsExporter(&configmodels.ExporterSettings{}, "debug", zap.NewNop()) + require.NotNil(t, lle) + assert.NoError(t, err) + + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataOneEmptyResourceLogs())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataNoLogRecords())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataOneEmptyLogs())) + + assert.NoError(t, lle.Shutdown(context.Background())) +} + +func TestNestedArraySerializesCorrectly(t *testing.T) { + ava := pdata.NewAttributeValueArray() + av := ava.ArrayVal() + av.Append(pdata.NewAttributeValueString("foo")) + av.Append(pdata.NewAttributeValueInt(42)) + + ava2 := pdata.NewAttributeValueArray() + 
+	av2 := ava2.ArrayVal()
+	av2.Append(pdata.NewAttributeValueString("bar"))
+
+	av.Append(ava2)
+
+	assert.Equal(t, 3, ava.ArrayVal().Len())
+	assert.Equal(t, "[foo, 42, [bar]]", attributeValueToString(ava))
+}
diff --git a/internal/otel_collector/exporter/loggingexporter/testdata/config.yaml b/internal/otel_collector/exporter/loggingexporter/testdata/config.yaml
new file mode 100644
index 00000000000..2d86aabbb1a
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/testdata/config.yaml
@@ -0,0 +1,22 @@
+receivers:
+  examplereceiver:
+
+processors:
+  exampleprocessor:
+
+exporters:
+  logging:
+  logging/2:
+    loglevel: debug
+    sampling_initial: 10
+    sampling_thereafter: 50
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [logging]
+    metrics:
+      receivers: [examplereceiver]
+      exporters: [logging,logging/2]
diff --git a/internal/otel_collector/exporter/opencensusexporter/README.md b/internal/otel_collector/exporter/opencensusexporter/README.md
new file mode 100644
index 00000000000..e8fd199a2ad
--- /dev/null
+++ b/internal/otel_collector/exporter/opencensusexporter/README.md
@@ -0,0 +1,47 @@
+# OpenCensus gRPC Exporter
+
+Exports traces and/or metrics via gRPC using
+[OpenCensus](https://opencensus.io/) format.
+
+Supported pipeline types: traces, metrics
+
+## Getting Started
+
+The following settings are required:
+
+- `endpoint` (no default): host:port to which the exporter is going to send OpenCensus trace data,
+using the gRPC protocol. The valid syntax is described
+[here](https://github.com/grpc/grpc/blob/master/doc/naming.md)
+
+By default, TLS is enabled:
+
+- `insecure` (default = `false`): whether to enable client transport security for
+  the exporter's connection.
+
+As a result, the following parameters are also required:
+
+- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+Example:
+
+```yaml
+exporters:
+  opencensus:
+    endpoint: opencensus2:55678
+    cert_file: file.cert
+    key_file: file.key
+  opencensus/2:
+    endpoint: opencensus2:55678
+    insecure: true
+```
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md)
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md)
diff --git a/internal/otel_collector/exporter/opencensusexporter/config.go b/internal/otel_collector/exporter/opencensusexporter/config.go
new file mode 100644
index 00000000000..a2df6a41540
--- /dev/null
+++ b/internal/otel_collector/exporter/opencensusexporter/config.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensusexporter
+
+import (
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config defines configuration for OpenCensus exporter.
+type Config struct {
+	configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+
+	configgrpc.GRPCClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+
+	// The number of workers that send the gRPC requests.
+	NumWorkers int `mapstructure:"num_workers"`
+}
diff --git a/internal/otel_collector/exporter/opencensusexporter/config_test.go b/internal/otel_collector/exporter/opencensusexporter/config_test.go
new file mode 100644
index 00000000000..ad9f71701e7
--- /dev/null
+++ b/internal/otel_collector/exporter/opencensusexporter/config_test.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensusexporter
+
+import (
+	"path"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtest"
+	"go.opentelemetry.io/collector/config/configtls"
+)
+
+func TestLoadConfig(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	factory := NewFactory()
+	factories.Exporters[typeStr] = factory
+	cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+
+	require.NoError(t, err)
+	require.NotNil(t, cfg)
+
+	e0 := cfg.Exporters["opencensus"]
+	assert.Equal(t, e0, factory.CreateDefaultConfig())
+
+	e1 := cfg.Exporters["opencensus/2"]
+	assert.Equal(t, e1,
+		&Config{
+			ExporterSettings: configmodels.ExporterSettings{
+				NameVal: "opencensus/2",
+				TypeVal: "opencensus",
+			},
+			GRPCClientSettings: configgrpc.GRPCClientSettings{
+				Headers: map[string]string{
+					"can you have a . here?": "F0000000-0000-0000-0000-000000000000",
+					"header1":                "234",
+					"another":                "somevalue",
+				},
+				Endpoint:    "1.2.3.4:1234",
+				Compression: "on",
+				TLSSetting: configtls.TLSClientSetting{
+					TLSSetting: configtls.TLSSetting{
+						CAFile: "/var/lib/mycert.pem",
+					},
+					Insecure: false,
+				},
+				Keepalive: &configgrpc.KeepaliveClientConfig{
+					Time:                20,
+					PermitWithoutStream: true,
+					Timeout:             30,
+				},
+				WriteBufferSize: 512 * 1024,
+				BalancerName:    "round_robin",
+			},
+			NumWorkers: 123,
+		})
+}
diff --git a/internal/otel_collector/exporter/opencensusexporter/factory.go b/internal/otel_collector/exporter/opencensusexporter/factory.go
new file mode 100644
index 00000000000..aabdd8e6e30
--- /dev/null
+++ b/internal/otel_collector/exporter/opencensusexporter/factory.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensusexporter
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr = "opencensus"
+)
+
+// NewFactory creates a factory for OpenCensus exporter.
+func NewFactory() component.ExporterFactory {
+	return exporterhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		exporterhelper.WithTraces(createTraceExporter),
+		exporterhelper.WithMetrics(createMetricsExporter))
+}
+
+func createDefaultConfig() configmodels.Exporter {
+	return &Config{
+		ExporterSettings: configmodels.ExporterSettings{
+			TypeVal: typeStr,
+			NameVal: typeStr,
+		},
+		GRPCClientSettings: configgrpc.GRPCClientSettings{
+			Headers: map[string]string{},
+			// We almost read 0 bytes, so no need to tune ReadBufferSize.
+			WriteBufferSize: 512 * 1024,
+		},
+		NumWorkers: 2,
+	}
+}
+
+func createTraceExporter(ctx context.Context, params component.ExporterCreateParams, config configmodels.Exporter) (component.TracesExporter, error) {
+	oCfg := config.(*Config)
+	return newTraceExporter(ctx, oCfg, params.Logger)
+}
+
+func createMetricsExporter(ctx context.Context, params component.ExporterCreateParams, config configmodels.Exporter) (component.MetricsExporter, error) {
+	oCfg := config.(*Config)
+	return newMetricsExporter(ctx, oCfg, params.Logger)
+}
diff --git a/internal/otel_collector/exporter/opencensusexporter/factory_test.go b/internal/otel_collector/exporter/opencensusexporter/factory_test.go
new file mode 100644
index 00000000000..e243af20865
--- /dev/null
+++ b/internal/otel_collector/exporter/opencensusexporter/factory_test.go
@@ -0,0 +1,180 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusexporter + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/testutil" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateTraceExporter(t *testing.T) { + endpoint := testutil.GetAvailableLocalAddress(t) + tests := []struct { + name string + config Config + mustFail bool + }{ + { + name: "NoEndpoint", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: "", + }, + NumWorkers: 3, + }, + mustFail: true, + }, + { + name: "ZeroNumWorkers", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + }, + NumWorkers: 0, + }, + mustFail: true, + }, + { + name: "UseSecure", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + }, + NumWorkers: 3, + }, + }, + { + name: "Keepalive", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Keepalive: &configgrpc.KeepaliveClientConfig{ + Time: 30 * time.Second, + Timeout: 25 * time.Second, + PermitWithoutStream: true, + }, + }, + NumWorkers: 3, + }, + }, + { + name: "Compression", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Compression: configgrpc.CompressionGzip, + }, + NumWorkers: 3, + }, + }, + { + name: "Headers", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Headers: map[string]string{ + "hdr1": "val1", + "hdr2": "val2", + }, + }, + NumWorkers: 3, + }, + }, + { + name: "CompressionError", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Compression: "unknown compression", + }, + NumWorkers: 3, + }, + mustFail: true, + }, + { + name: "CaCert", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "testdata/test_cert.pem", + }, + }, + }, + NumWorkers: 3, + }, + }, + { + name: "CertPemFileError", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "nosuchfile", + }, + }, + }, + NumWorkers: 3, + }, + mustFail: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + params := component.ExporterCreateParams{Logger: zap.NewNop()} + tReceiver, tErr := createTraceExporter(context.Background(), params, &tt.config) + checkErrorsAndShutdown(t, 
tReceiver, tErr, tt.mustFail) + mReceiver, mErr := createMetricsExporter(context.Background(), params, &tt.config) + checkErrorsAndShutdown(t, mReceiver, mErr, tt.mustFail) + }) + } +} + +func checkErrorsAndShutdown(t *testing.T, receiver component.Receiver, err error, mustFail bool) { + if mustFail { + assert.NotNil(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, receiver) + + require.NoError(t, receiver.Shutdown(context.Background())) + } +} diff --git a/internal/otel_collector/exporter/opencensusexporter/opencensus.go b/internal/otel_collector/exporter/opencensusexporter/opencensus.go new file mode 100644 index 00000000000..42e98e90876 --- /dev/null +++ b/internal/otel_collector/exporter/opencensusexporter/opencensus.go @@ -0,0 +1,285 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusexporter + +import ( + "context" + "errors" + "fmt" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" + agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/translator/internaldata" +) + +// See https://godoc.org/google.golang.org/grpc#ClientConn.NewStream +// why we need to keep the cancel func to cancel the stream +type tracesClientWithCancel struct { + cancel context.CancelFunc + tsec agenttracepb.TraceService_ExportClient +} + +// See https://godoc.org/google.golang.org/grpc#ClientConn.NewStream +// why we need to keep the cancel func to cancel the stream +type metricsClientWithCancel struct { + cancel context.CancelFunc + msec agentmetricspb.MetricsService_ExportClient +} + +type ocExporter struct { + cfg *Config + // gRPC clients and connection. + traceSvcClient agenttracepb.TraceServiceClient + metricsSvcClient agentmetricspb.MetricsServiceClient + // In any of the channels we keep always NumWorkers object (sometimes nil), + // to make sure we don't open more than NumWorkers RPCs at any moment. 
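+	// The two channels below double as semaphores bounding concurrent RPCs:
+	// receive to acquire a worker slot (the value may be nil), send to put
+	// the slot back once the export attempt finishes.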
+	tracesClients  chan *tracesClientWithCancel
+	metricsClients chan *metricsClientWithCancel
+	grpcClientConn *grpc.ClientConn
+	metadata       metadata.MD
+}
+
+func newOcExporter(ctx context.Context, cfg *Config) (*ocExporter, error) {
+	if cfg.Endpoint == "" {
+		return nil, errors.New("OpenCensus exporter cfg requires an Endpoint")
+	}
+
+	if cfg.NumWorkers <= 0 {
+		return nil, errors.New("OpenCensus exporter cfg requires at least one worker")
+	}
+
+	dialOpts, err := cfg.GRPCClientSettings.ToDialOptions()
+	if err != nil {
+		return nil, err
+	}
+
+	var clientConn *grpc.ClientConn
+	if clientConn, err = grpc.DialContext(ctx, cfg.GRPCClientSettings.Endpoint, dialOpts...); err != nil {
+		return nil, err
+	}
+
+	oce := &ocExporter{
+		cfg:            cfg,
+		grpcClientConn: clientConn,
+		metadata:       metadata.New(cfg.GRPCClientSettings.Headers),
+	}
+	return oce, nil
+}
+
+func (oce *ocExporter) shutdown(context.Context) error {
+	if oce.tracesClients != nil {
+		// First remove all the clients from the channel.
+		for i := 0; i < oce.cfg.NumWorkers; i++ {
+			<-oce.tracesClients
+		}
+		// Now close the channel
+		close(oce.tracesClients)
+	}
+	if oce.metricsClients != nil {
+		// First remove all the clients from the channel.
+		for i := 0; i < oce.cfg.NumWorkers; i++ {
+			<-oce.metricsClients
+		}
+		// Now close the channel
+		close(oce.metricsClients)
+	}
+	return oce.grpcClientConn.Close()
+}
+
+func newTraceExporter(ctx context.Context, cfg *Config, logger *zap.Logger) (component.TracesExporter, error) {
+	oce, err := newOcExporter(ctx, cfg)
+	if err != nil {
+		return nil, err
+	}
+	oce.traceSvcClient = agenttracepb.NewTraceServiceClient(oce.grpcClientConn)
+	oce.tracesClients = make(chan *tracesClientWithCancel, cfg.NumWorkers)
+	// Try to create rpc clients now.
+	for i := 0; i < cfg.NumWorkers; i++ {
+		// Populate the channel with NumWorkers nil RPCs to keep the number of workers
+		// constant in the channel.
+		oce.tracesClients <- nil
+	}
+
+	return exporterhelper.NewTraceExporter(
+		cfg,
+		logger,
+		oce.pushTraceData,
+		exporterhelper.WithShutdown(oce.shutdown))
+}
+
+func newMetricsExporter(ctx context.Context, cfg *Config, logger *zap.Logger) (component.MetricsExporter, error) {
+	oce, err := newOcExporter(ctx, cfg)
+	if err != nil {
+		return nil, err
+	}
+	oce.metricsSvcClient = agentmetricspb.NewMetricsServiceClient(oce.grpcClientConn)
+	oce.metricsClients = make(chan *metricsClientWithCancel, cfg.NumWorkers)
+	// Try to create rpc clients now.
+	for i := 0; i < cfg.NumWorkers; i++ {
+		// Populate the channel with NumWorkers nil RPCs to keep the number of workers
+		// constant in the channel.
+		oce.metricsClients <- nil
+	}
+
+	return exporterhelper.NewMetricsExporter(
+		cfg,
+		logger,
+		oce.pushMetricsData,
+		exporterhelper.WithShutdown(oce.shutdown))
+}
+
+func (oce *ocExporter) pushTraceData(_ context.Context, td pdata.Traces) (int, error) {
+	// Get first available trace Client.
+	tClient, ok := <-oce.tracesClients
+	if !ok {
+		err := errors.New("failed to push traces, OpenCensus exporter was already stopped")
+		return td.SpanCount(), err
+	}
+
+	// In the tracesClients channel we always keep NumWorkers objects (sometimes nil),
+	// to make sure we don't open more than NumWorkers RPCs at any moment.
+	// Here check if the client is nil and create a new one if that is the case. A nil
+	// object means that an error happened: could not connect, service went down, etc.
+	if tClient == nil {
+		var err error
+		tClient, err = oce.createTraceServiceRPC()
+		if err != nil {
+			// Cannot create an RPC, put back nil to keep the number of workers constant.
+			oce.tracesClients <- nil
+			return td.SpanCount(), err
+		}
+	}
+
+	octds := internaldata.TraceDataToOC(td)
+	for _, octd := range octds {
+		// This is a hack because OC protocol expects a Node for the initial message.
+		node := octd.Node
+		if node == nil {
+			node = &commonpb.Node{}
+		}
+		resource := octd.Resource
+		if resource == nil {
+			resource = &resourcepb.Resource{}
+		}
+		req := &agenttracepb.ExportTraceServiceRequest{
+			Spans:    octd.Spans,
+			Resource: resource,
+			Node:     node,
+		}
+		if err := tClient.tsec.Send(req); err != nil {
+			// Error received, cancel the context used to create the RPC to free all resources,
+			// put back nil to keep the number of workers constant.
+			tClient.cancel()
+			oce.tracesClients <- nil
+			return td.SpanCount(), err
+		}
+	}
+	oce.tracesClients <- tClient
+	return 0, nil
+}
+
+func (oce *ocExporter) pushMetricsData(_ context.Context, md pdata.Metrics) (int, error) {
+	// Get first available mClient.
+	mClient, ok := <-oce.metricsClients
+	if !ok {
+		err := errors.New("failed to push metrics, OpenCensus exporter was already stopped")
+		return metricPointCount(md), err
+	}
+
+	// In the metricsClients channel we always keep NumWorkers objects (sometimes nil),
+	// to make sure we don't open more than NumWorkers RPCs at any moment.
+	// Here check if the client is nil and create a new one if that is the case. A nil
+	// object means that an error happened: could not connect, service went down, etc.
+	if mClient == nil {
+		var err error
+		mClient, err = oce.createMetricsServiceRPC()
+		if err != nil {
+			// Cannot create an RPC, put back nil to keep the number of workers constant.
+			oce.metricsClients <- nil
+			return metricPointCount(md), err
+		}
+	}
+
+	ocmds := internaldata.MetricsToOC(md)
+	for _, ocmd := range ocmds {
+		// This is a hack because OC protocol expects a Node for the initial message.
+		node := ocmd.Node
+		if node == nil {
+			node = &commonpb.Node{}
+		}
+		resource := ocmd.Resource
+		if resource == nil {
+			resource = &resourcepb.Resource{}
+		}
+		req := &agentmetricspb.ExportMetricsServiceRequest{
+			Metrics:  ocmd.Metrics,
+			Resource: resource,
+			Node:     node,
+		}
+		if err := mClient.msec.Send(req); err != nil {
+			// Error received, cancel the context used to create the RPC to free all resources,
+			// put back nil to keep the number of workers constant.
+			mClient.cancel()
+			oce.metricsClients <- nil
+			return metricPointCount(md), err
+		}
+	}
+	oce.metricsClients <- mClient
+	return 0, nil
+}
+
+func (oce *ocExporter) createTraceServiceRPC() (*tracesClientWithCancel, error) {
+	// Initiate the trace service by sending over node identifier info.
+	ctx, cancel := context.WithCancel(context.Background())
+	if len(oce.cfg.Headers) > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, metadata.New(oce.cfg.Headers))
+	}
+	// Cannot use grpc.WaitForReady(cfg.WaitForReady) because it will block forever.
+	traceClient, err := oce.traceSvcClient.Export(ctx)
+	if err != nil {
+		cancel()
+		return nil, fmt.Errorf("TraceServiceClient: %w", err)
+	}
+	return &tracesClientWithCancel{cancel: cancel, tsec: traceClient}, nil
+}
+
+func (oce *ocExporter) createMetricsServiceRPC() (*metricsClientWithCancel, error) {
+	// Initiate the metrics service by sending over node identifier info.
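+	// The cancel func is kept alongside the stream (see metricsClientWithCancel)
+	// so shutdown and error paths can release the RPC's resources.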
+ ctx, cancel := context.WithCancel(context.Background()) + if len(oce.cfg.Headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(oce.cfg.Headers)) + } + // Cannot use grpc.WaitForReady(cfg.WaitForReady) because will block forever. + metricsClient, err := oce.metricsSvcClient.Export(ctx) + if err != nil { + cancel() + return nil, fmt.Errorf("MetricsServiceClient: %w", err) + } + return &metricsClientWithCancel{cancel: cancel, msec: metricsClient}, nil +} + +func metricPointCount(md pdata.Metrics) int { + _, pc := md.MetricAndDataPointCount() + return pc +} diff --git a/internal/otel_collector/exporter/opencensusexporter/opencensus_test.go b/internal/otel_collector/exporter/opencensusexporter/opencensus_test.go new file mode 100644 index 00000000000..abfc4770ebf --- /dev/null +++ b/internal/otel_collector/exporter/opencensusexporter/opencensus_test.go @@ -0,0 +1,227 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/receiver/opencensusreceiver" + "go.opentelemetry.io/collector/testutil" +) + +func TestSendTraces(t *testing.T) { + sink := new(consumertest.TracesSink) + rFactory := opencensusreceiver.NewFactory() + rCfg := rFactory.CreateDefaultConfig().(*opencensusreceiver.Config) + endpoint := testutil.GetAvailableLocalAddress(t) + rCfg.GRPCServerSettings.NetAddr.Endpoint = endpoint + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + recv, err := rFactory.CreateTracesReceiver(context.Background(), params, rCfg, sink) + assert.NoError(t, err) + assert.NoError(t, recv.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, recv.Shutdown(context.Background())) + }) + + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + cfg.NumWorkers = 1 + exp, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + host := componenttest.NewNopHost() + require.NoError(t, exp.Start(context.Background(), host)) + t.Cleanup(func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }) + + td := testdata.GenerateTraceDataOneSpan() + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + testutil.WaitFor(t, func() bool { + return len(sink.AllTraces()) == 1 + }) + traces := sink.AllTraces() + 
require.Len(t, traces, 1) + assert.Equal(t, td, traces[0]) + + sink.Reset() + // Sending data no Node. + td.ResourceSpans().At(0).Resource().Attributes().InitEmptyWithCapacity(0) + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + testutil.WaitFor(t, func() bool { + return len(sink.AllTraces()) == 1 + }) + traces = sink.AllTraces() + require.Len(t, traces, 1) + assert.Equal(t, td, traces[0]) +} + +func TestSendTraces_NoBackend(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: "localhost:56569", + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + exp, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + host := componenttest.NewNopHost() + require.NoError(t, exp.Start(context.Background(), host)) + t.Cleanup(func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }) + + td := testdata.GenerateTraceDataOneSpan() + for i := 0; i < 10000; i++ { + assert.Error(t, exp.ConsumeTraces(context.Background(), td)) + } +} + +func TestSendTraces_AfterStop(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: "localhost:56569", + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + exp, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + host := componenttest.NewNopHost() + require.NoError(t, exp.Start(context.Background(), host)) + assert.NoError(t, exp.Shutdown(context.Background())) + + td := testdata.GenerateTraceDataOneSpan() + assert.Error(t, exp.ConsumeTraces(context.Background(), td)) +} + +func TestSendMetrics(t *testing.T) { + sink := new(consumertest.MetricsSink) + rFactory := opencensusreceiver.NewFactory() + rCfg := rFactory.CreateDefaultConfig().(*opencensusreceiver.Config) + endpoint := testutil.GetAvailableLocalAddress(t) + rCfg.GRPCServerSettings.NetAddr.Endpoint = endpoint + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + recv, err := rFactory.CreateMetricsReceiver(context.Background(), params, rCfg, sink) + assert.NoError(t, err) + assert.NoError(t, recv.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, recv.Shutdown(context.Background())) + }) + + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + cfg.NumWorkers = 1 + exp, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + host := componenttest.NewNopHost() + require.NoError(t, exp.Start(context.Background(), host)) + t.Cleanup(func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }) + + md := testdata.GenerateMetricsOneMetric() + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) + testutil.WaitFor(t, func() bool { + return len(sink.AllMetrics()) == 1 + }) + metrics := sink.AllMetrics() + require.Len(t, metrics, 1) + assert.Equal(t, md, metrics[0]) + + // Sending data no node. 
+ sink.Reset() + md.ResourceMetrics().At(0).Resource().Attributes().InitEmptyWithCapacity(0) + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) + testutil.WaitFor(t, func() bool { + return len(sink.AllMetrics()) == 1 + }) + metrics = sink.AllMetrics() + require.Len(t, metrics, 1) + assert.Equal(t, md, metrics[0]) +} + +func TestSendMetrics_NoBackend(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: "localhost:56569", + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + exp, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + host := componenttest.NewNopHost() + require.NoError(t, exp.Start(context.Background(), host)) + t.Cleanup(func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }) + + md := testdata.GenerateMetricsOneMetric() + for i := 0; i < 10000; i++ { + assert.Error(t, exp.ConsumeMetrics(context.Background(), md)) + } +} + +func TestSendMetrics_AfterStop(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: "localhost:56569", + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + exp, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + host := componenttest.NewNopHost() + require.NoError(t, exp.Start(context.Background(), host)) + assert.NoError(t, exp.Shutdown(context.Background())) + + md := testdata.GenerateMetricsOneMetric() + assert.Error(t, exp.ConsumeMetrics(context.Background(), md)) +} diff --git a/internal/otel_collector/exporter/opencensusexporter/testdata/config.yaml b/internal/otel_collector/exporter/opencensusexporter/testdata/config.yaml new file mode 100644 index 00000000000..20b1284dd59 --- /dev/null +++ b/internal/otel_collector/exporter/opencensusexporter/testdata/config.yaml @@ -0,0 +1,29 @@ +receivers: + examplereceiver: + +processors: + exampleprocessor: + +exporters: + opencensus: + opencensus/2: + endpoint: "1.2.3.4:1234" + compression: "on" + num_workers: 123 + ca_file: /var/lib/mycert.pem + headers: + "can you have a . 
here?": "F0000000-0000-0000-0000-000000000000" + header1: 234 + another: "somevalue" + balancer_name: "round_robin" + keepalive: + time: 20 + timeout: 30 + permit_without_stream: true + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [opencensus] diff --git a/internal/otel_collector/exporter/opencensusexporter/testdata/test_cert.pem b/internal/otel_collector/exporter/opencensusexporter/testdata/test_cert.pem new file mode 100644 index 00000000000..b2e77b89d49 --- /dev/null +++ b/internal/otel_collector/exporter/opencensusexporter/testdata/test_cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE6jCCAtICCQDVU4PtqpqADTANBgkqhkiG9w0BAQsFADA3MQswCQYDVQQGEwJV +UzETMBEGA1UECAwKY2FsaWZvcm5pYTETMBEGA1UECgwKb3BlbmNlbnN1czAeFw0x +OTAzMDQxODA3MjZaFw0yMDAzMDMxODA3MjZaMDcxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIDApjYWxpZm9ybmlhMRMwEQYDVQQKDApvcGVuY2Vuc3VzMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAy9JQiAOMzArcdiS4szbTuzg5yYijSSY6SvGj +XMs4/LEFLxgGmFfyHXxoVQzV26lTu/AiUFlZi4JY2qlkZyPwmmmSg4fmzikpVPiC +Vv9pvSIojs8gs0sHaOt40Q8ym43bNt3Mh8rYrs+XMERi6Ol9//j4LnfePkNU5uEo +qC8KQamckaMR6UEHFNunyOwvNBsipgTPldQUPGVnCsNKk8olYGAXS7DR25bgbPli +4T9VCSElsSPAODmyo+2MEDagVXa1vVYxKyO2k6oeBS0lsvdRqRTmGggcg0B/dk+a +H1CL9ful0cu9P3dQif+hfGay8udPkwDLPEq1+WnjJFut3Pmbk3SqUCas5iWt76kK +eKFh4k8fCy4yiaZxzvSbm9+bEBHAl0ZXd8pjvAsBfCKe6G9SBzE1DK4FjWiiEGCb +5dGsyTKr33q3DekLvT3LF8ZeON/13d9toucX9PqG2HDwMP/Fb4WjQIzOc/H9wIak +pf7u6QBDGUiCMmoDrp1d8RsI1RPbEhoywH0YlLmwgf+cr1dU7vlISf576EsGxFz4 ++/sZjIBvZBHn/x0MH+bs4J8V3vMujfDoRdhL07bK7q/AkEALUxljKEfoWeqiuVzK +F9BVv3xNhiua2kgPVbMNWPrQ5uotkNp8IykJ3QOuQ3p5pzxdGfpLd6f8gmJDmcbi +AI9dWTcCAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAVVi4t/Sumre+AGTaU7np9dl2 +tpllbES5ixe6m2uezt5wAzYNNyuQ2mMG2XrSkMy5gvBZRT9nRNSmLV8VEcxZihG0 +YHS5soXnLL3Jdlwxp98WTDPvM1ntxcHyEyqrrg9YDfKn4sOrr5vo2yZzoKwtxtc7 +lue9JormVx7GxMi7NwaUtCbnwAIcqJJpFjt1EhmJOxGqTJPgUvTBdeGvRj30c6fk +pqpUdPbZ7RKPEtbLoMoCBujKnErv+H0G6Vp9WyCHN+Mi9uTMsGwH14cmJjmfwGDC +8/WF4LdlawFnf/arIp9YcVwcP91d4ywyvbuuo2M7qdosQ7k4uRZ3tyggLYShS3RW +BMEhMRDz9dM0oKGF+HnaS824BIh6O6Hn82Vt8uCKS7IbEX99/kkN1KcqqQe6Lwjq +tG/lm4K5yf+FJVDivpZ9mYTvqTBjhTaOp6m3HYSNJfS0hLQVvEuBNXd8bHiXkcLp +rmFOYUWsjxV1Qku3U5Rner0UpB2Fuw9nJcXuDgWG0gjwzAZ83y3du1VIZp0Ad8Vv +IYpaucbImGJszMtNXn3l72K1wvQVIhm9eRwYc3QteJzweHaDsbytZEoS/GhTrZIT +wRe5ZGrjJBJngRANRSm1BH8j6PjLem9mzPb2eytwJJA0lLhUk4vYproVvXcx0vow +5F+5VB1YB8/tbWePmpo= +-----END CERTIFICATE----- diff --git a/internal/otel_collector/exporter/otlpexporter/README.md b/internal/otel_collector/exporter/otlpexporter/README.md new file mode 100644 index 00000000000..d3dd6fbc142 --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/README.md @@ -0,0 +1,51 @@ +# OTLP gRPC Exporter + +Exports data via gRPC using [OTLP]( +https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md) +format. By default, this exporter requires TLS and offers queued retry capabilities. + +:warning: OTLP metrics and logs formats are currently marked as "Alpha" and may change in +incompatible way any time. + +Supported pipeline types: traces, metrics + +## Getting Started + +The following settings are required: + +- `endpoint` (no default): host:port to which the exporter is going to send OTLP trace data, +using the gRPC protocol. The valid syntax is described +[here](https://github.com/grpc/grpc/blob/master/doc/naming.md) + +By default, TLS is enabled: + +- `insecure` (default = `false`): whether to enable client transport security for + the exporter's connection. 
+
+As a result, the following parameters are also required:
+
+- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+Example:
+
+```yaml
+exporters:
+  otlp:
+    endpoint: otelcol2:55680
+    cert_file: file.cert
+    key_file: file.key
+  otlp/2:
+    endpoint: otelcol2:55680
+    insecure: true
+```
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md)
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md)
diff --git a/internal/otel_collector/exporter/otlpexporter/config.go b/internal/otel_collector/exporter/otlpexporter/config.go
new file mode 100644
index 00000000000..c410ddf5689
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/config.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpexporter
+
+import (
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// Config defines configuration for OTLP exporter.
+type Config struct {
+	configmodels.ExporterSettings  `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.QueueSettings   `mapstructure:"sending_queue"`
+	exporterhelper.RetrySettings   `mapstructure:"retry_on_failure"`
+
+	configgrpc.GRPCClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+}
diff --git a/internal/otel_collector/exporter/otlpexporter/config_test.go b/internal/otel_collector/exporter/otlpexporter/config_test.go
new file mode 100644
index 00000000000..bf3032a09f0
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/config_test.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpexporter + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + e0 := cfg.Exporters["otlp"] + assert.Equal(t, e0, factory.CreateDefaultConfig()) + + e1 := cfg.Exporters["otlp/2"] + assert.Equal(t, e1, + &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "otlp/2", + TypeVal: "otlp", + }, + TimeoutSettings: exporterhelper.TimeoutSettings{ + Timeout: 10 * time.Second, + }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: map[string]string{ + "can you have a . here?": "F0000000-0000-0000-0000-000000000000", + "header1": "234", + "another": "somevalue", + }, + Endpoint: "1.2.3.4:1234", + Compression: "on", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/var/lib/mycert.pem", + }, + Insecure: false, + }, + Keepalive: &configgrpc.KeepaliveClientConfig{ + Time: 20 * time.Second, + PermitWithoutStream: true, + Timeout: 30 * time.Second, + }, + WriteBufferSize: 512 * 1024, + PerRPCAuth: &configgrpc.PerRPCAuthConfig{ + AuthType: "bearer", + BearerToken: "some-token", + }, + BalancerName: "round_robin", + }, + }) +} diff --git a/internal/otel_collector/exporter/otlpexporter/factory.go b/internal/otel_collector/exporter/otlpexporter/factory.go new file mode 100644 index 00000000000..5fa88715efd --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/factory.go @@ -0,0 +1,133 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpexporter + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "otlp" +) + +// NewFactory creates a factory for OTLP exporter. 
+func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTraceExporter), + exporterhelper.WithMetrics(createMetricsExporter), + exporterhelper.WithLogs(createLogsExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: map[string]string{}, + // We almost read 0 bytes, so no need to tune ReadBufferSize. + WriteBufferSize: 512 * 1024, + }, + } +} + +func createTraceExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.TracesExporter, error) { + oce, err := newExporter(cfg) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + oexp, err := exporterhelper.NewTraceExporter( + cfg, + params.Logger, + oce.pushTraceData, + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(oce.shutdown)) + if err != nil { + return nil, err + } + + return oexp, nil +} + +func createMetricsExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.MetricsExporter, error) { + oce, err := newExporter(cfg) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + oexp, err := exporterhelper.NewMetricsExporter( + cfg, + params.Logger, + oce.pushMetricsData, + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(oce.shutdown), + ) + if err != nil { + return nil, err + } + + return oexp, nil +} + +func createLogsExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.LogsExporter, error) { + oce, err := newExporter(cfg) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + oexp, err := exporterhelper.NewLogsExporter( + cfg, + params.Logger, + oce.pushLogData, + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithShutdown(oce.shutdown), + ) + if err != nil { + return nil, err + } + + return oexp, nil +} diff --git a/internal/otel_collector/exporter/otlpexporter/factory_test.go b/internal/otel_collector/exporter/otlpexporter/factory_test.go new file mode 100644 index 00000000000..0659e52b1cd --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/factory_test.go @@ -0,0 +1,198 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlpexporter + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/testutil" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) + ocfg, ok := factory.CreateDefaultConfig().(*Config) + assert.True(t, ok) + assert.Equal(t, ocfg.RetrySettings, exporterhelper.DefaultRetrySettings()) + assert.Equal(t, ocfg.QueueSettings, exporterhelper.DefaultQueueSettings()) + assert.Equal(t, ocfg.TimeoutSettings, exporterhelper.DefaultTimeoutSettings()) +} + +func TestCreateMetricsExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings.Endpoint = testutil.GetAvailableLocalAddress(t) + + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + oexp, err := factory.CreateMetricsExporter(context.Background(), creationParams, cfg) + require.Nil(t, err) + require.NotNil(t, oexp) +} + +func TestCreateTraceExporter(t *testing.T) { + endpoint := testutil.GetAvailableLocalAddress(t) + + tests := []struct { + name string + config Config + mustFail bool + }{ + { + name: "NoEndpoint", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: "", + }, + }, + mustFail: true, + }, + { + name: "UseSecure", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + }, + }, + }, + { + name: "Keepalive", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Keepalive: &configgrpc.KeepaliveClientConfig{ + Time: 30 * time.Second, + Timeout: 25 * time.Second, + PermitWithoutStream: true, + }, + }, + }, + }, + { + name: "Compression", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Compression: configgrpc.CompressionGzip, + }, + }, + }, + { + name: "Headers", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Headers: map[string]string{ + "hdr1": "val1", + "hdr2": "val2", + }, + }, + }, + }, + { + name: "NumConsumers", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + }, + }, + }, + { + name: "CompressionError", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + Compression: "unknown compression", + }, + }, + mustFail: true, + }, + { + name: "CaCert", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "testdata/test_cert.pem", + }, + }, + }, + }, + }, + { + name: "CertPemFileError", + config: Config{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "nosuchfile", + }, + }, + }, + }, + mustFail: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + 
creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+            consumer, err := factory.CreateTracesExporter(context.Background(), creationParams, &tt.config)
+
+            if tt.mustFail {
+                assert.NotNil(t, err)
+            } else {
+                assert.NoError(t, err)
+                assert.NotNil(t, consumer)
+
+                err = consumer.Shutdown(context.Background())
+                if err != nil {
+                    // Since the endpoint of OTLP exporter doesn't actually exist,
+                    // exporter may already have stopped because it cannot connect.
+                    assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing")
+                }
+            }
+        })
+    }
+}
+
+func TestCreateLogsExporter(t *testing.T) {
+    factory := NewFactory()
+    cfg := factory.CreateDefaultConfig().(*Config)
+    cfg.GRPCClientSettings.Endpoint = testutil.GetAvailableLocalAddress(t)
+
+    creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+    oexp, err := factory.CreateLogsExporter(context.Background(), creationParams, cfg)
+    require.Nil(t, err)
+    require.NotNil(t, oexp)
+}
diff --git a/internal/otel_collector/exporter/otlpexporter/otlp.go b/internal/otel_collector/exporter/otlpexporter/otlp.go
new file mode 100644
index 00000000000..cf6dec789ee
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/otlp.go
@@ -0,0 +1,248 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpexporter
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "time"
+
+    "google.golang.org/genproto/googleapis/rpc/errdetails"
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/metadata"
+    "google.golang.org/grpc/status"
+
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/consumer/consumererror"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/exporter/exporterhelper"
+    "go.opentelemetry.io/collector/internal"
+    otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1"
+    otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1"
+    otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1"
+)
+
+type exporterImp struct {
+    // Input configuration.
+    config *Config
+    w      *grpcSender
+}
+
+var (
+    errPermanentError = consumererror.Permanent(errors.New("fatal error sending to server"))
+)
+
+// Create new exporter and start it. The exporter will begin connecting but
+// this function may return before the connection is established.
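+// (Note added in this vendoring, not upstream text: cfg must be the *Config
+// produced by this package's factory; an empty Endpoint is rejected below
+// before any connection is attempted.)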
+func newExporter(cfg configmodels.Exporter) (*exporterImp, error) { + oCfg := cfg.(*Config) + + if oCfg.Endpoint == "" { + return nil, errors.New("OTLP exporter config requires an Endpoint") + } + + e := &exporterImp{} + e.config = oCfg + w, err := newGrpcSender(oCfg) + if err != nil { + return nil, err + } + e.w = w + return e, nil +} + +func (e *exporterImp) shutdown(context.Context) error { + return e.w.stop() +} + +func (e *exporterImp) pushTraceData(ctx context.Context, td pdata.Traces) (int, error) { + request := &otlptrace.ExportTraceServiceRequest{ + ResourceSpans: pdata.TracesToOtlp(td), + } + err := e.w.exportTrace(ctx, request) + + if err != nil { + return td.SpanCount(), fmt.Errorf("failed to push trace data via OTLP exporter: %w", err) + } + return 0, nil +} + +func (e *exporterImp) pushMetricsData(ctx context.Context, md pdata.Metrics) (int, error) { + request := &otlpmetrics.ExportMetricsServiceRequest{ + ResourceMetrics: pdata.MetricsToOtlp(md), + } + err := e.w.exportMetrics(ctx, request) + + if err != nil { + return md.MetricCount(), fmt.Errorf("failed to push metrics data via OTLP exporter: %w", err) + } + return 0, nil +} + +func (e *exporterImp) pushLogData(ctx context.Context, logs pdata.Logs) (int, error) { + request := &otlplogs.ExportLogsServiceRequest{ + ResourceLogs: internal.LogsToOtlp(logs.InternalRep()), + } + err := e.w.exportLogs(ctx, request) + + if err != nil { + return logs.LogRecordCount(), fmt.Errorf("failed to push log data via OTLP exporter: %w", err) + } + return 0, nil +} + +type grpcSender struct { + // gRPC clients and connection. + traceExporter otlptrace.TraceServiceClient + metricExporter otlpmetrics.MetricsServiceClient + logExporter otlplogs.LogsServiceClient + grpcClientConn *grpc.ClientConn + metadata metadata.MD + waitForReady bool +} + +func newGrpcSender(config *Config) (*grpcSender, error) { + dialOpts, err := config.GRPCClientSettings.ToDialOptions() + if err != nil { + return nil, err + } + + var clientConn *grpc.ClientConn + if clientConn, err = grpc.Dial(config.GRPCClientSettings.Endpoint, dialOpts...); err != nil { + return nil, err + } + + gs := &grpcSender{ + traceExporter: otlptrace.NewTraceServiceClient(clientConn), + metricExporter: otlpmetrics.NewMetricsServiceClient(clientConn), + logExporter: otlplogs.NewLogsServiceClient(clientConn), + grpcClientConn: clientConn, + metadata: metadata.New(config.GRPCClientSettings.Headers), + waitForReady: config.GRPCClientSettings.WaitForReady, + } + return gs, nil +} + +func (gs *grpcSender) stop() error { + return gs.grpcClientConn.Close() +} + +func (gs *grpcSender) exportTrace(ctx context.Context, request *otlptrace.ExportTraceServiceRequest) error { + _, err := gs.traceExporter.Export(gs.enhanceContext(ctx), request, grpc.WaitForReady(gs.waitForReady)) + return processError(err) +} + +func (gs *grpcSender) exportMetrics(ctx context.Context, request *otlpmetrics.ExportMetricsServiceRequest) error { + _, err := gs.metricExporter.Export(gs.enhanceContext(ctx), request, grpc.WaitForReady(gs.waitForReady)) + return processError(err) +} + +func (gs *grpcSender) exportLogs(ctx context.Context, request *otlplogs.ExportLogsServiceRequest) error { + _, err := gs.logExporter.Export(gs.enhanceContext(ctx), request, grpc.WaitForReady(gs.waitForReady)) + return processError(err) +} + +func (gs *grpcSender) enhanceContext(ctx context.Context) context.Context { + if gs.metadata.Len() > 0 { + return metadata.NewOutgoingContext(ctx, gs.metadata) + } + return ctx +} + +// Send a trace or metrics 
request to the server. The "perform" function is expected to make
+// the actual gRPC unary call that sends the request. This function implements the
+// common OTLP logic around request handling such as retries and throttling.
+func processError(err error) error {
+    if err == nil {
+        // Request is successful, we are done.
+        return nil
+    }
+
+    // We have an error, check gRPC status code.
+
+    st := status.Convert(err)
+    if st.Code() == codes.OK {
+        // Not really an error, still success.
+        return nil
+    }
+
+    // Now, this is a real error.
+
+    if !shouldRetry(st.Code()) {
+        // It is not a retryable error, we should not retry.
+        return errPermanentError
+    }
+
+    // Need to retry.
+
+    // Check if server returned throttling information.
+    throttleDuration := getThrottleDuration(st)
+    if throttleDuration != 0 {
+        return exporterhelper.NewThrottleRetry(err, throttleDuration)
+    }
+
+    return err
+}
+
+func shouldRetry(code codes.Code) bool {
+    switch code {
+    case codes.OK:
+        // Success. This function should not be called for this code, the best we
+        // can do is tell the caller not to retry.
+        return false
+
+    case codes.Canceled,
+        codes.DeadlineExceeded,
+        codes.PermissionDenied,
+        codes.Unauthenticated,
+        codes.ResourceExhausted,
+        codes.Aborted,
+        codes.OutOfRange,
+        codes.Unavailable,
+        codes.DataLoss:
+        // These are retryable errors.
+        return true
+
+    case codes.Unknown,
+        codes.InvalidArgument,
+        codes.NotFound,
+        codes.AlreadyExists,
+        codes.FailedPrecondition,
+        codes.Unimplemented,
+        codes.Internal:
+        // These are fatal errors, don't retry.
+        return false
+
+    default:
+        // Don't retry on unknown codes.
+        return false
+    }
+}
+
+func getThrottleDuration(status *status.Status) time.Duration {
+    // See if throttling information is available.
+    for _, detail := range status.Details() {
+        if t, ok := detail.(*errdetails.RetryInfo); ok {
+            if t.RetryDelay.Seconds > 0 || t.RetryDelay.Nanos > 0 {
+                // We are throttled. Wait before retrying as requested by the server.
+                return time.Duration(t.RetryDelay.Seconds)*time.Second + time.Duration(t.RetryDelay.Nanos)*time.Nanosecond
+            }
+            return 0
+        }
+    }
+    return 0
+}
diff --git a/internal/otel_collector/exporter/otlpexporter/otlp_test.go b/internal/otel_collector/exporter/otlpexporter/otlp_test.go
new file mode 100644
index 00000000000..8859b89288b
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/otlp_test.go
@@ -0,0 +1,535 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package otlpexporter + +import ( + "context" + "net" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/pdata" + otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1" + otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + otlptraces "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/testutil" +) + +type mockReceiver struct { + srv *grpc.Server + requestCount int32 + totalItems int32 + mux sync.Mutex + metadata metadata.MD +} + +func (r *mockReceiver) GetMetadata() metadata.MD { + r.mux.Lock() + defer r.mux.Unlock() + return r.metadata +} + +type mockTraceReceiver struct { + mockReceiver + lastRequest *otlptraces.ExportTraceServiceRequest +} + +func (r *mockTraceReceiver) Export( + ctx context.Context, + req *otlptraces.ExportTraceServiceRequest, +) (*otlptraces.ExportTraceServiceResponse, error) { + atomic.AddInt32(&r.requestCount, 1) + spanCount := 0 + for _, rs := range req.ResourceSpans { + for _, ils := range rs.InstrumentationLibrarySpans { + spanCount += len(ils.Spans) + } + } + atomic.AddInt32(&r.totalItems, int32(spanCount)) + r.mux.Lock() + defer r.mux.Unlock() + r.lastRequest = req + r.metadata, _ = metadata.FromIncomingContext(ctx) + return &otlptraces.ExportTraceServiceResponse{}, nil +} + +func (r *mockTraceReceiver) GetLastRequest() *otlptraces.ExportTraceServiceRequest { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func otlpTraceReceiverOnGRPCServer(ln net.Listener) *mockTraceReceiver { + rcv := &mockTraceReceiver{ + mockReceiver: mockReceiver{ + srv: obsreport.GRPCServerWithObservabilityEnabled(), + }, + } + + // Now run it as a gRPC server + otlptraces.RegisterTraceServiceServer(rcv.srv, rcv) + go func() { + _ = rcv.srv.Serve(ln) + }() + + return rcv +} + +type mockLogsReceiver struct { + mockReceiver + lastRequest *otlplogs.ExportLogsServiceRequest +} + +func (r *mockLogsReceiver) Export( + ctx context.Context, + req *otlplogs.ExportLogsServiceRequest, +) (*otlplogs.ExportLogsServiceResponse, error) { + atomic.AddInt32(&r.requestCount, 1) + recordCount := 0 + for _, rs := range req.ResourceLogs { + for _, il := range rs.InstrumentationLibraryLogs { + recordCount += len(il.Logs) + } + } + atomic.AddInt32(&r.totalItems, int32(recordCount)) + r.mux.Lock() + defer r.mux.Unlock() + r.lastRequest = req + r.metadata, _ = metadata.FromIncomingContext(ctx) + return &otlplogs.ExportLogsServiceResponse{}, nil +} + +func (r *mockLogsReceiver) GetLastRequest() *otlplogs.ExportLogsServiceRequest { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func otlpLogsReceiverOnGRPCServer(ln net.Listener) *mockLogsReceiver { + rcv := &mockLogsReceiver{ + mockReceiver: mockReceiver{ + srv: obsreport.GRPCServerWithObservabilityEnabled(), + }, + } + + // Now run it as a gRPC server + otlplogs.RegisterLogsServiceServer(rcv.srv, rcv) + go func() { + _ = rcv.srv.Serve(ln) + }() + + return rcv +} + 
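+// (Note added in this vendoring, not upstream text.) The mock receivers in this
+// file share one pattern: bind an ephemeral localhost port, serve the relevant
+// OTLP gRPC service on a background goroutine, and count requests/items with
+// atomics so tests can poll via testutil.WaitFor. For example:
+//
+//     ln, _ := net.Listen("tcp", "localhost:")
+//     rcv := otlpTraceReceiverOnGRPCServer(ln)
+//     defer rcv.srv.GracefulStop()
+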
+type mockMetricsReceiver struct { + mockReceiver + lastRequest *otlpmetrics.ExportMetricsServiceRequest +} + +func (r *mockMetricsReceiver) Export( + ctx context.Context, + req *otlpmetrics.ExportMetricsServiceRequest, +) (*otlpmetrics.ExportMetricsServiceResponse, error) { + atomic.AddInt32(&r.requestCount, 1) + _, recordCount := pdata.MetricsFromOtlp(req.ResourceMetrics).MetricAndDataPointCount() + atomic.AddInt32(&r.totalItems, int32(recordCount)) + r.mux.Lock() + defer r.mux.Unlock() + r.lastRequest = req + r.metadata, _ = metadata.FromIncomingContext(ctx) + return &otlpmetrics.ExportMetricsServiceResponse{}, nil +} + +func (r *mockMetricsReceiver) GetLastRequest() *otlpmetrics.ExportMetricsServiceRequest { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func otlpMetricsReceiverOnGRPCServer(ln net.Listener) *mockMetricsReceiver { + rcv := &mockMetricsReceiver{ + mockReceiver: mockReceiver{ + srv: obsreport.GRPCServerWithObservabilityEnabled(), + }, + } + + // Now run it as a gRPC server + otlpmetrics.RegisterMetricsServiceServer(rcv.srv, rcv) + go func() { + _ = rcv.srv.Serve(ln) + }() + + return rcv +} + +func TestSendTraces(t *testing.T) { + // Start an OTLP-compatible receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv := otlpTraceReceiverOnGRPCServer(ln) + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + Headers: map[string]string{ + "header": "header-value", + }, + } + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + exp, err := factory.CreateTracesExporter(context.Background(), creationParams, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) + + // Send empty trace. + td := testdata.GenerateTraceDataEmpty() + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + + // Wait until it is received. + testutil.WaitFor(t, func() bool { + return atomic.LoadInt32(&rcv.requestCount) > 0 + }, "receive a request") + + // Ensure it was received empty. + assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.totalItems)) + + // A trace with 2 spans. + td = testdata.GenerateTraceDataTwoSpansSameResource() + + expectedOTLPReq := &otlptraces.ExportTraceServiceRequest{ + ResourceSpans: testdata.GenerateTraceOtlpSameResourceTwoSpans(), + } + + err = exp.ConsumeTraces(context.Background(), td) + assert.NoError(t, err) + + // Wait until it is received. + testutil.WaitFor(t, func() bool { + return atomic.LoadInt32(&rcv.requestCount) > 1 + }, "receive a request") + + expectedHeader := []string{"header-value"} + + // Verify received span. 
+    assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.totalItems))
+    assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.requestCount))
+    assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest())
+
+    require.EqualValues(t, rcv.GetMetadata().Get("header"), expectedHeader)
+}
+
+func TestSendMetrics(t *testing.T) {
+    // Start an OTLP-compatible receiver.
+    ln, err := net.Listen("tcp", "localhost:")
+    require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err)
+    rcv := otlpMetricsReceiverOnGRPCServer(ln)
+    // Also closes the connection.
+    defer rcv.srv.GracefulStop()
+
+    // Start an OTLP exporter and point to the receiver.
+    factory := NewFactory()
+    cfg := factory.CreateDefaultConfig().(*Config)
+    cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{
+        Endpoint: ln.Addr().String(),
+        TLSSetting: configtls.TLSClientSetting{
+            Insecure: true,
+        },
+        Headers: map[string]string{
+            "header": "header-value",
+        },
+    }
+    creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+    exp, err := factory.CreateMetricsExporter(context.Background(), creationParams, cfg)
+    require.NoError(t, err)
+    require.NotNil(t, exp)
+    defer func() {
+        assert.NoError(t, exp.Shutdown(context.Background()))
+    }()
+
+    host := componenttest.NewNopHost()
+
+    assert.NoError(t, exp.Start(context.Background(), host))
+
+    // Ensure that initially there is no data in the receiver.
+    assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount))
+
+    // Send empty metrics.
+    md := testdata.GenerateMetricsEmpty()
+    assert.NoError(t, exp.ConsumeMetrics(context.Background(), md))
+
+    // Wait until it is received.
+    testutil.WaitFor(t, func() bool {
+        return atomic.LoadInt32(&rcv.requestCount) > 0
+    }, "receive a request")
+
+    // Ensure it was received empty.
+    assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.totalItems))
+
+    // A request with 2 metrics.
+    md = testdata.GenerateMetricsTwoMetrics()
+
+    expectedOTLPReq := &otlpmetrics.ExportMetricsServiceRequest{
+        ResourceMetrics: testdata.GenerateMetricsOtlpTwoMetrics(),
+    }
+
+    err = exp.ConsumeMetrics(context.Background(), md)
+    assert.NoError(t, err)
+
+    // Wait until it is received.
+    testutil.WaitFor(t, func() bool {
+        return atomic.LoadInt32(&rcv.requestCount) > 1
+    }, "receive a request")
+
+    expectedHeader := []string{"header-value"}
+
+    // Verify received metrics.
+    assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.requestCount))
+    assert.EqualValues(t, 4, atomic.LoadInt32(&rcv.totalItems))
+    assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest())
+
+    require.EqualValues(t, rcv.GetMetadata().Get("header"), expectedHeader)
+}
+
+func TestSendTraceDataServerDownAndUp(t *testing.T) {
+    // Find the addr, but don't start the server.
+    ln, err := net.Listen("tcp", "localhost:")
+    require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err)
+
+    // Start an OTLP exporter pointing at the (not yet started) address.
+    factory := NewFactory()
+    cfg := factory.CreateDefaultConfig().(*Config)
+    // Disable queuing to ensure that we execute the request when calling ConsumeTraces;
+    // otherwise we will not see the error.
+    cfg.QueueSettings.Enabled = false
+    cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{
+        Endpoint: ln.Addr().String(),
+        TLSSetting: configtls.TLSClientSetting{
+            Insecure: true,
+        },
+        // Need to wait for every request, blocking until it either times out or succeeds.
+        // Do not rely on external retry logic here; if that is intended, set InitialInterval to 100ms.
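+        // (Note added in this vendoring: WaitForReady makes each gRPC call block
+        // while the connection is down instead of failing fast, so it is the 1s
+        // context deadlines below that abort the calls with DeadlineExceeded.)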
+        WaitForReady: true,
+    }
+    creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+    exp, err := factory.CreateTracesExporter(context.Background(), creationParams, cfg)
+    require.NoError(t, err)
+    require.NotNil(t, exp)
+    defer func() {
+        assert.NoError(t, exp.Shutdown(context.Background()))
+    }()
+
+    host := componenttest.NewNopHost()
+
+    assert.NoError(t, exp.Start(context.Background(), host))
+
+    // A trace with 2 spans.
+    td := testdata.GenerateTraceDataTwoSpansSameResource()
+    ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+    assert.Error(t, exp.ConsumeTraces(ctx, td))
+    assert.EqualValues(t, context.DeadlineExceeded, ctx.Err())
+    cancel()
+
+    ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
+    assert.Error(t, exp.ConsumeTraces(ctx, td))
+    assert.EqualValues(t, context.DeadlineExceeded, ctx.Err())
+    cancel()
+
+    startServerAndMakeRequest(t, exp, td, ln)
+
+    ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
+    assert.Error(t, exp.ConsumeTraces(ctx, td))
+    assert.EqualValues(t, context.DeadlineExceeded, ctx.Err())
+    cancel()
+
+    // First call to startServerAndMakeRequest closed the connection. There is a race condition here in that the
+    // port may be reused; if this gets flaky, rethink what to do.
+    ln, err = net.Listen("tcp", ln.Addr().String())
+    require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err)
+    startServerAndMakeRequest(t, exp, td, ln)
+
+    ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
+    assert.Error(t, exp.ConsumeTraces(ctx, td))
+    assert.EqualValues(t, context.DeadlineExceeded, ctx.Err())
+    cancel()
+}
+
+func TestSendTraceDataServerStartWhileRequest(t *testing.T) {
+    // Find the addr, but don't start the server.
+    ln, err := net.Listen("tcp", "localhost:")
+    require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err)
+
+    // Start an OTLP exporter pointing at the (not yet started) address.
+    factory := NewFactory()
+    cfg := factory.CreateDefaultConfig().(*Config)
+    cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{
+        Endpoint: ln.Addr().String(),
+        TLSSetting: configtls.TLSClientSetting{
+            Insecure: true,
+        },
+    }
+    creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+    exp, err := factory.CreateTracesExporter(context.Background(), creationParams, cfg)
+    require.NoError(t, err)
+    require.NotNil(t, exp)
+    defer func() {
+        assert.NoError(t, exp.Shutdown(context.Background()))
+    }()
+
+    host := componenttest.NewNopHost()
+
+    assert.NoError(t, exp.Start(context.Background(), host))
+
+    // A trace with 2 spans.
+    td := testdata.GenerateTraceDataTwoSpansSameResource()
+    done := make(chan bool, 1)
+    defer close(done)
+    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    go func() {
+        assert.NoError(t, exp.ConsumeTraces(ctx, td))
+        done <- true
+    }()
+
+    time.Sleep(2 * time.Second)
+    rcv := otlpTraceReceiverOnGRPCServer(ln)
+    defer rcv.srv.GracefulStop()
+    // Wait until one of the conditions below triggers.
+    select {
+    case <-ctx.Done():
+        t.Fail()
+    case <-done:
+        assert.NoError(t, ctx.Err())
+    }
+    cancel()
+}
+
+func startServerAndMakeRequest(t *testing.T, exp component.TracesExporter, td pdata.Traces, ln net.Listener) {
+    rcv := otlpTraceReceiverOnGRPCServer(ln)
+    defer rcv.srv.GracefulStop()
+    // Ensure that initially there is no data in the receiver.
+    assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount))
+
+    // Resend the request; this should succeed.
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + assert.NoError(t, exp.ConsumeTraces(ctx, td)) + cancel() + + // Wait until it is received. + testutil.WaitFor(t, func() bool { + return atomic.LoadInt32(&rcv.requestCount) > 0 + }, "receive a request") + + expectedOTLPReq := &otlptraces.ExportTraceServiceRequest{ + ResourceSpans: testdata.GenerateTraceOtlpSameResourceTwoSpans(), + } + + // Verify received span. + assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.totalItems)) + assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest()) +} + +func TestSendLogData(t *testing.T) { + // Start an OTLP-compatible receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv := otlpLogsReceiverOnGRPCServer(ln) + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.GRPCClientSettings = configgrpc.GRPCClientSettings{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + } + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + exp, err := factory.CreateLogsExporter(context.Background(), creationParams, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) + + // Send empty request. + td := testdata.GenerateLogDataEmpty() + assert.NoError(t, exp.ConsumeLogs(context.Background(), td)) + + // Wait until it is received. + testutil.WaitFor(t, func() bool { + return atomic.LoadInt32(&rcv.requestCount) > 0 + }, "receive a request") + + // Ensure it was received empty. + assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.totalItems)) + + // A request with 2 log entries. + td = testdata.GenerateLogDataTwoLogsSameResource() + + expectedOTLPReq := &otlplogs.ExportLogsServiceRequest{ + ResourceLogs: testdata.GenerateLogOtlpSameResourceTwoLogs(), + } + + err = exp.ConsumeLogs(context.Background(), td) + assert.NoError(t, err) + + // Wait until it is received. + testutil.WaitFor(t, func() bool { + return atomic.LoadInt32(&rcv.requestCount) > 1 + }, "receive a request") + + // Verify received logs. + assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.requestCount)) + assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.totalItems)) + assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest()) +} diff --git a/internal/otel_collector/exporter/otlpexporter/testdata/config.yaml b/internal/otel_collector/exporter/otlpexporter/testdata/config.yaml new file mode 100644 index 00000000000..c889a7af635 --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/testdata/config.yaml @@ -0,0 +1,41 @@ +receivers: + examplereceiver: + +processors: + exampleprocessor: + +exporters: + otlp: + otlp/2: + endpoint: "1.2.3.4:1234" + compression: "on" + ca_file: /var/lib/mycert.pem + timeout: 10s + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + per_rpc_auth: + type: bearer + bearer_token: some-token + headers: + "can you have a . 
here?": "F0000000-0000-0000-0000-000000000000"
+      header1: 234
+      another: "somevalue"
+    keepalive:
+      time: 20s
+      timeout: 30s
+      permit_without_stream: true
+    balancer_name: "round_robin"
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [otlp]
diff --git a/internal/otel_collector/exporter/otlpexporter/testdata/test_cert.pem b/internal/otel_collector/exporter/otlpexporter/testdata/test_cert.pem
new file mode 100644
index 00000000000..b2e77b89d49
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/testdata/test_cert.pem
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIE6jCCAtICCQDVU4PtqpqADTANBgkqhkiG9w0BAQsFADA3MQswCQYDVQQGEwJV
+UzETMBEGA1UECAwKY2FsaWZvcm5pYTETMBEGA1UECgwKb3BlbmNlbnN1czAeFw0x
+OTAzMDQxODA3MjZaFw0yMDAzMDMxODA3MjZaMDcxCzAJBgNVBAYTAlVTMRMwEQYD
+VQQIDApjYWxpZm9ybmlhMRMwEQYDVQQKDApvcGVuY2Vuc3VzMIICIjANBgkqhkiG
+9w0BAQEFAAOCAg8AMIICCgKCAgEAy9JQiAOMzArcdiS4szbTuzg5yYijSSY6SvGj
+XMs4/LEFLxgGmFfyHXxoVQzV26lTu/AiUFlZi4JY2qlkZyPwmmmSg4fmzikpVPiC
+Vv9pvSIojs8gs0sHaOt40Q8ym43bNt3Mh8rYrs+XMERi6Ol9//j4LnfePkNU5uEo
+qC8KQamckaMR6UEHFNunyOwvNBsipgTPldQUPGVnCsNKk8olYGAXS7DR25bgbPli
+4T9VCSElsSPAODmyo+2MEDagVXa1vVYxKyO2k6oeBS0lsvdRqRTmGggcg0B/dk+a
+H1CL9ful0cu9P3dQif+hfGay8udPkwDLPEq1+WnjJFut3Pmbk3SqUCas5iWt76kK
+eKFh4k8fCy4yiaZxzvSbm9+bEBHAl0ZXd8pjvAsBfCKe6G9SBzE1DK4FjWiiEGCb
+5dGsyTKr33q3DekLvT3LF8ZeON/13d9toucX9PqG2HDwMP/Fb4WjQIzOc/H9wIak
+pf7u6QBDGUiCMmoDrp1d8RsI1RPbEhoywH0YlLmwgf+cr1dU7vlISf576EsGxFz4
++/sZjIBvZBHn/x0MH+bs4J8V3vMujfDoRdhL07bK7q/AkEALUxljKEfoWeqiuVzK
+F9BVv3xNhiua2kgPVbMNWPrQ5uotkNp8IykJ3QOuQ3p5pzxdGfpLd6f8gmJDmcbi
+AI9dWTcCAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAVVi4t/Sumre+AGTaU7np9dl2
+tpllbES5ixe6m2uezt5wAzYNNyuQ2mMG2XrSkMy5gvBZRT9nRNSmLV8VEcxZihG0
+YHS5soXnLL3Jdlwxp98WTDPvM1ntxcHyEyqrrg9YDfKn4sOrr5vo2yZzoKwtxtc7
+lue9JormVx7GxMi7NwaUtCbnwAIcqJJpFjt1EhmJOxGqTJPgUvTBdeGvRj30c6fk
+pqpUdPbZ7RKPEtbLoMoCBujKnErv+H0G6Vp9WyCHN+Mi9uTMsGwH14cmJjmfwGDC
+8/WF4LdlawFnf/arIp9YcVwcP91d4ywyvbuuo2M7qdosQ7k4uRZ3tyggLYShS3RW
+BMEhMRDz9dM0oKGF+HnaS824BIh6O6Hn82Vt8uCKS7IbEX99/kkN1KcqqQe6Lwjq
+tG/lm4K5yf+FJVDivpZ9mYTvqTBjhTaOp6m3HYSNJfS0hLQVvEuBNXd8bHiXkcLp
+rmFOYUWsjxV1Qku3U5Rner0UpB2Fuw9nJcXuDgWG0gjwzAZ83y3du1VIZp0Ad8Vv
+IYpaucbImGJszMtNXn3l72K1wvQVIhm9eRwYc3QteJzweHaDsbytZEoS/GhTrZIT
+wRe5ZGrjJBJngRANRSm1BH8j6PjLem9mzPb2eytwJJA0lLhUk4vYproVvXcx0vow
+5F+5VB1YB8/tbWePmpo=
+-----END CERTIFICATE-----
diff --git a/internal/otel_collector/exporter/otlphttpexporter/README.md b/internal/otel_collector/exporter/otlphttpexporter/README.md
new file mode 100644
index 00000000000..06aa3d4f47b
--- /dev/null
+++ b/internal/otel_collector/exporter/otlphttpexporter/README.md
@@ -0,0 +1,50 @@
+# OTLP/HTTP Exporter
+
+Exports traces, metrics and/or logs via HTTP using [OTLP](
+https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md)
+format.
+
+*Important: the OTLP metrics format is currently marked as "Alpha" and may change in
+an incompatible way at any time.*
+
+The following settings are required:
+
+- `endpoint` (no default): The target base URL to send data to (e.g.: https://example.com:55681).
+  To send each signal a corresponding path will be added to this base URL, i.e. for traces
+  "/v1/traces" will be appended, for metrics "/v1/metrics" will be appended, and for logs
+  "/v1/logs" will be appended.
+
+The following settings can be optionally configured:
+
+- `traces_endpoint` (no default): The target URL to send trace data to (e.g.: https://example.com:55681/v1/traces).
+  If this setting is present, the `endpoint` setting is ignored for traces.
+- `metrics_endpoint` (no default): The target URL to send metric data to (e.g.: https://example.com:55681/v1/metrics).
+  If this setting is present, the `endpoint` setting is ignored for metrics.
+- `logs_endpoint` (no default): The target URL to send log data to (e.g.: https://example.com:55681/v1/logs).
+  If this setting is present, the `endpoint` setting is ignored for logs.
+
+- `insecure` (default = false): when set to true disables verifying the server's
+  certificate chain and host name. The connection is still encrypted but server identity
+  is not verified.
+- `ca_file` path to the CA cert. For a client this verifies the server certificate. Should
+  only be used if `insecure` is set to false.
+- `cert_file` path to the TLS cert to use for TLS-required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` path to the TLS key to use for TLS-required connections. Should
+  only be used if `insecure` is set to false.
+
+- `timeout` (default = 30s): HTTP request time limit. For details see https://golang.org/pkg/net/http/#Client
+- `read_buffer_size` (default = 0): ReadBufferSize for HTTP client.
+- `write_buffer_size` (default = 512 * 1024): WriteBufferSize for HTTP client.
+
+
+Example:
+
+```yaml
+exporters:
+  otlphttp:
+    endpoint: https://example.com:55681/v1/traces
+```
+
+The full list of settings exposed for this exporter is documented [here](./config.go)
+with detailed sample configurations [here](./testdata/config.yaml).
diff --git a/internal/otel_collector/exporter/otlphttpexporter/config.go b/internal/otel_collector/exporter/otlphttpexporter/config.go
new file mode 100644
index 00000000000..6eb8c76efa3
--- /dev/null
+++ b/internal/otel_collector/exporter/otlphttpexporter/config.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlphttpexporter
+
+import (
+    "go.opentelemetry.io/collector/config/confighttp"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// Config defines configuration for OTLP/HTTP exporter.
+type Config struct {
+    configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+    confighttp.HTTPClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+    exporterhelper.QueueSettings  `mapstructure:"sending_queue"`
+    exporterhelper.RetrySettings  `mapstructure:"retry_on_failure"`
+
+    // The URL to send traces to. If omitted the Endpoint + "/v1/traces" will be used.
+    TracesEndpoint string `mapstructure:"traces_endpoint"`
+
+    // The URL to send metrics to. If omitted the Endpoint + "/v1/metrics" will be used.
+    MetricsEndpoint string `mapstructure:"metrics_endpoint"`
+
+    // The URL to send logs to. If omitted the Endpoint + "/v1/logs" will be used.
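+    // For example (illustrative note added in this vendoring, not upstream
+    // text): with Endpoint "https://example.com:55681" and LogsEndpoint left
+    // empty, logs are sent to "https://example.com:55681/v1/logs".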
+ LogsEndpoint string `mapstructure:"logs_endpoint"` +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/config_test.go b/internal/otel_collector/exporter/otlphttpexporter/config_test.go new file mode 100644 index 00000000000..a0d3aa83a3b --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/config_test.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlphttpexporter + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + e0 := cfg.Exporters["otlphttp"] + assert.Equal(t, e0, factory.CreateDefaultConfig()) + + e1 := cfg.Exporters["otlphttp/2"] + assert.Equal(t, e1, + &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "otlphttp/2", + TypeVal: "otlphttp", + }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Headers: map[string]string{ + "can you have a . here?": "F0000000-0000-0000-0000-000000000000", + "header1": "234", + "another": "somevalue", + }, + Endpoint: "https://1.2.3.4:1234", + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/var/lib/mycert.pem", + CertFile: "certfile", + KeyFile: "keyfile", + }, + Insecure: true, + }, + ReadBufferSize: 123, + WriteBufferSize: 345, + Timeout: time.Second * 10, + }, + }) +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/factory.go b/internal/otel_collector/exporter/otlphttpexporter/factory.go new file mode 100644 index 00000000000..592f0e016ab --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/factory.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlphttpexporter + +import ( + "context" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "otlphttp" +) + +// NewFactory creates a factory for OTLP exporter. +func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTraceExporter), + exporterhelper.WithMetrics(createMetricsExporter), + exporterhelper.WithLogs(createLogsExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "", + Timeout: 30 * time.Second, + Headers: map[string]string{}, + // We almost read 0 bytes, so no need to tune ReadBufferSize. + WriteBufferSize: 512 * 1024, + }, + } +} + +func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string) (string, error) { + switch { + case signalOverrideURL != "": + _, err := url.Parse(signalOverrideURL) + if err != nil { + return "", fmt.Errorf("%s_endpoint must be a valid URL", signalName) + } + return signalOverrideURL, nil + case oCfg.Endpoint == "": + return "", fmt.Errorf("either endpoint or %s_endpoint must be specified", signalName) + default: + return oCfg.Endpoint + "/v1/" + signalName, nil + } +} + +func createTraceExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.TracesExporter, error) { + oce, err := newExporter(cfg, params.Logger) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces") + if err != nil { + return nil, err + } + + return exporterhelper.NewTraceExporter( + cfg, + params.Logger, + oce.pushTraceData, + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} + +func createMetricsExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.MetricsExporter, error) { + oce, err := newExporter(cfg, params.Logger) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics") + if err != nil { + return nil, err + } + + return exporterhelper.NewMetricsExporter( + cfg, + params.Logger, + oce.pushMetricsData, + // explicitly disable since we rely on http.Client timeout logic. 
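+        // (Note added in this vendoring: with the helper timeout disabled, the
+        // effective per-request deadline is HTTPClientSettings.Timeout, which
+        // createDefaultConfig sets to 30s.)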
+ exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} + +func createLogsExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.LogsExporter, error) { + oce, err := newExporter(cfg, params.Logger) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs") + if err != nil { + return nil, err + } + + return exporterhelper.NewLogsExporter( + cfg, + params.Logger, + oce.pushLogData, + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/factory_test.go b/internal/otel_collector/exporter/otlphttpexporter/factory_test.go new file mode 100644 index 00000000000..c9c76a3e2dd --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/factory_test.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlphttpexporter + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/testutil" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) + ocfg, ok := factory.CreateDefaultConfig().(*Config) + assert.True(t, ok) + assert.Equal(t, ocfg.HTTPClientSettings.Endpoint, "") + assert.Equal(t, ocfg.HTTPClientSettings.Timeout, 30*time.Second, "default timeout is 30 second") + assert.Equal(t, ocfg.RetrySettings.Enabled, true, "default retry is enabled") + assert.Equal(t, ocfg.RetrySettings.MaxElapsedTime, 300*time.Second, "default retry MaxElapsedTime") + assert.Equal(t, ocfg.RetrySettings.InitialInterval, 5*time.Second, "default retry InitialInterval") + assert.Equal(t, ocfg.RetrySettings.MaxInterval, 30*time.Second, "default retry MaxInterval") + assert.Equal(t, ocfg.QueueSettings.Enabled, true, "default sending queue is enabled") +} + +func TestCreateMetricsExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.HTTPClientSettings.Endpoint = "http://" + testutil.GetAvailableLocalAddress(t) + + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + oexp, err := factory.CreateMetricsExporter(context.Background(), creationParams, cfg) + require.Nil(t, err) + require.NotNil(t, oexp) +} + +func TestCreateTraceExporter(t *testing.T) { + endpoint := "http://" + testutil.GetAvailableLocalAddress(t) + + tests := []struct { + name string + config Config + mustFail bool + }{ + { + name: "NoEndpoint", + config: Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "", + }, + }, + mustFail: true, + }, + { + name: "UseSecure", + config: Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + Insecure: false, + }, + }, + }, + }, + { + name: "Headers", + config: Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: endpoint, + Headers: map[string]string{ + "hdr1": "val1", + "hdr2": "val2", + }, + }, + }, + }, + { + name: "CaCert", + config: Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "testdata/test_cert.pem", + }, + }, + }, + }, + }, + { + name: "CertPemFileError", + config: Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: endpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "nosuchfile", + }, + }, + }, + }, + mustFail: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + consumer, err := factory.CreateTracesExporter(context.Background(), creationParams, &tt.config) + + if tt.mustFail { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, consumer) + + err = consumer.Shutdown(context.Background()) + if err != nil { + // Since the endpoint of OTLP exporter doesn't actually exist, + // exporter 
may already have stopped because it cannot connect.
+                    assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing")
+                }
+            }
+        })
+    }
+}
+
+func TestCreateLogsExporter(t *testing.T) {
+    factory := NewFactory()
+    cfg := factory.CreateDefaultConfig().(*Config)
+    cfg.HTTPClientSettings.Endpoint = "http://" + testutil.GetAvailableLocalAddress(t)
+
+    creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+    oexp, err := factory.CreateLogsExporter(context.Background(), creationParams, cfg)
+    require.Nil(t, err)
+    require.NotNil(t, oexp)
+}
diff --git a/internal/otel_collector/exporter/otlphttpexporter/otlp.go b/internal/otel_collector/exporter/otlphttpexporter/otlp.go
new file mode 100644
index 00000000000..0b2618f283b
--- /dev/null
+++ b/internal/otel_collector/exporter/otlphttpexporter/otlp.go
@@ -0,0 +1,206 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlphttpexporter
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strconv"
+    "time"
+
+    "go.uber.org/zap"
+    "google.golang.org/genproto/googleapis/rpc/status"
+    "google.golang.org/protobuf/proto"
+
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/consumer/consumererror"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+type exporterImp struct {
+    // Input configuration.
+    config     *Config
+    client     *http.Client
+    tracesURL  string
+    metricsURL string
+    logsURL    string
+    logger     *zap.Logger
+}
+
+const (
+    headerRetryAfter         = "Retry-After"
+    maxHTTPResponseReadBytes = 64 * 1024
+)
+
+// Create new exporter.
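+// (Note added in this vendoring, not upstream text: the per-signal URLs are
+// composed in factory.go via composeSignalURL; with Endpoint
+// "https://example.com:55681" and no overrides, traces are POSTed to
+// "https://example.com:55681/v1/traces", metrics to ".../v1/metrics", and logs
+// to ".../v1/logs".)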
+func newExporter(cfg configmodels.Exporter, logger *zap.Logger) (*exporterImp, error) { + oCfg := cfg.(*Config) + + if oCfg.Endpoint != "" { + _, err := url.Parse(oCfg.Endpoint) + if err != nil { + return nil, errors.New("endpoint must be a valid URL") + } + } + + client, err := oCfg.HTTPClientSettings.ToClient() + if err != nil { + return nil, err + } + + return &exporterImp{ + config: oCfg, + client: client, + logger: logger, + }, nil +} + +func (e *exporterImp) pushTraceData(ctx context.Context, traces pdata.Traces) (int, error) { + request, err := traces.ToOtlpProtoBytes() + if err != nil { + return traces.SpanCount(), consumererror.Permanent(err) + } + + err = e.export(ctx, e.tracesURL, request) + if err != nil { + return traces.SpanCount(), err + } + + return 0, nil +} + +func (e *exporterImp) pushMetricsData(ctx context.Context, metrics pdata.Metrics) (int, error) { + request, err := metrics.ToOtlpProtoBytes() + if err != nil { + return metrics.MetricCount(), consumererror.Permanent(err) + } + + err = e.export(ctx, e.metricsURL, request) + if err != nil { + return metrics.MetricCount(), err + } + + return 0, nil +} + +func (e *exporterImp) pushLogData(ctx context.Context, logs pdata.Logs) (int, error) { + request, err := logs.ToOtlpProtoBytes() + if err != nil { + return logs.LogRecordCount(), consumererror.Permanent(err) + } + + err = e.export(ctx, e.logsURL, request) + if err != nil { + return logs.LogRecordCount(), err + } + + return 0, nil +} + +func (e *exporterImp) export(ctx context.Context, url string, request []byte) error { + e.logger.Debug("Preparing to make HTTP request", zap.String("url", url)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) + if err != nil { + return consumererror.Permanent(err) + } + req.Header.Set("Content-Type", "application/x-protobuf") + + resp, err := e.client.Do(req) + if err != nil { + return fmt.Errorf("failed to make an HTTP request: %w", err) + } + + defer func() { + // Discard any remaining response body when we are done reading. + io.CopyN(ioutil.Discard, resp.Body, maxHTTPResponseReadBytes) + resp.Body.Close() + }() + + if resp.StatusCode >= 200 && resp.StatusCode <= 299 { + // Request is successful. + return nil + } + + respStatus := readResponse(resp) + + // Format the error message. Use the status if it is present in the response. + var formattedErr error + if respStatus != nil { + formattedErr = fmt.Errorf( + "error exporting items, request to %s responded with HTTP Status Code %d, Message=%s, Details=%v", + url, resp.StatusCode, respStatus.Message, respStatus.Details) + } else { + formattedErr = fmt.Errorf( + "error exporting items, request to %s responded with HTTP Status Code %d", + url, resp.StatusCode) + } + + // Check if the server is overwhelmed. + // See spec https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md#throttling-1 + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + // Fallback to 0 if the Retry-After header is not present. This will trigger the + // default backoff policy by our caller (retry handler). + retryAfter := 0 + if val := resp.Header.Get(headerRetryAfter); val != "" { + if seconds, err2 := strconv.Atoi(val); err2 == nil { + retryAfter = seconds + } + } + // Indicate to our caller to pause for the specified number of seconds. 
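+        // (Note added in this vendoring: e.g. a 503 response carrying
+        // "Retry-After: 30" yields a 30s throttle delay here, while a missing or
+        // non-integer header leaves retryAfter at 0 and the default backoff applies.)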
+ return exporterhelper.NewThrottleRetry(formattedErr, time.Duration(retryAfter)*time.Second) + } + + if resp.StatusCode == http.StatusBadRequest { + // Report the failure as permanent if the server thinks the request is malformed. + return consumererror.Permanent(formattedErr) + } + + // All other errors are retryable, so don't wrap them in consumererror.Permanent(). + return formattedErr +} + +// Read the response and decode the status.Status from the body. +// Returns nil if the response is empty or cannot be decoded. +func readResponse(resp *http.Response) *status.Status { + var respStatus *status.Status + if resp.StatusCode >= 400 && resp.StatusCode <= 599 { + // Request failed. Read the body. OTLP spec says: + // "Response body for all HTTP 4xx and HTTP 5xx responses MUST be a + // Protobuf-encoded Status message that describes the problem." + maxRead := resp.ContentLength + if maxRead == -1 || maxRead > maxHTTPResponseReadBytes { + maxRead = maxHTTPResponseReadBytes + } + respBytes := make([]byte, maxRead) + n, err := io.ReadFull(resp.Body, respBytes) + if err == nil && n > 0 { + // Decode it as Status struct. See https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md#failures + respStatus = &status.Status{} + err = proto.Unmarshal(respBytes, respStatus) + if err != nil { + respStatus = nil + } + } + } + + return respStatus +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/otlp_test.go b/internal/otel_collector/exporter/otlphttpexporter/otlp_test.go new file mode 100644 index 00000000000..f9bc3ce98df --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/otlp_test.go @@ -0,0 +1,416 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlphttpexporter + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/collector/testutil" +) + +func TestInvalidConfig(t *testing.T) { + config := &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "", + }, + } + f := NewFactory() + params := component.ExporterCreateParams{Logger: zap.NewNop()} + _, err := f.CreateTracesExporter(context.Background(), params, config) + require.Error(t, err) + _, err = f.CreateMetricsExporter(context.Background(), params, config) + require.Error(t, err) + _, err = f.CreateLogsExporter(context.Background(), params, config) + require.Error(t, err) +} + +func TestTraceNoBackend(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + exp := startTraceExporter(t, "", fmt.Sprintf("http://%s/v1/traces", addr)) + td := testdata.GenerateTraceDataOneSpan() + assert.Error(t, exp.ConsumeTraces(context.Background(), td)) +} + +func TestTraceInvalidUrl(t *testing.T) { + exp := startTraceExporter(t, "http:/\\//this_is_an/*/invalid_url", "") + td := testdata.GenerateTraceDataOneSpan() + assert.Error(t, exp.ConsumeTraces(context.Background(), td)) + + exp = startTraceExporter(t, "", "http:/\\//this_is_an/*/invalid_url") + td = testdata.GenerateTraceDataOneSpan() + assert.Error(t, exp.ConsumeTraces(context.Background(), td)) +} + +func TestTraceError(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + + sink := new(consumertest.TracesSink) + sink.SetConsumeError(errors.New("my_error")) + startTraceReceiver(t, addr, sink) + exp := startTraceExporter(t, "", fmt.Sprintf("http://%s/v1/traces", addr)) + + td := testdata.GenerateTraceDataOneSpan() + assert.Error(t, exp.ConsumeTraces(context.Background(), td)) +} + +func TestTraceRoundTrip(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + + tests := []struct { + name string + baseURL string + overrideURL string + }{ + { + name: "wrongbase", + baseURL: "http://wronghostname", + overrideURL: fmt.Sprintf("http://%s/v1/traces", addr), + }, + { + name: "onlybase", + baseURL: fmt.Sprintf("http://%s", addr), + overrideURL: "", + }, + { + name: "override", + baseURL: "", + overrideURL: fmt.Sprintf("http://%s/v1/traces", addr), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sink := new(consumertest.TracesSink) + startTraceReceiver(t, addr, sink) + exp := startTraceExporter(t, test.baseURL, test.overrideURL) + + td := testdata.GenerateTraceDataOneSpan() + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + require.Eventually(t, func() bool { + return sink.SpansCount() > 0 + }, 1*time.Second, 10*time.Millisecond) + allTraces := sink.AllTraces() + require.Len(t, allTraces, 1) + assert.EqualValues(t, 
td, allTraces[0]) + }) + } +} + +func TestMetricsError(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + + sink := new(consumertest.MetricsSink) + sink.SetConsumeError(errors.New("my_error")) + startMetricsReceiver(t, addr, sink) + exp := startMetricsExporter(t, "", fmt.Sprintf("http://%s/v1/metrics", addr)) + + md := testdata.GenerateMetricsOneMetric() + assert.Error(t, exp.ConsumeMetrics(context.Background(), md)) +} + +func TestMetricsRoundTrip(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + + tests := []struct { + name string + baseURL string + overrideURL string + }{ + { + name: "wrongbase", + baseURL: "http://wronghostname", + overrideURL: fmt.Sprintf("http://%s/v1/metrics", addr), + }, + { + name: "onlybase", + baseURL: fmt.Sprintf("http://%s", addr), + overrideURL: "", + }, + { + name: "override", + baseURL: "", + overrideURL: fmt.Sprintf("http://%s/v1/metrics", addr), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sink := new(consumertest.MetricsSink) + startMetricsReceiver(t, addr, sink) + exp := startMetricsExporter(t, test.baseURL, test.overrideURL) + + md := testdata.GenerateMetricsOneMetric() + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) + require.Eventually(t, func() bool { + return sink.MetricsCount() > 0 + }, 1*time.Second, 10*time.Millisecond) + allMetrics := sink.AllMetrics() + require.Len(t, allMetrics, 1) + assert.EqualValues(t, md, allMetrics[0]) + }) + } +} + +func TestLogsError(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + + sink := new(consumertest.LogsSink) + sink.SetConsumeError(errors.New("my_error")) + startLogsReceiver(t, addr, sink) + exp := startLogsExporter(t, "", fmt.Sprintf("http://%s/v1/logs", addr)) + + md := testdata.GenerateLogDataOneLog() + assert.Error(t, exp.ConsumeLogs(context.Background(), md)) +} + +func TestLogsRoundTrip(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + + tests := []struct { + name string + baseURL string + overrideURL string + }{ + { + name: "wrongbase", + baseURL: "http://wronghostname", + overrideURL: fmt.Sprintf("http://%s/v1/logs", addr), + }, + { + name: "onlybase", + baseURL: fmt.Sprintf("http://%s", addr), + overrideURL: "", + }, + { + name: "override", + baseURL: "", + overrideURL: fmt.Sprintf("http://%s/v1/logs", addr), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sink := new(consumertest.LogsSink) + startLogsReceiver(t, addr, sink) + exp := startLogsExporter(t, test.baseURL, test.overrideURL) + + md := testdata.GenerateLogDataOneLog() + assert.NoError(t, exp.ConsumeLogs(context.Background(), md)) + require.Eventually(t, func() bool { + return sink.LogRecordsCount() > 0 + }, 1*time.Second, 10*time.Millisecond) + allLogs := sink.AllLogs() + require.Len(t, allLogs, 1) + assert.EqualValues(t, md, allLogs[0]) + }) + } +} + +func startTraceExporter(t *testing.T, baseURL string, overrideURL string) component.TracesExporter { + factory := NewFactory() + cfg := createExporterConfig(baseURL, factory.CreateDefaultConfig()) + cfg.TracesEndpoint = overrideURL + exp, err := factory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + startAndCleanup(t, exp) + return exp +} + +func startMetricsExporter(t *testing.T, baseURL string, overrideURL string) component.MetricsExporter { + factory := NewFactory() + cfg := createExporterConfig(baseURL, factory.CreateDefaultConfig()) + cfg.MetricsEndpoint = 
overrideURL + exp, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + startAndCleanup(t, exp) + return exp +} + +func startLogsExporter(t *testing.T, baseURL string, overrideURL string) component.LogsExporter { + factory := NewFactory() + cfg := createExporterConfig(baseURL, factory.CreateDefaultConfig()) + cfg.LogsEndpoint = overrideURL + exp, err := factory.CreateLogsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + startAndCleanup(t, exp) + return exp +} + +func createExporterConfig(baseURL string, defaultCfg configmodels.Exporter) *Config { + cfg := defaultCfg.(*Config) + cfg.Endpoint = baseURL + cfg.QueueSettings.Enabled = false + cfg.RetrySettings.Enabled = false + return cfg +} + +func startTraceReceiver(t *testing.T, addr string, next consumer.TracesConsumer) { + factory := otlpreceiver.NewFactory() + cfg := createReceiverConfig(addr, factory.CreateDefaultConfig()) + recv, err := factory.CreateTracesReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, next) + require.NoError(t, err) + startAndCleanup(t, recv) +} + +func startMetricsReceiver(t *testing.T, addr string, next consumer.MetricsConsumer) { + factory := otlpreceiver.NewFactory() + cfg := createReceiverConfig(addr, factory.CreateDefaultConfig()) + recv, err := factory.CreateMetricsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, next) + require.NoError(t, err) + startAndCleanup(t, recv) +} + +func startLogsReceiver(t *testing.T, addr string, next consumer.LogsConsumer) { + factory := otlpreceiver.NewFactory() + cfg := createReceiverConfig(addr, factory.CreateDefaultConfig()) + recv, err := factory.CreateLogsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, next) + require.NoError(t, err) + startAndCleanup(t, recv) +} + +func createReceiverConfig(addr string, defaultCfg configmodels.Exporter) *otlpreceiver.Config { + cfg := defaultCfg.(*otlpreceiver.Config) + cfg.HTTP.Endpoint = addr + cfg.GRPC = nil + return cfg +} + +func startAndCleanup(t *testing.T, cmp component.Component) { + require.NoError(t, cmp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + require.NoError(t, cmp.Shutdown(context.Background())) + }) +} + +func TestErrorResponses(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + errMsgPrefix := fmt.Sprintf("error exporting items, request to http://%s/v1/traces responded with HTTP Status Code ", addr) + + tests := []struct { + name string + responseStatus int + responseBody *status.Status + err error + isPermErr bool + headers map[string]string + }{ + { + name: "400", + responseStatus: http.StatusBadRequest, + responseBody: status.New(codes.InvalidArgument, "Bad field"), + isPermErr: true, + }, + { + name: "404", + responseStatus: http.StatusNotFound, + err: fmt.Errorf(errMsgPrefix + "404"), + }, + { + name: "419", + responseStatus: http.StatusTooManyRequests, + responseBody: status.New(codes.InvalidArgument, "Quota exceeded"), + err: exporterhelper.NewThrottleRetry( + fmt.Errorf(errMsgPrefix+"429, Message=Quota exceeded, Details=[]"), + time.Duration(0)*time.Second), + }, + { + name: "503", + responseStatus: http.StatusServiceUnavailable, + responseBody: status.New(codes.InvalidArgument, "Server overloaded"), + err: exporterhelper.NewThrottleRetry( + fmt.Errorf(errMsgPrefix+"503, 
Message=Server overloaded, Details=[]"), + time.Duration(0)*time.Second), + }, + { + name: "503-Retry-After", + responseStatus: http.StatusServiceUnavailable, + responseBody: status.New(codes.InvalidArgument, "Server overloaded"), + headers: map[string]string{"Retry-After": "30"}, + err: exporterhelper.NewThrottleRetry( + fmt.Errorf(errMsgPrefix+"503, Message=Server overloaded, Details=[]"), + time.Duration(30)*time.Second), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/v1/traces", func(writer http.ResponseWriter, request *http.Request) { + for k, v := range test.headers { + writer.Header().Add(k, v) + } + writer.WriteHeader(test.responseStatus) + if test.responseBody != nil { + msg, err := proto.Marshal(test.responseBody.Proto()) + require.NoError(t, err) + writer.Write(msg) + } + }) + srv := http.Server{ + Addr: addr, + Handler: mux, + } + ln, err := net.Listen("tcp", addr) + require.NoError(t, err) + go func() { + _ = srv.Serve(ln) + }() + + cfg := &Config{ + TracesEndpoint: fmt.Sprintf("http://%s/v1/traces", addr), + // Create without QueueSettings and RetrySettings so that ConsumeTraces + // returns the errors that we want to check immediately. + } + exp, err := createTraceExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + + traces := pdata.NewTraces() + err = exp.ConsumeTraces(context.Background(), traces) + assert.Error(t, err) + + if test.isPermErr { + assert.True(t, consumererror.IsPermanent(err)) + } else { + assert.EqualValues(t, test.err, err) + } + + srv.Close() + }) + } +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/testdata/config.yaml b/internal/otel_collector/exporter/otlphttpexporter/testdata/config.yaml new file mode 100644 index 00000000000..bf6bb642445 --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/testdata/config.yaml @@ -0,0 +1,37 @@ +receivers: + examplereceiver: + +processors: + exampleprocessor: + +exporters: + otlphttp: + otlphttp/2: + endpoint: "https://1.2.3.4:1234" + insecure: true + ca_file: /var/lib/mycert.pem + cert_file: certfile + key_file: keyfile + timeout: 10s + read_buffer_size: 123 + write_buffer_size: 345 + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + headers: + "can you have a . 
here?": "F0000000-0000-0000-0000-000000000000" + header1: 234 + another: "somevalue" + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [otlphttp] diff --git a/internal/otel_collector/exporter/otlphttpexporter/testdata/test_cert.pem b/internal/otel_collector/exporter/otlphttpexporter/testdata/test_cert.pem new file mode 100644 index 00000000000..b2e77b89d49 --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/testdata/test_cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE6jCCAtICCQDVU4PtqpqADTANBgkqhkiG9w0BAQsFADA3MQswCQYDVQQGEwJV +UzETMBEGA1UECAwKY2FsaWZvcm5pYTETMBEGA1UECgwKb3BlbmNlbnN1czAeFw0x +OTAzMDQxODA3MjZaFw0yMDAzMDMxODA3MjZaMDcxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIDApjYWxpZm9ybmlhMRMwEQYDVQQKDApvcGVuY2Vuc3VzMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAy9JQiAOMzArcdiS4szbTuzg5yYijSSY6SvGj +XMs4/LEFLxgGmFfyHXxoVQzV26lTu/AiUFlZi4JY2qlkZyPwmmmSg4fmzikpVPiC +Vv9pvSIojs8gs0sHaOt40Q8ym43bNt3Mh8rYrs+XMERi6Ol9//j4LnfePkNU5uEo +qC8KQamckaMR6UEHFNunyOwvNBsipgTPldQUPGVnCsNKk8olYGAXS7DR25bgbPli +4T9VCSElsSPAODmyo+2MEDagVXa1vVYxKyO2k6oeBS0lsvdRqRTmGggcg0B/dk+a +H1CL9ful0cu9P3dQif+hfGay8udPkwDLPEq1+WnjJFut3Pmbk3SqUCas5iWt76kK +eKFh4k8fCy4yiaZxzvSbm9+bEBHAl0ZXd8pjvAsBfCKe6G9SBzE1DK4FjWiiEGCb +5dGsyTKr33q3DekLvT3LF8ZeON/13d9toucX9PqG2HDwMP/Fb4WjQIzOc/H9wIak +pf7u6QBDGUiCMmoDrp1d8RsI1RPbEhoywH0YlLmwgf+cr1dU7vlISf576EsGxFz4 ++/sZjIBvZBHn/x0MH+bs4J8V3vMujfDoRdhL07bK7q/AkEALUxljKEfoWeqiuVzK +F9BVv3xNhiua2kgPVbMNWPrQ5uotkNp8IykJ3QOuQ3p5pzxdGfpLd6f8gmJDmcbi +AI9dWTcCAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAVVi4t/Sumre+AGTaU7np9dl2 +tpllbES5ixe6m2uezt5wAzYNNyuQ2mMG2XrSkMy5gvBZRT9nRNSmLV8VEcxZihG0 +YHS5soXnLL3Jdlwxp98WTDPvM1ntxcHyEyqrrg9YDfKn4sOrr5vo2yZzoKwtxtc7 +lue9JormVx7GxMi7NwaUtCbnwAIcqJJpFjt1EhmJOxGqTJPgUvTBdeGvRj30c6fk +pqpUdPbZ7RKPEtbLoMoCBujKnErv+H0G6Vp9WyCHN+Mi9uTMsGwH14cmJjmfwGDC +8/WF4LdlawFnf/arIp9YcVwcP91d4ywyvbuuo2M7qdosQ7k4uRZ3tyggLYShS3RW +BMEhMRDz9dM0oKGF+HnaS824BIh6O6Hn82Vt8uCKS7IbEX99/kkN1KcqqQe6Lwjq +tG/lm4K5yf+FJVDivpZ9mYTvqTBjhTaOp6m3HYSNJfS0hLQVvEuBNXd8bHiXkcLp +rmFOYUWsjxV1Qku3U5Rner0UpB2Fuw9nJcXuDgWG0gjwzAZ83y3du1VIZp0Ad8Vv +IYpaucbImGJszMtNXn3l72K1wvQVIhm9eRwYc3QteJzweHaDsbytZEoS/GhTrZIT +wRe5ZGrjJBJngRANRSm1BH8j6PjLem9mzPb2eytwJJA0lLhUk4vYproVvXcx0vow +5F+5VB1YB8/tbWePmpo= +-----END CERTIFICATE----- diff --git a/internal/otel_collector/exporter/prometheusexporter/README.md b/internal/otel_collector/exporter/prometheusexporter/README.md new file mode 100644 index 00000000000..5f77e711825 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusexporter/README.md @@ -0,0 +1,31 @@ +# Prometheus Exporter + +Exports data to a [Prometheus](https://prometheus.io/) back-end. + +Supported pipeline types: metrics + +## Getting Started + +The following settings are required: + +- `endpoint` (no default): Where to send metric data + +The following settings can be optionally configured: + +- `constlabels` (no default): key/values that are applied for every exported metric. +- `namespace` (no default): if set, exports metrics under the provided value. +- `send_timestamps` (default = `false`): if true, sends the timestamp of the underlying + metric sample in the response. 
+ +Example: + +```yaml +exporters: + prometheus: + endpoint: "1.2.3.4:1234" + namespace: test-space + const_labels: + label1: value1 + "another label": spaced value + send_timestamps: true +``` diff --git a/internal/otel_collector/exporter/prometheusexporter/config.go b/internal/otel_collector/exporter/prometheusexporter/config.go new file mode 100644 index 00000000000..e02185d35b5 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusexporter/config.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusexporter + +import ( + "github.com/prometheus/client_golang/prometheus" + + "go.opentelemetry.io/collector/config/configmodels" +) + +// Config defines configuration for Prometheus exporter. +type Config struct { + configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + + // The address on which the Prometheus scrape handler will be run on. + Endpoint string `mapstructure:"endpoint"` + + // Namespace if set, exports metrics under the provided value. + Namespace string `mapstructure:"namespace"` + + // ConstLabels are values that are applied for every exported metric. + ConstLabels prometheus.Labels `mapstructure:"const_labels"` + + // SendTimestamps will send the underlying scrape timestamp with the export + SendTimestamps bool `mapstructure:"send_timestamps"` +} diff --git a/internal/otel_collector/exporter/prometheusexporter/config_test.go b/internal/otel_collector/exporter/prometheusexporter/config_test.go new file mode 100644 index 00000000000..db7ae836b41 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusexporter/config_test.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package prometheusexporter
+
+import (
+	"path"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtest"
+)
+
+func TestLoadConfig(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	factory := NewFactory()
+	factories.Exporters[typeStr] = factory
+	cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+
+	require.NoError(t, err)
+	require.NotNil(t, cfg)
+
+	e0 := cfg.Exporters["prometheus"]
+	assert.Equal(t, e0, factory.CreateDefaultConfig())
+
+	e1 := cfg.Exporters["prometheus/2"]
+	assert.Equal(t, e1,
+		&Config{
+			ExporterSettings: configmodels.ExporterSettings{
+				NameVal: "prometheus/2",
+				TypeVal: "prometheus",
+			},
+			Endpoint:  "1.2.3.4:1234",
+			Namespace: "test-space",
+			ConstLabels: map[string]string{
+				"label1":        "value1",
+				"another label": "spaced value",
+			},
+			SendTimestamps: true,
+		})
+}
diff --git a/internal/otel_collector/exporter/prometheusexporter/factory.go b/internal/otel_collector/exporter/prometheusexporter/factory.go
new file mode 100644
index 00000000000..90f5c197c26
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusexporter/factory.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusexporter
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"strings"
+
+	"github.com/orijtech/prometheus-go-metrics-exporter"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr = "prometheus"
+)
+
+// NewFactory creates a factory for the Prometheus exporter.
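+// The metrics exporter it creates starts an HTTP server on the configured
+// endpoint and serves scrapes on /metrics until Shutdown closes the listener.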
+func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithMetrics(createMetricsExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + ConstLabels: map[string]string{}, + SendTimestamps: false, + } +} + +func createMetricsExporter( + _ context.Context, + _ component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.MetricsExporter, error) { + pcfg := cfg.(*Config) + + addr := strings.TrimSpace(pcfg.Endpoint) + if addr == "" { + return nil, errBlankPrometheusAddress + } + + opts := prometheus.Options{ + Namespace: pcfg.Namespace, + ConstLabels: pcfg.ConstLabels, + SendTimestamps: pcfg.SendTimestamps, + } + pe, err := prometheus.New(opts) + if err != nil { + return nil, err + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + + // The Prometheus metrics exporter has to run on the provided address + // as a server that'll be scraped by Prometheus. + mux := http.NewServeMux() + mux.Handle("/metrics", pe) + + srv := &http.Server{Handler: mux} + go func() { + _ = srv.Serve(ln) + }() + + pexp := &prometheusExporter{ + name: cfg.Name(), + exporter: pe, + shutdownFunc: ln.Close, + } + + return pexp, nil +} diff --git a/internal/otel_collector/exporter/prometheusexporter/factory_test.go b/internal/otel_collector/exporter/prometheusexporter/factory_test.go new file mode 100644 index 00000000000..22b2fb988fb --- /dev/null +++ b/internal/otel_collector/exporter/prometheusexporter/factory_test.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateMetricsExporter(t *testing.T) { + cfg := createDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Endpoint = "" + exp, err := createMetricsExporter( + context.Background(), + component.ExporterCreateParams{Logger: zap.NewNop()}, + cfg) + require.Equal(t, errBlankPrometheusAddress, err) + require.Nil(t, exp) +} diff --git a/internal/otel_collector/exporter/prometheusexporter/prometheus.go b/internal/otel_collector/exporter/prometheusexporter/prometheus.go new file mode 100644 index 00000000000..94e3a82bce9 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusexporter/prometheus.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusexporter + +import ( + "bytes" + "context" + "errors" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + // TODO: once this repository has been transferred to the + // official census-ecosystem location, update this import path. + "github.com/orijtech/prometheus-go-metrics-exporter" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/internaldata" +) + +var errBlankPrometheusAddress = errors.New("expecting a non-blank address to run the Prometheus metrics handler") + +type prometheusExporter struct { + name string + exporter *prometheus.Exporter + shutdownFunc func() error +} + +func (pe *prometheusExporter) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (pe *prometheusExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { + ocmds := internaldata.MetricsToOC(md) + for _, ocmd := range ocmds { + merged := make(map[string]*metricspb.Metric) + for _, metric := range ocmd.Metrics { + merge(merged, metric) + } + for _, metric := range merged { + _ = pe.exporter.ExportMetric(ctx, ocmd.Node, ocmd.Resource, metric) + } + } + return nil +} + +// The underlying exporter overwrites timeseries when there are conflicting metric signatures. +// Therefore, we need to merge timeseries that share a metric signature into a single metric before sending. +func merge(m map[string]*metricspb.Metric, metric *metricspb.Metric) { + key := metricSignature(metric) + current, ok := m[key] + if !ok { + m[key] = metric + return + } + current.Timeseries = append(current.Timeseries, metric.Timeseries...) 
+}
+
+// Unique identifier of a given Prometheus metric.
+// Assumes label keys are always in the same order.
+func metricSignature(metric *metricspb.Metric) string {
+	var buf bytes.Buffer
+	buf.WriteString(metric.GetMetricDescriptor().GetName())
+	labelKeys := metric.GetMetricDescriptor().GetLabelKeys()
+	for _, labelKey := range labelKeys {
+		buf.WriteString("-" + labelKey.Key)
+	}
+	return buf.String()
+}
+
+// Shutdown stops the exporter and is invoked during shutdown.
+func (pe *prometheusExporter) Shutdown(context.Context) error {
+	return pe.shutdownFunc()
+}
diff --git a/internal/otel_collector/exporter/prometheusexporter/prometheus_test.go b/internal/otel_collector/exporter/prometheusexporter/prometheus_test.go
new file mode 100644
index 00000000000..5814c83e4f7
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusexporter/prometheus_test.go
@@ -0,0 +1,248 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusexporter
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/consumerdata"
+	"go.opentelemetry.io/collector/translator/internaldata"
+)
+
+func TestPrometheusExporter(t *testing.T) {
+	tests := []struct {
+		config  *Config
+		wantErr string
+	}{
+		{
+			config: &Config{
+				Namespace: "test",
+				ConstLabels: map[string]string{
+					"foo0":  "bar0",
+					"code0": "one0",
+				},
+				Endpoint:       ":8999",
+				SendTimestamps: false,
+			},
+		},
+		{
+			config:  &Config{},
+			wantErr: "expecting a non-blank address to run the Prometheus metrics handler",
+		},
+	}
+
+	factory := NewFactory()
+	creationParams := component.ExporterCreateParams{Logger: zap.NewNop()}
+	for _, tt := range tests {
+		// Run it a few times to ensure that shutdowns exit cleanly.
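+		// Each iteration builds a fresh exporter on the same endpoint, so a
+		// listener leaked by a previous Shutdown would make the next create fail.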
+ for j := 0; j < 3; j++ { + exp, err := factory.CreateMetricsExporter(context.Background(), creationParams, tt.config) + + if tt.wantErr != "" { + require.Error(t, err) + assert.Equal(t, tt.wantErr, err.Error()) + continue + } else { + require.NoError(t, err) + } + + assert.NotNil(t, exp) + require.Nil(t, err) + require.NoError(t, exp.Shutdown(context.Background())) + } + } +} + +func TestPrometheusExporter_endToEnd(t *testing.T) { + config := &Config{ + Namespace: "test", + ConstLabels: map[string]string{ + "foo1": "bar1", + "code1": "one1", + }, + Endpoint: ":7777", + } + + factory := NewFactory() + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + exp, err := factory.CreateMetricsExporter(context.Background(), creationParams, config) + assert.NoError(t, err) + + t.Cleanup(func() { + require.NoError(t, exp.Shutdown(context.Background())) + // trigger a get so that the server cleans up our keepalive socket + http.Get("http://localhost:7777/metrics") + }) + + assert.NotNil(t, exp) + + for delta := 0; delta <= 20; delta += 10 { + md := internaldata.OCToMetrics(consumerdata.MetricsData{Metrics: metricBuilder(int64(delta))}) + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) + + res, err := http.Get("http://localhost:7777/metrics") + require.NoError(t, err, "Failed to perform a scrape") + + if g, w := res.StatusCode, 200; g != w { + t.Errorf("Mismatched HTTP response status code: Got: %d Want: %d", g, w) + } + blob, _ := ioutil.ReadAll(res.Body) + _ = res.Body.Close() + want := []string{ + `# HELP test_this_one_there_where_ Extra ones`, + `# TYPE test_this_one_there_where_ counter`, + fmt.Sprintf(`test_this_one_there_where_{arch="x86",code1="one1",foo1="bar1",os="windows"} %v`, 99+delta), + fmt.Sprintf(`test_this_one_there_where_{arch="x86",code1="one1",foo1="bar1",os="linux"} %v`, 100+delta), + } + + for _, w := range want { + if !strings.Contains(string(blob), w) { + t.Errorf("Missing %v from response:\n%v", w, string(blob)) + } + } + } +} + +func TestPrometheusExporter_endToEndWithTimestamps(t *testing.T) { + config := &Config{ + Namespace: "test", + ConstLabels: map[string]string{ + "foo2": "bar2", + "code2": "one2", + }, + Endpoint: ":7777", + SendTimestamps: true, + } + + factory := NewFactory() + creationParams := component.ExporterCreateParams{Logger: zap.NewNop()} + exp, err := factory.CreateMetricsExporter(context.Background(), creationParams, config) + assert.NoError(t, err) + + t.Cleanup(func() { + require.NoError(t, exp.Shutdown(context.Background())) + // trigger a get so that the server cleans up our keepalive socket + http.Get("http://localhost:7777/metrics") + }) + + assert.NotNil(t, exp) + + for delta := 0; delta <= 20; delta += 10 { + md := internaldata.OCToMetrics(consumerdata.MetricsData{Metrics: metricBuilder(int64(delta))}) + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) + + res, err := http.Get("http://localhost:7777/metrics") + require.NoError(t, err, "Failed to perform a scrape") + + if g, w := res.StatusCode, 200; g != w { + t.Errorf("Mismatched HTTP response status code: Got: %d Want: %d", g, w) + } + blob, _ := ioutil.ReadAll(res.Body) + _ = res.Body.Close() + want := []string{ + `# HELP test_this_one_there_where_ Extra ones`, + `# TYPE test_this_one_there_where_ counter`, + fmt.Sprintf(`test_this_one_there_where_{arch="x86",code2="one2",foo2="bar2",os="windows"} %v %v`, 99+delta, 1543160298100), + fmt.Sprintf(`test_this_one_there_where_{arch="x86",code2="one2",foo2="bar2",os="linux"} %v %v`, 100+delta, 
1543160298100),
+		}
+
+		for _, w := range want {
+			if !strings.Contains(string(blob), w) {
+				t.Errorf("Missing %v from response:\n%v", w, string(blob))
+			}
+		}
+	}
+}
+
+func metricBuilder(delta int64) []*metricspb.Metric {
+	return []*metricspb.Metric{
+		{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        "this/one/there(where)",
+				Description: "Extra ones",
+				Unit:        "1",
+				Type:        metricspb.MetricDescriptor_CUMULATIVE_INT64,
+				LabelKeys:   []*metricspb.LabelKey{{Key: "os"}, {Key: "arch"}},
+			},
+			Timeseries: []*metricspb.TimeSeries{
+				{
+					StartTimestamp: &timestamppb.Timestamp{
+						Seconds: 1543160298,
+						Nanos:   100000090,
+					},
+					LabelValues: []*metricspb.LabelValue{
+						{Value: "windows", HasValue: true},
+						{Value: "x86", HasValue: true},
+					},
+					Points: []*metricspb.Point{
+						{
+							Timestamp: &timestamppb.Timestamp{
+								Seconds: 1543160298,
+								Nanos:   100000997,
+							},
+							Value: &metricspb.Point_Int64Value{
+								Int64Value: 99 + delta,
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        "this/one/there(where)",
+				Description: "Extra ones",
+				Unit:        "1",
+				Type:        metricspb.MetricDescriptor_CUMULATIVE_INT64,
+				LabelKeys:   []*metricspb.LabelKey{{Key: "os"}, {Key: "arch"}},
+			},
+			Timeseries: []*metricspb.TimeSeries{
+				{
+					StartTimestamp: &timestamppb.Timestamp{
+						Seconds: 1543160298,
+						Nanos:   100000090,
+					},
+					LabelValues: []*metricspb.LabelValue{
+						{Value: "linux", HasValue: true},
+						{Value: "x86", HasValue: true},
+					},
+					Points: []*metricspb.Point{
+						{
+							Timestamp: &timestamppb.Timestamp{
+								Seconds: 1543160298,
+								Nanos:   100000997,
+							},
+							Value: &metricspb.Point_Int64Value{
+								Int64Value: 100 + delta,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
diff --git a/internal/otel_collector/exporter/prometheusexporter/testdata/config.yaml b/internal/otel_collector/exporter/prometheusexporter/testdata/config.yaml
new file mode 100644
index 00000000000..8908e9e5a20
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusexporter/testdata/config.yaml
@@ -0,0 +1,22 @@
+receivers:
+  examplereceiver:
+
+processors:
+  exampleprocessor:
+
+exporters:
+  prometheus:
+  prometheus/2:
+    endpoint: "1.2.3.4:1234"
+    namespace: test-space
+    const_labels:
+      label1: value1
+      "another label": spaced value
+    send_timestamps: true
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [prometheus]
diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/DESIGN.md b/internal/otel_collector/exporter/prometheusremotewriteexporter/DESIGN.md
new file mode 100644
index 00000000000..67e8ee46166
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/DESIGN.md
@@ -0,0 +1,288 @@
+
+
+# **OpenTelemetry Collector Prometheus Remote Write/Cortex Exporter Design**
+
+Authors: @huyan0, @danielbang907
+
+Date: July 30, 2020
+
+## **1. Introduction**
+
+Prometheus can be integrated with remote storage systems that support its remote write API. Existing remote storage integration support is included in [Cortex](https://cortexmetrics.io/docs/api/), [influxDB](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/), and many [others](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+
+The following diagram shows an example of Prometheus remote write API usage, with Cortex, an open-source, horizontally scalable, highly available, multi-tenant, long-term storage system, as a remote storage backend.
+
+![Cortex Architecture](./img/cortex.png)
+
+Our project is focused on developing an exporter for the OpenTelemetry Collector to any Prometheus remote storage backend.
+
+### **1.1 Remote Write API**
+
+The Prometheus remote write/Cortex exporter should write metrics to a remote URL in a snappy-compressed, [protocol buffer](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto#L22) encoded HTTP request defined by the Prometheus remote write API. Each request should encode multiple Prometheus remote write TimeSeries, which are composed of a set of labels and a collection of samples. Each label contains a name-value pair of strings, and each sample contains a timestamp-value number pair.
+
+![Image of TimeSeries](./img/timeseries.png)
+
+TimeSeries stores its metric name in its labels and does not describe metric types or start timestamps. To convert to TimeSeries data, buckets of a Histogram are broken down into individual TimeSeries with a bound label (`le`), and a similar process happens with quantiles in a Summary.
+
+
+More details of the Prometheus remote write API can be found in the Prometheus [documentation](https://prometheus.io/docs/prometheus/latest/storage/#overview) and the Cortex [documentation](https://cortexmetrics.io/docs/api/).
+
+### **1.2 Gaps and Assumptions**
+
+**Gap 1:**
+Currently, metrics from the OpenTelemetry SDKs cannot be exported to Prometheus from the collector correctly ([#1255](https://github.com/open-telemetry/opentelemetry-collector/issues/1255)). This is because the SDKs send metrics to the collector via their OTLP exporter, which exports the delta value of cumulative counters. The same issue will arise for exporting to any Prometheus remote storage backend.
+
+To overcome this gap in the Collector pipeline, we proposed two different solutions:
+
+1. Add a [metric aggregation processor](https://github.com/open-telemetry/opentelemetry-collector/issues/1422) to the collector pipeline to aggregate delta values into cumulative values for cumulative backends. This solution requires users to set up a collector agent next to each SDK to make sure delta values are aggregated correctly.
+2. Require the OTLP exporters in SDKs to [send cumulative values for cumulative metric types to the Collector by default](https://github.com/open-telemetry/opentelemetry-specification/issues/731). Therefore, no aggregation of delta metric values is required in the Collector pipeline for Prometheus/storage backends to properly process the data.
+
+**Gap 2:**
+Another gap is that the OTLP metric definition is still in development. This exporter will require refactoring as OTLP changes in the future.
+
+**Assumptions:**
+Because of the gaps mentioned above, this project will convert from the current OTLP metrics and work under the assumption that one of the above solutions will be implemented, and that all incoming monotonic scalar/histogram/summary metrics should be cumulative or otherwise dropped. More details on the behavior of the exporter are in section 2.2.
+
+## **2. Prometheus Remote Write/Cortex Exporter**
+
+The Prometheus remote write/Cortex exporter should receive OTLP metrics, group data points by metric name and label set, convert each group to a TimeSeries, and send all TimeSeries to a storage backend via HTTP.
+
+### **2.1 Receiving Metrics**
+The Prometheus remote write/Cortex exporter receives a MetricsData instance in its PushMetrics() function. MetricsData contains a collection of Metric instances.
Each Metric instance contains a series of data points, and each data point has a set of labels associated with it. Since Prometheus remote write TimeSeries are identified by unique sets of labels, the exporter needs to group data points within each Metric instance by their label set, and convert each group to a TimeSeries.
+
+To group data points by label set, the exporter should create a map with each PushMetrics() call. The key of the map should represent a combination of the following information:
+
+* the metric type
+* the metric name
+* the set of labels that identify a unique TimeSeries
+
+
+The exporter should create a signature string as the map key by concatenating the metric type, the metric name, and the label names and label values at each data point. To ensure correctness, the label set at each data point should be sorted by label key before generating the signature string.
+
+An alternative key type is the existing label.Set implementation from the OpenTelemetry Go API. It provides a Distinct type that guarantees the result will equal the equivalent Distinct value of any label set with the same elements, where sets are made unique by choosing the last value in the input for any given key. If we allocate a Go API kv.KeyValue for every label of a data point, then a label.Set from the API can be created, and its Distinct value can be used as the map key.
+
+
+The value of the map should be the Prometheus TimeSeries: each data point’s value and timestamp should be inserted into its corresponding TimeSeries in the map as a Sample, each metric’s label set and metric name should be combined and translated to a Prometheus label set, and a new TimeSeries should be created if the string signature is not in the map.
+
+
+Pseudocode:
+
+    func PushMetrics(metricsData) {
+
+      // Create a map that stores distinct TimeSeries
+      map := make(map[String][]TimeSeries)
+
+      for metric in metricsData:
+        for point in metric:
+          // Generate signature string
+          sig := pointSignature(metric, point)
+
+          // Find corresponding TimeSeries in map
+          // Add to TimeSeries
+
+      // Sends TimeSeries to backend
+      export(map)
+    }
+
+### **2.2 Mapping of OTLP Metrics to TimeSeries**
+
+Each Prometheus remote write TimeSeries represents less semantic information than an OTLP metric. The temporality property of an OTLP metric is ignored in a TimeSeries because it is always considered cumulative for monotonic types and histograms, and the type property of an OTLP metric is translated by mapping each metric to one or multiple TimeSeries. The following sections explain how to map each OTLP metric type to Prometheus remote write TimeSeries.
+
+
+**INT64, MONOTONIC_INT64, DOUBLE, MONOTONIC_DOUBLE**
+
+Each unique label set within metrics of these types can be converted to exactly one TimeSeries. From the perspective of Prometheus client types, INT64 and DOUBLE correspond to gauge metrics, and MONOTONIC types correspond to counter metrics. In both cases, data points will be exported directly without aggregation. Any metric of the monotonic types that is not cumulative should be dropped; non-monotonic scalar types are assumed to represent gauge values, so their temporality is not checked. Monotonic types need to have a `_total` suffix in their metric names when exporting; this is a requirement of Prometheus.
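+
+For illustration, a cumulative MONOTONIC_INT64 data point named http_requests with label set {method="GET"}, value 17, and timestamp t1 would produce a map entry along these lines (a sketch only; the exact signature format and label ordering are implementation details of the exporter):
+
+    signature: "MONOTONIC_INT64-http_requests-method-GET"
+    TimeSeries{
+        labels:  [{__name__, "http_requests_total"}, {method, "GET"}],
+        samples: [{value: 17, timestamp: t1}],
+    }
+
+A later data point with the same label set appends another Sample to this TimeSeries instead of creating a new map entry.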
+
+
+**HISTOGRAM**
+
+Each histogram data point can be converted to 2 + n + 1 Prometheus remote write TimeSeries:
+
+* 1 *TimeSeries* representing metric_name_count contains HistogramDataPoint.count
+* 1 *TimeSeries* representing metric_name_sum contains HistogramDataPoint.sum
+* n *TimeSeries*, each representing metric_name_bucket{le=“upperbound”}, contain the count of each bucket defined by the bounds of the data point
+* 1 *TimeSeries* representing metric_name_bucket{le=“+Inf”} contains counts for the bucket with infinity as its upper bound; its value is equivalent to metric_name_count.
+
+Prometheus bucket values are cumulative, meaning the count of each bucket should contain counts from buckets with lower bounds. In addition, Exemplars from a histogram data point are ignored. When adding a bucket of the histogram data point to the map, the string signature should also contain a `le` label that indicates the bound value. This label should also be exported. Any histogram metric that is not cumulative should be dropped.
+
+
+**SUMMARY**
+
+Each summary data point can be converted to 2 + n Prometheus remote write TimeSeries:
+
+* 1 *TimeSeries* representing metric_name_count contains SummaryDataPoint.count
+* 1 *TimeSeries* representing metric_name_sum contains SummaryDataPoint.sum
+* n *TimeSeries*, each representing metric_name{quantile=“quantileValue”}, contain the value of each quantile in the data point.
+
+When adding a quantile of the summary data point to the map, the string signature should also contain a `quantile` label that indicates the quantile value. This label should also be exported. Any summary metric that is not cumulative should be dropped.
+
+### **2.3 Exporting Metrics**
+
+The Prometheus remote write/Cortex exporter should call proto.Marshal() to convert multiple TimeSeries to a byte array. Then, the exporter should send the byte array to the Prometheus remote storage in an HTTP request.
+
+
+Authentication credentials should be added to each request before sending it to the backend. Basic auth and bearer token headers can be added using the Golang http.Client’s default configuration options. Other authentication headers can be added by implementing a client interceptor.
+
+
+Pseudocode:
+
+    func export(*map) error {
+      // Stores timeseries
+      arr := make([]TimeSeries)
+
+      for timeseries in map:
+        arr = append(arr, timeseries)
+
+      // Converts arr to WriteRequest
+      request := proto.Marshal(arr)
+
+      // Sends HTTP request to endpoint
+    }
+
+## **3. Other Components**
+
+### **3.1 Config Struct**
+
+This struct is populated from the YAML file read at the beginning of the pipeline and defines the configuration for an Exporter build. Examples of configuration parameters are the HTTP endpoint, compression type, backend program, etc.
+
+
+Converting YAML to a Go struct is done by the Collector, using [_the Viper package_](https://github.com/spf13/viper), an open-source library that converts input YAML files into a usable, appropriate Config struct.
+
+
+An example of the exporter section of the Collector config.yml YAML file can be seen below:
+
+    ...
+
+    exporters:
+      prometheus_remote_write:
+        http_endpoint:
+        # Prefix to metric name
+        namespace:
+        # Labels to add to each TimeSeries
+        const_labels:
+          [label: ]
+        # Allow users to add any header; only required headers listed here
+        headers:
+          [X-Prometheus-Remote-Write-Version:]
+          [Tenant-id:]
+        request_timeout:
+
+        # ************************************************************************
+        # below are configurations copied from Prometheus remote write config
+        # ************************************************************************
+        # Sets the `Authorization` header on every remote write request with the
+        # configured username and password.
+        # password and password_file are mutually exclusive.
+        basic_auth:
+          [ username: ]
+          [ password: ]
+          [ password_file: ]
+
+        # Sets the `Authorization` header on every remote write request with
+        # the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+        [ bearer_token: ]
+
+        # Sets the `Authorization` header on every remote write request with the bearer token
+        # read from the configured file. It is mutually exclusive with `bearer_token`.
+        [ bearer_token_file: /path/to/bearer/token/file ]
+
+        # Configures the remote write request's TLS settings.
+        tls_config:
+          # CA certificate to validate API server certificate with.
+          [ ca_file: ]
+
+          # Certificate and key files for client cert authentication to the server.
+          [ cert_file: ]
+          [ key_file: ]
+
+          # ServerName extension to indicate the name of the server.
+          # https://tools.ietf.org/html/rfc4366#section-3.1
+          [ server_name: ]
+
+          # Disable validation of the server certificate.
+          [ insecure_skip_verify: ]
+
+    ...
+
+### **3.2 Factory Struct**
+
+This struct implements the ExporterFactory interface, and is used during the collector’s pipeline initialization to create Exporter instances as defined by the Config struct. The `exporterhelper` package will be used to create the exporter and the factory.
+
+
+Our Factory type will look very similar to other exporters’ factory implementations. For our implementation, our Factory instance will implement three methods:
+
+
+**Methods**
+
+    NewFactory
+
+This method uses the NewFactory method within the `exporterhelper` package to create an instance of the factory.
+
+    createDefaultConfig
+
+This method creates the default configuration for the Prometheus remote write/Cortex exporter.
+
+
+    createMetricsExporter
+
+This method constructs a new http.Client with interceptors that add headers to any request it sends. Then, this method initializes a new Prometheus remote write/Cortex exporter with the http.Client. Finally, it constructs a collector Prometheus remote write/Cortex exporter with the created SDK exporter.
+
+
+
+## **4. Other Considerations**
+
+### **4.1 Concurrency**
+
+The Prometheus remote write/Cortex exporter should be thread-safe; in this design, the only resource shared across goroutines is the http.Client from the Golang library. It is thread-safe; thus, our code is thread-safe.
+
+### **4.2 Shutdown Behavior**
+
+Once the shutdown() function is called, the exporter should stop accepting incoming calls (returning an error), and wait for current operations to finish before returning. This can be done by using a stop channel and a wait group.
+
+    func Shutdown () {
+      close(stopChan)
+      waitGroup.Wait()
+    }
+
+    func PushMetrics() {
+      select:
+        case <- stopChan
+          return error
+        default:
+          waitGroup.Add(1)
+          defer waitGroup.Done()
+          // export metrics
+          ...
+    }
+
+### **4.3 Timeout Behavior**
+
+Users should be able to pass in a timeout for each HTTP request as part of the Configuration. The factory should read the configuration file and set the timeout field of the http.Client:
+
+    func (f *Factory) CreateNewExporter (config) {
+      ...
+      client := &http.Client{
+        Timeout: config.requestTimeout,
+      }
+      ...
+    }
+
+### **4.4 Error Behavior**
+
+The PushMetricsData() function should return the number of dropped metrics. Any monotonic and histogram metrics that are not cumulative should be dropped. This can be done by checking the temporality of each received metric. Any error should be returned to the caller, and the error message should be descriptive.
+
+
+
+### **4.5 Test Strategy**
+
+We will follow test-driven development practices while completing this project. We’ll write unit tests before implementing production code. Tests will cover normal and abnormal inputs and test for edge cases. We will provide end-to-end tests using a mock backend/client. Our target is 90% or more code coverage.
+
+
+
+## **Request for Feedback**
+We'd like to get some feedback on whether we made the appropriate assumptions in [this](#12-gaps-and-assumptions) section, and we appreciate more comments, updates, and suggestions on the topic.
+
+Please let us know if there are any revisions, technical or informational, necessary for this document. Thank you!
+
+
+
diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/README.md b/internal/otel_collector/exporter/prometheusremotewriteexporter/README.md
new file mode 100644
index 00000000000..28e487f2f12
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/README.md
@@ -0,0 +1,54 @@
+# Prometheus Remote Write Exporter
+
+This exporter sends data in Prometheus TimeSeries format to Cortex or any
+Prometheus [remote write compatible
+backend](https://prometheus.io/docs/operating/integrations/).
+By default, this exporter requires TLS and offers queued retry capabilities.
+
+:warning: Non-cumulative monotonic, histogram, and summary OTLP metrics are
+dropped by this exporter.
+
+_Here is a link to the overall project [design](./DESIGN.md)_
+
+Supported pipeline types: metrics
+
+## Getting Started
+
+The following settings are required:
+
+- `endpoint` (no default): protocol:host:port to which the exporter is going to send data.
+
+By default, TLS is enabled:
+
+- `insecure` (default = `false`): whether to enable client transport security for
+  the exporter's connection.
+
+As a result, the following parameters are also required:
+
+- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+The following settings can be optionally configured:
+
+- `external_labels`: list of labels to be attached to each metric data point
+- `headers`: additional headers attached to each HTTP request.
+  - *Note the following headers cannot be changed: `Content-Encoding`, `Content-Type`, `X-Prometheus-Remote-Write-Version`, and `User-Agent`.*
+- `namespace`: prefix attached to each exported metric name.
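+
+An illustrative combination of the optional settings (the endpoint, label, and
+header values below are placeholders, not defaults):
+
+```yaml
+exporters:
+  prometheusremotewrite:
+    endpoint: "https://my-cortex:9009/api/prom/push"
+    namespace: myapp
+    external_labels:
+      cluster: eu-west-1
+    headers:
+      X-Scope-OrgID: "my-tenant"
+```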
+ +Example: + +```yaml +exporters: + prometheusremotewrite: + endpoint: "http://some.url:9411/api/prom/push" +``` + +## Advanced Configuration + +Several helper files are leveraged to provide additional capabilities automatically: + +- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/confighttp/README.md) +- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md) +- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md) diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/config.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/config.go new file mode 100644 index 00000000000..ea265be0ded --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/config.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusremotewriteexporter + +import ( + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +// Config defines configuration for Remote Write exporter. +type Config struct { + // squash ensures fields are correctly decoded in embedded struct. + configmodels.ExporterSettings `mapstructure:",squash"` + exporterhelper.TimeoutSettings `mapstructure:",squash"` + exporterhelper.QueueSettings `mapstructure:"sending_queue"` + exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` + + // prefix attached to each exported metric name + // See: https://prometheus.io/docs/practices/naming/#metric-names + Namespace string `mapstructure:"namespace"` + + // ExternalLabels defines a map of label keys and values that are allowed to start with reserved prefix "__" + ExternalLabels map[string]string `mapstructure:"external_labels"` + + HTTPClientSettings confighttp.HTTPClientSettings `mapstructure:",squash"` +} diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/config_test.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/config_test.go new file mode 100644 index 00000000000..cfc7a9e638c --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/config_test.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package prometheusremotewriteexporter
+
+import (
+    "path"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/config/confighttp"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/config/configtest"
+    "go.opentelemetry.io/collector/config/configtls"
+    "go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// Test_loadConfig checks whether yaml configuration can be loaded correctly
+func Test_loadConfig(t *testing.T) {
+    factories, err := componenttest.ExampleComponents()
+    assert.NoError(t, err)
+
+    factory := NewFactory()
+    factories.Exporters[typeStr] = factory
+    cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+
+    require.NoError(t, err)
+    require.NotNil(t, cfg)
+
+    // From the default configurations -- checks if a correct exporter is instantiated
+    e0 := cfg.Exporters["prometheusremotewrite"]
+    assert.Equal(t, e0, factory.CreateDefaultConfig())
+
+    // checks if the correct Config struct can be instantiated from testdata/config.yaml
+    e1 := cfg.Exporters["prometheusremotewrite/2"]
+    assert.Equal(t, e1,
+        &Config{
+            ExporterSettings: configmodels.ExporterSettings{
+                NameVal: "prometheusremotewrite/2",
+                TypeVal: "prometheusremotewrite",
+            },
+            TimeoutSettings: exporterhelper.DefaultTimeoutSettings(),
+            QueueSettings: exporterhelper.QueueSettings{
+                Enabled:      true,
+                NumConsumers: 2,
+                QueueSize:    10,
+            },
+            RetrySettings: exporterhelper.RetrySettings{
+                Enabled:         true,
+                InitialInterval: 10 * time.Second,
+                MaxInterval:     1 * time.Minute,
+                MaxElapsedTime:  10 * time.Minute,
+            },
+            Namespace:      "test-space",
+            ExternalLabels: map[string]string{"key1": "value1", "key2": "value2"},
+            HTTPClientSettings: confighttp.HTTPClientSettings{
+                Endpoint: "localhost:8888",
+                TLSSetting: configtls.TLSClientSetting{
+                    TLSSetting: configtls.TLSSetting{
+                        CAFile: "/var/lib/mycert.pem", // placeholder CA file path; the test only checks that the value is loaded
+                    },
+                    Insecure: false,
+                },
+                ReadBufferSize:  0,
+                WriteBufferSize: 512 * 1024,
+                Timeout:         5 * time.Second,
+                Headers: map[string]string{
+                    "prometheus-remote-write-version": "0.1.0",
+                    "x-scope-orgid":                   "234"},
+            },
+        })
+}
diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go
new file mode 100644
index 00000000000..e56143c9f40
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go
@@ -0,0 +1,345 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheusremotewriteexporter implements an exporter that sends Prometheus remote write requests.
+package prometheusremotewriteexporter
+
+import (
+    "bufio"
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "math"
+    "net/http"
+    "net/url"
+    "sync"
+
+    "github.com/gogo/protobuf/proto"
+    "github.com/golang/snappy"
+    "github.com/prometheus/prometheus/prompb"
+
+    "go.opentelemetry.io/collector/component/componenterror"
+    "go.opentelemetry.io/collector/consumer/consumererror"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    otlp "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+    "go.opentelemetry.io/collector/internal/version"
+)
+
+const (
+    maxConcurrentRequests = 5
+    maxBatchByteSize      = 3000000
+)
+
+// PrwExporter converts OTLP metrics to Prometheus remote write TimeSeries and sends them to a remote endpoint.
+type PrwExporter struct {
+    namespace      string
+    externalLabels map[string]string
+    endpointURL    *url.URL
+    client         *http.Client
+    wg             *sync.WaitGroup
+    closeChan      chan struct{}
+}
+
+// NewPrwExporter initializes a new PrwExporter instance and sets fields accordingly.
+// The client parameter cannot be nil.
+func NewPrwExporter(namespace string, endpoint string, client *http.Client, externalLabels map[string]string) (*PrwExporter, error) {
+    if client == nil {
+        return nil, errors.New("http client cannot be nil")
+    }
+
+    sanitizedLabels, err := validateAndSanitizeExternalLabels(externalLabels)
+    if err != nil {
+        return nil, err
+    }
+
+    endpointURL, err := url.ParseRequestURI(endpoint)
+    if err != nil {
+        return nil, errors.New("invalid endpoint")
+    }
+
+    return &PrwExporter{
+        namespace:      namespace,
+        externalLabels: sanitizedLabels,
+        endpointURL:    endpointURL,
+        client:         client,
+        wg:             new(sync.WaitGroup),
+        closeChan:      make(chan struct{}),
+    }, nil
+}
+
+// Shutdown stops the exporter from accepting new calls (which then return an error) and waits for current export
+// operations to finish before returning.
+func (prwe *PrwExporter) Shutdown(context.Context) error {
+    close(prwe.closeChan)
+    prwe.wg.Wait()
+    return nil
+}
+
+// PushMetrics converts metrics to Prometheus remote write TimeSeries and sends them to the remote endpoint. It
+// maintains a map of TimeSeries, validates and handles each individual metric, adding the converted TimeSeries to the
+// map, and finally exports the map.
+func (prwe *PrwExporter) PushMetrics(ctx context.Context, md pdata.Metrics) (int, error) {
+    prwe.wg.Add(1)
+    defer prwe.wg.Done()
+
+    select {
+    case <-prwe.closeChan:
+        return md.MetricCount(), errors.New("shutdown has been called")
+    default:
+        tsMap := map[string]*prompb.TimeSeries{}
+        dropped := 0
+        var errs []error
+        resourceMetrics := pdata.MetricsToOtlp(md)
+        for _, resourceMetric := range resourceMetrics {
+            if resourceMetric == nil {
+                continue
+            }
+            // TODO: add resource attributes as labels, probably in next PR
+            for _, instrumentationMetrics := range resourceMetric.InstrumentationLibraryMetrics {
+                if instrumentationMetrics == nil {
+                    continue
+                }
+                // TODO: decide if instrumentation library information should be exported as labels
+                for _, metric := range instrumentationMetrics.Metrics {
+                    if metric == nil {
+                        dropped++
+                        continue
+                    }
+                    // check for valid type and temporality combination and for matching data field and type
+                    if ok := validateMetrics(metric); !ok {
+                        dropped++
+                        errs = append(errs, consumererror.Permanent(errors.New("invalid temporality and type combination")))
+                        continue
+                    }
+                    // handle individual metric based on type
+                    switch metric.Data.(type) {
+                    case *otlp.Metric_DoubleSum, *otlp.Metric_IntSum, *otlp.Metric_DoubleGauge, *otlp.Metric_IntGauge:
+                        if err := prwe.handleScalarMetric(tsMap, metric); err != nil {
+                            dropped++
+                            errs = append(errs, consumererror.Permanent(err))
+                        }
+                    case *otlp.Metric_DoubleHistogram, *otlp.Metric_IntHistogram:
+                        if err := prwe.handleHistogramMetric(tsMap, metric); err != nil {
+                            dropped++
+                            errs = append(errs, consumererror.Permanent(err))
+                        }
+                    case *otlp.Metric_DoubleSummary:
+                        if err := prwe.handleSummaryMetric(tsMap, metric); err != nil {
+                            dropped++
+                            errs = append(errs, consumererror.Permanent(err))
+                        }
+                    default:
+                        dropped++
+                        errs = append(errs, consumererror.Permanent(errors.New("unsupported metric type")))
+                    }
+                }
+            }
+        }
+
+        if exportErrors := prwe.export(ctx, tsMap); len(exportErrors) != 0 {
+            dropped = md.MetricCount()
+            errs = append(errs, exportErrors...)
+        }
+
+        if dropped != 0 {
+            return dropped, componenterror.CombineErrors(errs)
+        }
+
+        return 0, nil
+    }
+}
+
+func validateAndSanitizeExternalLabels(externalLabels map[string]string) (map[string]string, error) {
+    sanitizedLabels := make(map[string]string)
+    for key, value := range externalLabels {
+        if key == "" || value == "" {
+            return nil, fmt.Errorf("prometheus remote write: external labels configuration contains an empty key or value")
+        }
+
+        // Sanitize label keys to meet Prometheus requirements
+        if len(key) > 2 && key[:2] == "__" {
+            key = "__" + sanitize(key[2:])
+        } else {
+            key = sanitize(key)
+        }
+        sanitizedLabels[key] = value
+    }
+
+    return sanitizedLabels, nil
+}
+
+// handleScalarMetric processes data points in a single OTLP scalar metric by adding each point as a Sample into
+// its corresponding TimeSeries in tsMap.
+// tsMap and metric cannot be nil, and metric must have a non-nil descriptor.
+func (prwe *PrwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, metric *otlp.Metric) error {
+    switch metric.Data.(type) {
+    case *otlp.Metric_DoubleGauge:
+        if metric.GetDoubleGauge().GetDataPoints() == nil {
+            return fmt.Errorf("nil data point.
%s is dropped", metric.GetName()) + } + for _, pt := range metric.GetDoubleGauge().GetDataPoints() { + addSingleDoubleDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels) + } + case *otlp.Metric_IntGauge: + if metric.GetIntGauge().GetDataPoints() == nil { + return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + } + for _, pt := range metric.GetIntGauge().GetDataPoints() { + addSingleIntDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels) + } + case *otlp.Metric_DoubleSum: + if metric.GetDoubleSum().GetDataPoints() == nil { + return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + } + for _, pt := range metric.GetDoubleSum().GetDataPoints() { + addSingleDoubleDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels) + } + case *otlp.Metric_IntSum: + if metric.GetIntSum().GetDataPoints() == nil { + return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + } + for _, pt := range metric.GetIntSum().GetDataPoints() { + addSingleIntDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels) + } + } + return nil +} + +// handleHistogramMetric processes data points in a single OTLP histogram metric by mapping the sum, count and each +// bucket of every data point as a Sample, and adding each Sample to its corresponding TimeSeries. +// tsMap and metric cannot be nil. +func (prwe *PrwExporter) handleHistogramMetric(tsMap map[string]*prompb.TimeSeries, metric *otlp.Metric) error { + switch metric.Data.(type) { + case *otlp.Metric_IntHistogram: + if metric.GetIntHistogram().GetDataPoints() == nil { + return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + } + for _, pt := range metric.GetIntHistogram().GetDataPoints() { + addSingleIntHistogramDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels) + } + case *otlp.Metric_DoubleHistogram: + if metric.GetDoubleHistogram().GetDataPoints() == nil { + return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + } + for _, pt := range metric.GetDoubleHistogram().GetDataPoints() { + addSingleDoubleHistogramDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels) + } + } + return nil +} + +// handleSummaryMetric processes data points in a single OTLP summary metric by mapping the sum, count and each +// quantile of every data point as a Sample, and adding each Sample to its corresponding TimeSeries. +// tsMap and metric cannot be nil. +func (prwe *PrwExporter) handleSummaryMetric(tsMap map[string]*prompb.TimeSeries, metric *otlp.Metric) error { + if metric.GetDoubleSummary().GetDataPoints() == nil { + return fmt.Errorf("nil data point. 
%s is dropped", metric.GetName())
+    }
+    for _, pt := range metric.GetDoubleSummary().GetDataPoints() {
+        addSingleDoubleSummaryDataPoint(pt, metric, prwe.namespace, tsMap, prwe.externalLabels)
+    }
+    return nil
+}
+
+// export sends Snappy-compressed WriteRequests containing TimeSeries to the remote write endpoint.
+func (prwe *PrwExporter) export(ctx context.Context, tsMap map[string]*prompb.TimeSeries) []error {
+    var errs []error
+    // Calls the helper function to convert and batch the tsMap to the desired format
+    requests, err := batchTimeSeries(tsMap, maxBatchByteSize)
+    if err != nil {
+        errs = append(errs, consumererror.Permanent(err))
+        return errs
+    }
+
+    input := make(chan *prompb.WriteRequest, len(requests))
+    for _, request := range requests {
+        input <- request
+    }
+    close(input)
+
+    var mu sync.Mutex
+    var wg sync.WaitGroup
+
+    concurrencyLimit := int(math.Min(maxConcurrentRequests, float64(len(requests))))
+    wg.Add(concurrencyLimit) // used to wait for workers to finish
+
+    // Run up to concurrencyLimit workers until there are no more requests to execute in the input channel.
+    for i := 0; i < concurrencyLimit; i++ {
+        go func() {
+            defer wg.Done()
+
+            for request := range input {
+                err := prwe.execute(ctx, request)
+                if err != nil {
+                    mu.Lock()
+                    errs = append(errs, err)
+                    mu.Unlock()
+                }
+            }
+        }()
+    }
+    wg.Wait()
+
+    return errs
+}
+
+func (prwe *PrwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequest) error {
+    // Uses proto.Marshal to convert the WriteRequest into a byte array
+    data, err := proto.Marshal(writeReq)
+    if err != nil {
+        return consumererror.Permanent(err)
+    }
+    buf := make([]byte, len(data), cap(data))
+    compressedData := snappy.Encode(buf, data)
+
+    // Create the HTTP POST request to send to the endpoint
+    req, err := http.NewRequestWithContext(ctx, "POST", prwe.endpointURL.String(), bytes.NewReader(compressedData))
+    if err != nil {
+        return consumererror.Permanent(err)
+    }
+
+    // Add necessary headers specified by:
+    // https://cortexmetrics.io/docs/apis/#remote-api
+    req.Header.Add("Content-Encoding", "snappy")
+    req.Header.Set("Content-Type", "application/x-protobuf")
+    req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+    req.Header.Set("User-Agent", "OpenTelemetry-Collector/"+version.Version)
+
+    resp, err := prwe.client.Do(req)
+    if err != nil {
+        return consumererror.Permanent(err)
+    }
+    // close the body so the underlying connection can be reused
+    defer resp.Body.Close()
+
+    // 2xx status code is considered a success
+    // 5xx errors are recoverable and the exporter should retry
+    // Reference for different behavior according to status code:
+    // https://github.com/prometheus/prometheus/pull/2552/files#diff-ae8db9d16d8057358e49d694522e7186
+    if resp.StatusCode/100 != 2 {
+        scanner := bufio.NewScanner(io.LimitReader(resp.Body, 256))
+        var line string
+        if scanner.Scan() {
+            line = scanner.Text()
+        }
+        err := fmt.Errorf("server returned HTTP status %v: %v", resp.Status, line)
+        if resp.StatusCode >= 500 && resp.StatusCode < 600 {
+            return err
+        }
+        return consumererror.Permanent(err)
+    }
+    return nil
+}
diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter_test.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter_test.go
new file mode 100644
index 00000000000..2333f40eda3
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter_test.go
@@ -0,0 +1,751 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusremotewriteexporter
+
+import (
+    "context"
+    "io/ioutil"
+    "net/http"
+    "net/http/httptest"
+    "net/url"
+    "sync"
+    "testing"
+
+    "github.com/gogo/protobuf/proto"
+    "github.com/golang/snappy"
+    "github.com/prometheus/prometheus/prompb"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
+    "go.opentelemetry.io/collector/config/confighttp"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/exporter/exporterhelper"
+    otlp "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+    "go.opentelemetry.io/collector/internal/testdata"
+    "go.opentelemetry.io/collector/internal/version"
+)
+
+// Test_NewPrwExporter checks that a new exporter instance with non-nil fields is initialized.
+func Test_NewPrwExporter(t *testing.T) {
+    config := &Config{
+        ExporterSettings:   configmodels.ExporterSettings{},
+        TimeoutSettings:    exporterhelper.TimeoutSettings{},
+        QueueSettings:      exporterhelper.QueueSettings{},
+        RetrySettings:      exporterhelper.RetrySettings{},
+        Namespace:          "",
+        ExternalLabels:     map[string]string{},
+        HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: ""},
+    }
+    tests := []struct {
+        name           string
+        config         *Config
+        namespace      string
+        endpoint       string
+        externalLabels map[string]string
+        client         *http.Client
+        returnError    bool
+    }{
+        {
+            "invalid_URL",
+            config,
+            "test",
+            "invalid URL",
+            map[string]string{"Key1": "Val1"},
+            http.DefaultClient,
+            true,
+        },
+        {
+            "nil_client",
+            config,
+            "test",
+            "http://some.url:9411/api/prom/push",
+            map[string]string{"Key1": "Val1"},
+            nil,
+            true,
+        },
+        {
+            "invalid_labels_case",
+            config,
+            "test",
+            "http://some.url:9411/api/prom/push",
+            map[string]string{"Key1": ""},
+            http.DefaultClient,
+            true,
+        },
+        {
+            "success_case",
+            config,
+            "test",
+            "http://some.url:9411/api/prom/push",
+            map[string]string{"Key1": "Val1"},
+            http.DefaultClient,
+            false,
+        },
+        {
+            "success_case_no_labels",
+            config,
+            "test",
+            "http://some.url:9411/api/prom/push",
+            map[string]string{},
+            http.DefaultClient,
+            false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            prwe, err := NewPrwExporter(tt.namespace, tt.endpoint, tt.client, tt.externalLabels)
+            if tt.returnError {
+                assert.Error(t, err)
+                return
+            }
+            require.NotNil(t, prwe)
+            assert.NotNil(t, prwe.namespace)
+            assert.NotNil(t, prwe.endpointURL)
+            assert.NotNil(t, prwe.externalLabels)
+            assert.NotNil(t, prwe.client)
+            assert.NotNil(t, prwe.closeChan)
+            assert.NotNil(t, prwe.wg)
+        })
+    }
+}
+
+// Test_Shutdown checks that after Shutdown is called, incoming calls to PushMetrics return an error.
+func Test_Shutdown(t *testing.T) { + prwe := &PrwExporter{ + wg: new(sync.WaitGroup), + closeChan: make(chan struct{}), + } + wg := new(sync.WaitGroup) + errChan := make(chan error, 5) + err := prwe.Shutdown(context.Background()) + require.NoError(t, err) + errChan = make(chan error, 5) + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _, ok := prwe.PushMetrics(context.Background(), testdata.GenerateMetricsEmpty()) + errChan <- ok + }() + } + wg.Wait() + close(errChan) + for ok := range errChan { + assert.Error(t, ok) + } +} + +// Test whether or not the Server receives the correct TimeSeries. +// Currently considering making this test an iterative for loop of multiple TimeSeries much akin to Test_PushMetrics +func Test_export(t *testing.T) { + // First we will instantiate a dummy TimeSeries instance to pass into both the export call and compare the http request + labels := getPromLabels(label11, value11, label12, value12, label21, value21, label22, value22) + sample1 := getSample(floatVal1, msTime1) + sample2 := getSample(floatVal2, msTime2) + ts1 := getTimeSeries(labels, sample1, sample2) + handleFunc := func(w http.ResponseWriter, r *http.Request, code int) { + // The following is a handler function that reads the sent httpRequest, unmarshals, and checks if the WriteRequest + // preserves the TimeSeries data correctly + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, body) + // Receives the http requests and unzip, unmarshals, and extracts TimeSeries + assert.Equal(t, "0.1.0", r.Header.Get("X-Prometheus-Remote-Write-Version")) + assert.Equal(t, "snappy", r.Header.Get("Content-Encoding")) + assert.Equal(t, "OpenTelemetry-Collector/"+version.Version, r.Header.Get("User-Agent")) + writeReq := &prompb.WriteRequest{} + unzipped := []byte{} + + dest, err := snappy.Decode(unzipped, body) + require.NoError(t, err) + + ok := proto.Unmarshal(dest, writeReq) + require.NoError(t, ok) + + assert.EqualValues(t, 1, len(writeReq.Timeseries)) + require.NotNil(t, writeReq.GetTimeseries()) + assert.Equal(t, *ts1, writeReq.GetTimeseries()[0]) + w.WriteHeader(code) + } + + // Create in test table format to check if different HTTP response codes or server errors + // are properly identified + tests := []struct { + name string + ts prompb.TimeSeries + serverUp bool + httpResponseCode int + returnError bool + }{ + {"success_case", + *ts1, + true, + http.StatusAccepted, + false, + }, + { + "server_no_response_case", + *ts1, + false, + http.StatusAccepted, + true, + }, { + "error_status_code_case", + *ts1, + true, + http.StatusForbidden, + true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if handleFunc != nil { + handleFunc(w, r, tt.httpResponseCode) + } + })) + defer server.Close() + serverURL, uErr := url.Parse(server.URL) + assert.NoError(t, uErr) + if !tt.serverUp { + server.Close() + } + errs := runExportPipeline(ts1, serverURL) + if tt.returnError { + assert.Error(t, errs[0]) + return + } + assert.Len(t, errs, 0) + }) + } +} + +func runExportPipeline(ts *prompb.TimeSeries, endpoint *url.URL) []error { + var errs []error + + // First we will construct a TimeSeries array from the testutils package + testmap := make(map[string]*prompb.TimeSeries) + testmap["test"] = ts + + HTTPClient := http.DefaultClient + // after this, instantiate a CortexExporter with the current HTTP client and endpoint set to passed in 
endpoint + prwe, err := NewPrwExporter("test", endpoint.String(), HTTPClient, map[string]string{}) + if err != nil { + errs = append(errs, err) + return errs + } + errs = append(errs, prwe.export(context.Background(), testmap)...) + return errs +} + +// Test_PushMetrics checks the number of TimeSeries received by server and the number of metrics dropped is the same as +// expected +func Test_PushMetrics(t *testing.T) { + + invalidTypeBatch := testdata.GenerateMetricsMetricTypeInvalid() + + // success cases + intSumBatch := testdata.GenerateMetricsManyMetricsSameResource(10) + + doubleSumMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics1[validDoubleSum], + validMetrics2[validDoubleSum], + }, + }, + }, + }, + } + doubleSumBatch := pdata.MetricsFromOtlp(doubleSumMetric) + + intGaugeMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics1[validIntGauge], + validMetrics2[validIntGauge], + }, + }, + }, + }, + } + intGaugeBatch := pdata.MetricsFromOtlp(intGaugeMetric) + + doubleGaugeMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics1[validDoubleGauge], + validMetrics2[validDoubleGauge], + }, + }, + }, + }, + } + doubleGaugeBatch := pdata.MetricsFromOtlp(doubleGaugeMetric) + + intHistogramMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics1[validIntHistogram], + validMetrics2[validIntHistogram], + }, + }, + }, + }, + } + intHistogramBatch := pdata.MetricsFromOtlp(intHistogramMetric) + + doubleHistogramMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics1[validDoubleHistogram], + validMetrics2[validDoubleHistogram], + }, + }, + }, + }, + } + doubleHistogramBatch := pdata.MetricsFromOtlp(doubleHistogramMetric) + + doubleSummaryMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics1[validDoubleSummary], + validMetrics2[validDoubleSummary], + }, + }, + }, + }, + } + doubleSummaryBatch := pdata.MetricsFromOtlp(doubleSummaryMetric) + + // len(BucketCount) > len(ExplicitBounds) + unmatchedBoundBucketIntHistMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics2[unmatchedBoundBucketIntHist], + }, + }, + }, + }, + } + unmatchedBoundBucketIntHistBatch := pdata.MetricsFromOtlp(unmatchedBoundBucketIntHistMetric) + + unmatchedBoundBucketDoubleHistMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + validMetrics2[unmatchedBoundBucketDoubleHist], + }, + }, + }, + }, + } + unmatchedBoundBucketDoubleHistBatch := pdata.MetricsFromOtlp(unmatchedBoundBucketDoubleHistMetric) + + // fail cases + nilDataPointIntGaugeMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointIntGauge], + }, + }, + }, + }, + } + nilDataPointIntGaugeBatch := pdata.MetricsFromOtlp(nilDataPointIntGaugeMetric) + + 
nilDataPointDoubleGaugeMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointDoubleGauge], + }, + }, + }, + }, + } + nilDataPointDoubleGaugeBatch := pdata.MetricsFromOtlp(nilDataPointDoubleGaugeMetric) + + nilDataPointIntSumMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointIntSum], + }, + }, + }, + }, + } + nilDataPointIntSumBatch := pdata.MetricsFromOtlp(nilDataPointIntSumMetric) + + nilDataPointDoubleSumMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointDoubleSum], + }, + }, + }, + }, + } + nilDataPointDoubleSumBatch := pdata.MetricsFromOtlp(nilDataPointDoubleSumMetric) + + nilDataPointIntHistogramMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointIntHistogram], + }, + }, + }, + }, + } + nilDataPointIntHistogramBatch := pdata.MetricsFromOtlp(nilDataPointIntHistogramMetric) + + nilDataPointDoubleHistogramMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointDoubleHistogram], + }, + }, + }, + }, + } + nilDataPointDoubleHistogramBatch := pdata.MetricsFromOtlp(nilDataPointDoubleHistogramMetric) + + nilDataPointDoubleSummaryMetric := []*otlp.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ + { + Metrics: []*otlp.Metric{ + errorMetrics[nilDataPointDoubleSummary], + }, + }, + }, + }, + } + nilDataPointDoubleSummaryBatch := pdata.MetricsFromOtlp(nilDataPointDoubleSummaryMetric) + + checkFunc := func(t *testing.T, r *http.Request, expected int) { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, len(body)) + dest, err := snappy.Decode(buf, body) + assert.Equal(t, "0.1.0", r.Header.Get("x-prometheus-remote-write-version")) + assert.Equal(t, "snappy", r.Header.Get("content-encoding")) + assert.Equal(t, "OpenTelemetry-Collector/"+version.Version, r.Header.Get("user-agent")) + assert.NotNil(t, r.Header.Get("tenant-id")) + require.NoError(t, err) + wr := &prompb.WriteRequest{} + ok := proto.Unmarshal(dest, wr) + require.Nil(t, ok) + assert.EqualValues(t, expected, len(wr.Timeseries)) + } + + tests := []struct { + name string + md *pdata.Metrics + reqTestFunc func(t *testing.T, r *http.Request, expected int) + expectedTimeSeries int + httpResponseCode int + numDroppedTimeSeries int + returnErr bool + }{ + { + "invalid_type_case", + &invalidTypeBatch, + nil, + 0, + http.StatusAccepted, + invalidTypeBatch.MetricCount(), + true, + }, + { + "intSum_case", + &intSumBatch, + checkFunc, + 2, + http.StatusAccepted, + 0, + false, + }, + { + "doubleSum_case", + &doubleSumBatch, + checkFunc, + 2, + http.StatusAccepted, + 0, + false, + }, + { + "doubleGauge_case", + &doubleGaugeBatch, + checkFunc, + 2, + http.StatusAccepted, + 0, + false, + }, + { + "intGauge_case", + &intGaugeBatch, + checkFunc, + 2, + http.StatusAccepted, + 0, + false, + }, + { + "intHistogram_case", + &intHistogramBatch, + checkFunc, + 12, + http.StatusAccepted, + 0, + false, + }, + { + "doubleHistogram_case", + &doubleHistogramBatch, + checkFunc, + 12, + http.StatusAccepted, + 
0, + false, + }, + { + "doubleSummary_case", + &doubleSummaryBatch, + checkFunc, + 10, + http.StatusAccepted, + 0, + false, + }, + { + "unmatchedBoundBucketIntHist_case", + &unmatchedBoundBucketIntHistBatch, + checkFunc, + 5, + http.StatusAccepted, + 0, + false, + }, + { + "unmatchedBoundBucketDoubleHist_case", + &unmatchedBoundBucketDoubleHistBatch, + checkFunc, + 5, + http.StatusAccepted, + 0, + false, + }, + { + "5xx_case", + &unmatchedBoundBucketDoubleHistBatch, + checkFunc, + 5, + http.StatusServiceUnavailable, + 1, + true, + }, + { + "nilDataPointDoubleGauge_case", + &nilDataPointDoubleGaugeBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointDoubleGaugeBatch.MetricCount(), + true, + }, + { + "nilDataPointIntGauge_case", + &nilDataPointIntGaugeBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointIntGaugeBatch.MetricCount(), + true, + }, + { + "nilDataPointDoubleSum_case", + &nilDataPointDoubleSumBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointDoubleSumBatch.MetricCount(), + true, + }, + { + "nilDataPointIntSum_case", + &nilDataPointIntSumBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointIntSumBatch.MetricCount(), + true, + }, + { + "nilDataPointDoubleHistogram_case", + &nilDataPointDoubleHistogramBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointDoubleHistogramBatch.MetricCount(), + true, + }, + { + "nilDataPointIntHistogram_case", + &nilDataPointIntHistogramBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointIntHistogramBatch.MetricCount(), + true, + }, + { + "nilDataPointDoubleSummary_case", + &nilDataPointDoubleSummaryBatch, + checkFunc, + 0, + http.StatusAccepted, + nilDataPointDoubleSummaryBatch.MetricCount(), + true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if tt.reqTestFunc != nil { + tt.reqTestFunc(t, r, tt.expectedTimeSeries) + } + w.WriteHeader(tt.httpResponseCode) + })) + + defer server.Close() + + serverURL, uErr := url.Parse(server.URL) + assert.NoError(t, uErr) + + config := &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: "prometheusremotewrite", + NameVal: "prometheusremotewrite", + }, + Namespace: "", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "http://some.url:9411/api/prom/push", + // We almost read 0 bytes, so no need to tune ReadBufferSize. 
+ ReadBufferSize: 0, + WriteBufferSize: 512 * 1024, + }, + } + assert.NotNil(t, config) + // c, err := config.HTTPClientSettings.ToClient() + // assert.Nil(t, err) + c := http.DefaultClient + prwe, nErr := NewPrwExporter(config.Namespace, serverURL.String(), c, map[string]string{}) + require.NoError(t, nErr) + numDroppedTimeSeries, err := prwe.PushMetrics(context.Background(), *tt.md) + assert.Equal(t, tt.numDroppedTimeSeries, numDroppedTimeSeries) + if tt.returnErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +func Test_validateAndSanitizeExternalLabels(t *testing.T) { + tests := []struct { + name string + inputLabels map[string]string + expectedLabels map[string]string + returnError bool + }{ + {"success_case_no_labels", + map[string]string{}, + map[string]string{}, + false, + }, + {"success_case_with_labels", + map[string]string{"key1": "val1"}, + map[string]string{"key1": "val1"}, + false, + }, + {"success_case_2_with_labels", + map[string]string{"__key1__": "val1"}, + map[string]string{"__key1__": "val1"}, + false, + }, + {"success_case_with_sanitized_labels", + map[string]string{"__key1.key__": "val1"}, + map[string]string{"__key1_key__": "val1"}, + false, + }, + {"fail_case_empty_label", + map[string]string{"": "val1"}, + map[string]string{}, + true, + }, + } + // run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + newLabels, err := validateAndSanitizeExternalLabels(tt.inputLabels) + if tt.returnError { + assert.Error(t, err) + return + } + assert.EqualValues(t, tt.expectedLabels, newLabels) + assert.NoError(t, err) + }) + } +} diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go new file mode 100644 index 00000000000..af2c65154ab --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusremotewriteexporter + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. 
+ typeStr = "prometheusremotewrite" +) + +func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithMetrics(createMetricsExporter)) +} + +func createMetricsExporter(_ context.Context, params component.ExporterCreateParams, + cfg configmodels.Exporter) (component.MetricsExporter, error) { + + prwCfg, ok := cfg.(*Config) + if !ok { + return nil, errors.New("invalid configuration") + } + + client, err := prwCfg.HTTPClientSettings.ToClient() + if err != nil { + return nil, err + } + + prwe, err := NewPrwExporter(prwCfg.Namespace, prwCfg.HTTPClientSettings.Endpoint, client, prwCfg.ExternalLabels) + if err != nil { + return nil, err + } + + prwexp, err := exporterhelper.NewMetricsExporter( + cfg, + params.Logger, + prwe.PushMetrics, + exporterhelper.WithTimeout(prwCfg.TimeoutSettings), + exporterhelper.WithQueue(prwCfg.QueueSettings), + exporterhelper.WithRetry(prwCfg.RetrySettings), + exporterhelper.WithShutdown(prwe.Shutdown), + ) + + return prwexp, err +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Namespace: "", + ExternalLabels: map[string]string{}, + TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "http://some.url:9411/api/prom/push", + // We almost read 0 bytes, so no need to tune ReadBufferSize. + ReadBufferSize: 0, + WriteBufferSize: 512 * 1024, + Timeout: exporterhelper.DefaultTimeoutSettings().Timeout, + Headers: map[string]string{}, + }, + } +} diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/factory_test.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/factory_test.go new file mode 100644 index 00000000000..a94cb6ce83b --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/factory_test.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusremotewriteexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtls" +) + +// Tests whether or not the default Exporter factory can instantiate a properly interfaced Exporter with default conditions +func Test_createDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +// Tests whether or not a correct Metrics Exporter from the default Config parameters +func Test_createMetricsExporter(t *testing.T) { + + invalidConfig := createDefaultConfig().(*Config) + invalidConfig.HTTPClientSettings = confighttp.HTTPClientSettings{} + invalidTLSConfig := createDefaultConfig().(*Config) + invalidTLSConfig.HTTPClientSettings.TLSSetting = configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "non-existent file", + CertFile: "", + KeyFile: "", + }, + Insecure: false, + ServerName: "", + } + tests := []struct { + name string + cfg configmodels.Exporter + params component.ExporterCreateParams + returnError bool + }{ + {"success_case", + createDefaultConfig(), + component.ExporterCreateParams{Logger: zap.NewNop()}, + false, + }, + {"fail_case", + nil, + component.ExporterCreateParams{Logger: zap.NewNop()}, + true, + }, + {"invalid_config_case", + invalidConfig, + component.ExporterCreateParams{Logger: zap.NewNop()}, + true, + }, + {"invalid_tls_config_case", + invalidTLSConfig, + component.ExporterCreateParams{Logger: zap.NewNop()}, + true, + }, + } + // run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := createMetricsExporter(context.Background(), tt.params, tt.cfg) + if tt.returnError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go new file mode 100644 index 00000000000..b19b820c0b1 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go @@ -0,0 +1,489 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package prometheusremotewriteexporter
+
+import (
+    "errors"
+    "log"
+    "sort"
+    "strconv"
+    "strings"
+    "time"
+    "unicode"
+
+    "github.com/prometheus/prometheus/prompb"
+
+    "go.opentelemetry.io/collector/consumer/pdata"
+    common "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"
+    otlp "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+)
+
+const (
+    nameStr     = "__name__"
+    sumStr      = "_sum"
+    countStr    = "_count"
+    bucketStr   = "_bucket"
+    leStr       = "le"
+    quantileStr = "quantile"
+    pInfStr     = "+Inf"
+    totalStr    = "total"
+    delimeter   = "_"
+    keyStr      = "key"
+)
+
+// ByLabelName enables the usage of sort.Sort() with a slice of labels
+type ByLabelName []prompb.Label
+
+func (a ByLabelName) Len() int           { return len(a) }
+func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name }
+func (a ByLabelName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// validateMetrics returns a bool representing whether the metric has a valid type and temporality combination and a
+// matching metric type and field
+func validateMetrics(metric *otlp.Metric) bool {
+    if metric == nil || metric.Data == nil {
+        return false
+    }
+    switch metric.Data.(type) {
+    case *otlp.Metric_DoubleGauge:
+        return metric.GetDoubleGauge() != nil
+    case *otlp.Metric_IntGauge:
+        return metric.GetIntGauge() != nil
+    case *otlp.Metric_DoubleSum:
+        return metric.GetDoubleSum() != nil && metric.GetDoubleSum().GetAggregationTemporality() ==
+            otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
+    case *otlp.Metric_IntSum:
+        return metric.GetIntSum() != nil && metric.GetIntSum().GetAggregationTemporality() ==
+            otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
+    case *otlp.Metric_DoubleHistogram:
+        return metric.GetDoubleHistogram() != nil && metric.GetDoubleHistogram().GetAggregationTemporality() ==
+            otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
+    case *otlp.Metric_IntHistogram:
+        return metric.GetIntHistogram() != nil && metric.GetIntHistogram().GetAggregationTemporality() ==
+            otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
+    case *otlp.Metric_DoubleSummary:
+        return metric.GetDoubleSummary() != nil
+    }
+    return false
+}
+
+// addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and adds the sample to that
+// TimeSeries; it creates a new TimeSeries in the map if not found. tsMap is unmodified if any of its parameters is nil.
+func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
+    metric *otlp.Metric) {
+
+    if sample == nil || labels == nil || tsMap == nil {
+        return
+    }
+
+    sig := timeSeriesSignature(metric, &labels)
+    ts, ok := tsMap[sig]
+
+    if ok {
+        ts.Samples = append(ts.Samples, *sample)
+    } else {
+        newTs := &prompb.TimeSeries{
+            Labels:  labels,
+            Samples: []prompb.Sample{*sample},
+        }
+        tsMap[sig] = newTs
+    }
+}
+
+// timeSeriesSignature returns a string signature in the form of:
+// TYPE-label1-value1- ... -labelN-valueN
+// The label slice should not contain duplicate label names; this method sorts the slice by label name before creating
+// the signature.
+func timeSeriesSignature(metric *otlp.Metric, labels *[]prompb.Label) string {
+    b := strings.Builder{}
+    b.WriteString(getTypeString(metric))
+
+    sort.Sort(ByLabelName(*labels))
+
+    for _, lb := range *labels {
+        b.WriteString("-")
+        b.WriteString(lb.GetName())
+        b.WriteString("-")
+        b.WriteString(lb.GetValue())
+    }
+
+    return b.String()
+}
+
+// createLabelSet creates a slice of Prometheus labels from OTLP labels and pairs of string values.
+// An unpaired string value is ignored. String pairs overwrite OTLP labels if a collision happens, and the overwrite is
+// logged. Resulting label names are sanitized.
+func createLabelSet(labels []common.StringKeyValue, externalLabels map[string]string, extras ...string) []prompb.Label {
+    // map ensures no duplicate label name
+    l := map[string]prompb.Label{}
+
+    for key, value := range externalLabels {
+        // External labels have already been sanitized
+        l[key] = prompb.Label{
+            Name:  key,
+            Value: value,
+        }
+    }
+
+    for _, lb := range labels {
+        l[lb.Key] = prompb.Label{
+            Name:  sanitize(lb.Key),
+            Value: lb.Value,
+        }
+    }
+
+    for i := 0; i < len(extras); i += 2 {
+        if i+1 >= len(extras) {
+            break
+        }
+        _, found := l[extras[i]]
+        if found {
+            log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
+        }
+        // internal labels should be maintained
+        name := extras[i]
+        if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
+            name = sanitize(name)
+        }
+        l[extras[i]] = prompb.Label{
+            Name:  name,
+            Value: extras[i+1],
+        }
+    }
+
+    s := make([]prompb.Label, 0, len(l))
+    for _, lb := range l {
+        s = append(s, lb)
+    }
+
+    return s
+}
+
+// getPromMetricName creates a Prometheus metric name by attaching a namespace prefix, and a _total suffix for
+// monotonic metrics.
+func getPromMetricName(metric *otlp.Metric, ns string) string {
+    if metric == nil {
+        return ""
+    }
+
+    // if the metric is a counter, the _total suffix should be applied
+    _, isCounter1 := metric.Data.(*otlp.Metric_DoubleSum)
+    _, isCounter2 := metric.Data.(*otlp.Metric_IntSum)
+    isCounter := isCounter1 || isCounter2
+
+    b := strings.Builder{}
+
+    b.WriteString(ns)
+
+    if b.Len() > 0 {
+        b.WriteString(delimeter)
+    }
+    name := metric.GetName()
+    b.WriteString(name)
+
+    // do not add the total suffix if the metric name already ends in "total";
+    // HasSuffix also guards against names shorter than the suffix
+    isCounter = isCounter && !strings.HasSuffix(name, totalStr)
+
+    // Including units makes two metrics with the same name and label set belong to two different TimeSeries if the
+    // units are different.
+    /*
+        if b.Len() > 0 && len(desc.GetUnit()) > 0{
+            fmt.Fprintf(&b, delimeter)
+            fmt.Fprintf(&b, desc.GetUnit())
+        }
+    */
+
+    if b.Len() > 0 && isCounter {
+        b.WriteString(delimeter)
+        b.WriteString(totalStr)
+    }
+    return sanitize(b.String())
+}
+
+// batchTimeSeries splits series into multiple batch write requests.
+func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int) ([]*prompb.WriteRequest, error) { + if len(tsMap) == 0 { + return nil, errors.New("invalid tsMap: cannot be empty map") + } + + var requests []*prompb.WriteRequest + var tsArray []prompb.TimeSeries + sizeOfCurrentBatch := 0 + + for _, v := range tsMap { + sizeOfSeries := v.Size() + + if sizeOfCurrentBatch+sizeOfSeries >= maxBatchByteSize { + wrapped := convertTimeseriesToRequest(tsArray) + requests = append(requests, wrapped) + + tsArray = make([]prompb.TimeSeries, 0) + sizeOfCurrentBatch = 0 + } + + tsArray = append(tsArray, *v) + sizeOfCurrentBatch += sizeOfSeries + } + + if len(tsArray) != 0 { + wrapped := convertTimeseriesToRequest(tsArray) + requests = append(requests, wrapped) + } + + return requests, nil +} + +// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms +func convertTimeStamp(timestamp uint64) int64 { + return int64(timestamp / uint64(int64(time.Millisecond)/int64(time.Nanosecond))) +} + +// copied from prometheus-go-metric-exporter +// sanitize replaces non-alphanumeric characters with underscores in s. +func sanitize(s string) string { + if len(s) == 0 { + return s + } + + // Note: No length limit for label keys because Prometheus doesn't + // define a length limit, thus we should NOT be truncating label keys. + // See https://github.com/orijtech/prometheus-go-metrics-exporter/issues/4. + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = keyStr + delimeter + s + } + if s[0] == '_' { + s = keyStr + s + } + return s +} + +// copied from prometheus-go-metric-exporter +// sanitizeRune converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} + +func getTypeString(metric *otlp.Metric) string { + switch metric.Data.(type) { + case *otlp.Metric_DoubleGauge: + return strconv.Itoa(int(pdata.MetricDataTypeDoubleGauge)) + case *otlp.Metric_IntGauge: + return strconv.Itoa(int(pdata.MetricDataTypeIntGauge)) + case *otlp.Metric_DoubleSum: + return strconv.Itoa(int(pdata.MetricDataTypeDoubleSum)) + case *otlp.Metric_IntSum: + return strconv.Itoa(int(pdata.MetricDataTypeIntSum)) + case *otlp.Metric_DoubleHistogram: + return strconv.Itoa(int(pdata.MetricDataTypeDoubleHistogram)) + case *otlp.Metric_IntHistogram: + return strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)) + } + return "" +} + +// addSingleDoubleDataPoint converts the metric value stored in pt to a Prometheus sample, and add the sample +// to its corresponding time series in tsMap +func addSingleDoubleDataPoint(pt *otlp.DoubleDataPoint, metric *otlp.Metric, namespace string, + tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) { + if pt == nil { + return + } + // create parameters for addSample + name := getPromMetricName(metric, namespace) + labels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, name) + sample := &prompb.Sample{ + Value: pt.Value, + // convert ns to ms + Timestamp: convertTimeStamp(pt.TimeUnixNano), + } + addSample(tsMap, sample, labels, metric) +} + +// addSingleIntDataPoint converts the metric value stored in pt to a Prometheus sample, and add the sample +// to its corresponding time series in tsMap +func addSingleIntDataPoint(pt *otlp.IntDataPoint, metric *otlp.Metric, namespace string, + tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) { + if pt == nil { + return + } 
+ // create parameters for addSample + name := getPromMetricName(metric, namespace) + labels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, name) + sample := &prompb.Sample{ + Value: float64(pt.Value), + // convert ns to ms + Timestamp: convertTimeStamp(pt.TimeUnixNano), + } + addSample(tsMap, sample, labels, metric) +} + +// addSingleIntHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. It +// ignore extra buckets if len(ExplicitBounds) > len(BucketCounts) +func addSingleIntHistogramDataPoint(pt *otlp.IntHistogramDataPoint, metric *otlp.Metric, namespace string, + tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) { + if pt == nil { + return + } + time := convertTimeStamp(pt.TimeUnixNano) + // sum, count, and buckets of the histogram should append suffix to baseName + baseName := getPromMetricName(metric, namespace) + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: float64(pt.GetSum()), + Timestamp: time, + } + + sumlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+sumStr) + addSample(tsMap, sum, sumlabels, metric) + + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.GetCount()), + Timestamp: time, + } + countlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+countStr) + addSample(tsMap, count, countlabels, metric) + + // cumulative count for conversion to cumulative histogram + var cumulativeCount uint64 + + // process each bound, ignore extra bucket values + for index, bound := range pt.GetExplicitBounds() { + if index >= len(pt.GetBucketCounts()) { + break + } + cumulativeCount += pt.GetBucketCounts()[index] + bucket := &prompb.Sample{ + Value: float64(cumulativeCount), + Timestamp: time, + } + boundStr := strconv.FormatFloat(bound, 'f', -1, 64) + labels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, boundStr) + addSample(tsMap, bucket, labels, metric) + } + // add le=+Inf bucket + cumulativeCount += pt.GetBucketCounts()[len(pt.GetBucketCounts())-1] + infBucket := &prompb.Sample{ + Value: float64(cumulativeCount), + Timestamp: time, + } + infLabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, pInfStr) + addSample(tsMap, infBucket, infLabels, metric) +} + +// addSingleDoubleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. 
It +// ignore extra buckets if len(ExplicitBounds) > len(BucketCounts) +func addSingleDoubleHistogramDataPoint(pt *otlp.DoubleHistogramDataPoint, metric *otlp.Metric, namespace string, + tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) { + if pt == nil { + return + } + time := convertTimeStamp(pt.TimeUnixNano) + // sum, count, and buckets of the histogram should append suffix to baseName + baseName := getPromMetricName(metric, namespace) + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: pt.GetSum(), + Timestamp: time, + } + + sumlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+sumStr) + addSample(tsMap, sum, sumlabels, metric) + + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.GetCount()), + Timestamp: time, + } + countlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+countStr) + addSample(tsMap, count, countlabels, metric) + + // cumulative count for conversion to cumulative histogram + var cumulativeCount uint64 + + // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 + for index, bound := range pt.GetExplicitBounds() { + if index >= len(pt.GetBucketCounts()) { + break + } + cumulativeCount += pt.GetBucketCounts()[index] + bucket := &prompb.Sample{ + Value: float64(cumulativeCount), + Timestamp: time, + } + boundStr := strconv.FormatFloat(bound, 'f', -1, 64) + labels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, boundStr) + addSample(tsMap, bucket, labels, metric) + } + // add le=+Inf bucket + cumulativeCount += pt.GetBucketCounts()[len(pt.GetBucketCounts())-1] + infBucket := &prompb.Sample{ + Value: float64(cumulativeCount), + Timestamp: time, + } + infLabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, pInfStr) + addSample(tsMap, infBucket, infLabels, metric) +} + +// addSingleDoubleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. +func addSingleDoubleSummaryDataPoint(pt *otlp.DoubleSummaryDataPoint, metric *otlp.Metric, namespace string, + tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) { + if pt == nil { + return + } + time := convertTimeStamp(pt.TimeUnixNano) + // sum and count of the summary should append suffix to baseName + baseName := getPromMetricName(metric, namespace) + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: pt.GetSum(), + Timestamp: time, + } + + sumlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+sumStr) + addSample(tsMap, sum, sumlabels, metric) + + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.GetCount()), + Timestamp: time, + } + countlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName+countStr) + addSample(tsMap, count, countlabels, metric) + + // process each percentile/quantile + for _, qt := range pt.GetQuantileValues() { + quantile := &prompb.Sample{ + Value: qt.Value, + Timestamp: time, + } + percentileStr := strconv.FormatFloat(qt.GetQuantile(), 'f', -1, 64) + qtlabels := createLabelSet(pt.GetLabels(), externalLabels, nameStr, baseName, quantileStr, percentileStr) + addSample(tsMap, quantile, qtlabels, metric) + } +} + +func convertTimeseriesToRequest(tsArray []prompb.TimeSeries) *prompb.WriteRequest { + // the remote_write endpoint only requires the timeseries. 
+    // otlp defines its own way to handle metric metadata
+    return &prompb.WriteRequest{
+        Timeseries: tsArray,
+    }
+}
diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/helper_test.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/helper_test.go
new file mode 100644
index 00000000000..821d9947fe4
--- /dev/null
+++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/helper_test.go
@@ -0,0 +1,363 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusremotewriteexporter
+
+import (
+    "strconv"
+    "testing"
+
+    "github.com/prometheus/prometheus/prompb"
+    "github.com/stretchr/testify/assert"
+
+    "go.opentelemetry.io/collector/consumer/pdata"
+    common "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"
+    otlp "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"
+)
+
+// Test_validateMetrics checks that validateMetrics returns true if a type and temporality combination is valid, false
+// otherwise.
+func Test_validateMetrics(t *testing.T) {
+
+    // define a single test
+    type combTest struct {
+        name   string
+        metric *otlp.Metric
+        want   bool
+    }
+
+    tests := []combTest{}
+
+    // append true cases
+    for k, validMetric := range validMetrics1 {
+        name := "valid_" + k
+
+        tests = append(tests, combTest{
+            name,
+            validMetric,
+            true,
+        })
+    }
+
+    // append nil case
+    tests = append(tests, combTest{"invalid_nil", nil, false})
+
+    // append false cases
+    for k, invalidMetric := range invalidMetrics {
+        name := "invalid_" + k
+
+        tests = append(tests, combTest{
+            name,
+            invalidMetric,
+            false,
+        })
+    }
+
+    // run tests
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            got := validateMetrics(tt.metric)
+            assert.Equal(t, tt.want, got)
+        })
+    }
+}
+
+// Test_addSample checks that addSample updates the map it receives correctly based on the sample and label set it
+// receives.
+// Test cases are two samples belonging to the same TimeSeries, two samples belonging to different TimeSeries, and the
+// nil case.
+func Test_addSample(t *testing.T) { + type testCase struct { + metric *otlp.Metric + sample prompb.Sample + labels []prompb.Label + } + + tests := []struct { + name string + orig map[string]*prompb.TimeSeries + testCase []testCase + want map[string]*prompb.TimeSeries + }{ + { + "two_points_same_ts_same_metric", + map[string]*prompb.TimeSeries{}, + []testCase{ + {validMetrics1[validDoubleGauge], + getSample(floatVal1, msTime1), + promLbs1, + }, + { + validMetrics1[validDoubleGauge], + getSample(floatVal2, msTime2), + promLbs1, + }, + }, + twoPointsSameTs, + }, + { + "two_points_different_ts_same_metric", + map[string]*prompb.TimeSeries{}, + []testCase{ + {validMetrics1[validIntGauge], + getSample(float64(intVal1), msTime1), + promLbs1, + }, + {validMetrics1[validIntGauge], + getSample(float64(intVal1), msTime2), + promLbs2, + }, + }, + twoPointsDifferentTs, + }, + } + t.Run("nil_case", func(t *testing.T) { + tsMap := map[string]*prompb.TimeSeries{} + addSample(tsMap, nil, nil, nil) + assert.Exactly(t, tsMap, map[string]*prompb.TimeSeries{}) + }) + // run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + addSample(tt.orig, &tt.testCase[0].sample, tt.testCase[0].labels, tt.testCase[0].metric) + addSample(tt.orig, &tt.testCase[1].sample, tt.testCase[1].labels, tt.testCase[1].metric) + assert.Exactly(t, tt.want, tt.orig) + }) + } +} + +// Test_timeSeries checks timeSeriesSignature returns consistent and unique signatures for a distinct label set and +// metric type combination. +func Test_timeSeriesSignature(t *testing.T) { + tests := []struct { + name string + lbs []prompb.Label + metric *otlp.Metric + want string + }{ + { + "int64_signature", + promLbs1, + validMetrics1[validIntGauge], + strconv.Itoa(int(pdata.MetricDataTypeIntGauge)) + lb1Sig, + }, + { + "histogram_signature", + promLbs2, + validMetrics1[validIntHistogram], + strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)) + lb2Sig, + }, + { + "unordered_signature", + getPromLabels(label22, value22, label21, value21), + validMetrics1[validIntHistogram], + strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)) + lb2Sig, + }, + // descriptor type cannot be nil, as checked by validateMetrics + { + "nil_case", + nil, + validMetrics1[validIntHistogram], + strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)), + }, + } + + // run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.EqualValues(t, tt.want, timeSeriesSignature(tt.metric, &tt.lbs)) + }) + } +} + +// Test_createLabelSet checks resultant label names are sanitized and label in extra overrides label in labels if +// collision happens. 
It does not check label ordering.
+func Test_createLabelSet(t *testing.T) {
+	tests := []struct {
+		name           string
+		orig           []common.StringKeyValue
+		externalLabels map[string]string
+		extras         []string
+		want           []prompb.Label
+	}{
+		{
+			"labels_clean",
+			lbs1,
+			map[string]string{},
+			[]string{label31, value31, label32, value32},
+			getPromLabels(label11, value11, label12, value12, label31, value31, label32, value32),
+		},
+		{
+			"labels_duplicate_in_extras",
+			lbs1,
+			map[string]string{},
+			[]string{label11, value31},
+			getPromLabels(label11, value31, label12, value12),
+		},
+		{
+			"labels_dirty",
+			lbs1Dirty,
+			map[string]string{},
+			[]string{label31 + dirty1, value31, label32, value32},
+			getPromLabels(label11+"_", value11, "key_"+label12, value12, label31+"_", value31, label32, value32),
+		},
+		{
+			"no_original_case",
+			nil,
+			nil,
+			[]string{label31, value31, label32, value32},
+			getPromLabels(label31, value31, label32, value32),
+		},
+		{
+			"empty_extra_case",
+			lbs1,
+			map[string]string{},
+			[]string{"", ""},
+			getPromLabels(label11, value11, label12, value12, "", ""),
+		},
+		{
+			"single_left_over_case",
+			lbs1,
+			map[string]string{},
+			[]string{label31, value31, label32},
+			getPromLabels(label11, value11, label12, value12, label31, value31),
+		},
+		{
+			"valid_external_labels",
+			lbs1,
+			exlbs1,
+			[]string{label31, value31, label32, value32},
+			getPromLabels(label11, value11, label12, value12, label41, value41, label31, value31, label32, value32),
+		},
+		{
+			"overwritten_external_labels",
+			lbs1,
+			exlbs2,
+			[]string{label31, value31, label32, value32},
+			getPromLabels(label11, value11, label12, value12, label31, value31, label32, value32),
+		},
+	}
+	// run tests
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.ElementsMatch(t, tt.want, createLabelSet(tt.orig, tt.externalLabels, tt.extras...))
+		})
+	}
+}
+
+// Test_getPromMetricName checks if OTLP metric names are converted to Cortex metric names correctly.
+// Test cases are empty namespace, monotonic metrics that require a total suffix, and metric names that contain
+// invalid characters.
+func Test_getPromMetricName(t *testing.T) {
+	tests := []struct {
+		name   string
+		metric *otlp.Metric
+		ns     string
+		want   string
+	}{
+		{
+			"nil_case",
+			nil,
+			ns1,
+			"",
+		},
+		{
+			"normal_case",
+			validMetrics1[validDoubleGauge],
+			ns1,
+			"test_ns_" + validDoubleGauge,
+		},
+		{
+			"empty_namespace",
+			validMetrics1[validDoubleGauge],
+			"",
+			validDoubleGauge,
+		},
+		{
+			"total_suffix",
+			validMetrics1[validIntSum],
+			ns1,
+			"test_ns_" + validIntSum + delimeter + totalStr,
+		},
+		{
+			"dirty_string",
+			validMetrics2[validIntGaugeDirty],
+			"7" + ns1,
+			"key_7test_ns__" + validIntGauge + "_",
+		},
+	}
+	// run tests
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equal(t, tt.want, getPromMetricName(tt.metric, tt.ns))
+		})
+	}
+}
+
+// Test_batchTimeSeries checks that batchTimeSeries returns the correct number of requests
+// depending on byte size.
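The doc comment above describes the batching contract this test verifies. Before the test body, a standalone sketch of size-based batching under stated assumptions (plain int sizes stand in for encoded prompb payloads; this is an illustration, not the exporter's actual implementation):

```go
package main

import "fmt"

// batchBySize splits items into batches whose summed sizes stay at or below max.
func batchBySize(sizes []int, max int) [][]int {
	var batches [][]int
	var current []int
	total := 0
	for _, s := range sizes {
		// start a new batch when the next item would exceed the budget
		if total+s > max && len(current) > 0 {
			batches = append(batches, current)
			current, total = nil, 0
		}
		current = append(current, s)
		total += s
	}
	if len(current) > 0 {
		batches = append(batches, current)
	}
	return batches
}

func main() {
	// two payloads of 200 and 250 bytes against a 300-byte budget -> 2 requests,
	// mirroring the "two_requests" case below
	fmt.Println(len(batchBySize([]int{200, 250}, 300)))
}
```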
+func Test_batchTimeSeries(t *testing.T) { + // First we will instantiate a dummy TimeSeries instance to pass into both the export call and compare the http request + labels := getPromLabels(label11, value11, label12, value12, label21, value21, label22, value22) + sample1 := getSample(floatVal1, msTime1) + sample2 := getSample(floatVal2, msTime2) + sample3 := getSample(floatVal3, msTime3) + ts1 := getTimeSeries(labels, sample1, sample2) + ts2 := getTimeSeries(labels, sample1, sample2, sample3) + + tsMap1 := getTimeseriesMap([]*prompb.TimeSeries{}) + tsMap2 := getTimeseriesMap([]*prompb.TimeSeries{ts1}) + tsMap3 := getTimeseriesMap([]*prompb.TimeSeries{ts1, ts2}) + + tests := []struct { + name string + tsMap map[string]*prompb.TimeSeries + maxBatchByteSize int + numExpectedRequests int + returnErr bool + }{ + { + "no_timeseries", + tsMap1, + 100, + -1, + true, + }, + { + "normal_case", + tsMap2, + 300, + 1, + false, + }, + { + "two_requests", + tsMap3, + 300, + 2, + false, + }, + } + // run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + requests, err := batchTimeSeries(tt.tsMap, tt.maxBatchByteSize) + if tt.returnErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tt.numExpectedRequests, len(requests)) + }) + } +} diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/img/cortex.png b/internal/otel_collector/exporter/prometheusremotewriteexporter/img/cortex.png new file mode 100644 index 00000000000..75769e61302 Binary files /dev/null and b/internal/otel_collector/exporter/prometheusremotewriteexporter/img/cortex.png differ diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/img/timeseries.png b/internal/otel_collector/exporter/prometheusremotewriteexporter/img/timeseries.png new file mode 100644 index 00000000000..54f406ce53f Binary files /dev/null and b/internal/otel_collector/exporter/prometheusremotewriteexporter/img/timeseries.png differ diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/testdata/config.yaml b/internal/otel_collector/exporter/prometheusremotewriteexporter/testdata/config.yaml new file mode 100644 index 00000000000..88c065ddd17 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/testdata/config.yaml @@ -0,0 +1,37 @@ +receivers: + examplereceiver: + +processors: + exampleprocessor: + +exporters: + prometheusremotewrite: + prometheusremotewrite/2: + namespace: "test-space" + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + endpoint: "localhost:8888" + ca_file: "/var/lib/mycert.pem" + write_buffer_size: 524288 + headers: + Prometheus-Remote-Write-Version: "0.1.0" + X-Scope-OrgID: 234 + external_labels: + key1: value1 + key2: value2 + +service: + pipelines: + metrics: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [prometheusremotewrite] + + diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/testutil_test.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/testutil_test.go new file mode 100644 index 00000000000..62ddf2e4ff7 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/testutil_test.go @@ -0,0 +1,578 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusremotewriteexporter + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/prompb" + + commonpb "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlp "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" +) + +var ( + time1 = uint64(time.Now().UnixNano()) + time2 = uint64(time.Now().UnixNano() - 5) + time3 = uint64(time.Date(1970, 1, 0, 0, 0, 0, 0, time.UTC).UnixNano()) + msTime1 = int64(time1 / uint64(int64(time.Millisecond)/int64(time.Nanosecond))) + msTime2 = int64(time2 / uint64(int64(time.Millisecond)/int64(time.Nanosecond))) + msTime3 = int64(time3 / uint64(int64(time.Millisecond)/int64(time.Nanosecond))) + + label11 = "test_label11" + value11 = "test_value11" + label12 = "test_label12" + value12 = "test_value12" + label21 = "test_label21" + value21 = "test_value21" + label22 = "test_label22" + value22 = "test_value22" + label31 = "test_label31" + value31 = "test_value31" + label32 = "test_label32" + value32 = "test_value32" + label41 = "__test_label41__" + value41 = "test_value41" + dirty1 = "%" + dirty2 = "?" + + intVal1 int64 = 1 + intVal2 int64 = 2 + floatVal1 = 1.0 + floatVal2 = 2.0 + floatVal3 = 3.0 + + lbs1 = getLabels(label11, value11, label12, value12) + lbs2 = getLabels(label21, value21, label22, value22) + lbs1Dirty = getLabels(label11+dirty1, value11, dirty2+label12, value12) + + exlbs1 = map[string]string{label41: value41} + exlbs2 = map[string]string{label11: value41} + + promLbs1 = getPromLabels(label11, value11, label12, value12) + promLbs2 = getPromLabels(label21, value21, label22, value22) + + lb1Sig = "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12 + lb2Sig = "-" + label21 + "-" + value21 + "-" + label22 + "-" + value22 + ns1 = "test_ns" + + twoPointsSameTs = map[string]*prompb.TimeSeries{ + "2" + "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12: getTimeSeries(getPromLabels(label11, value11, label12, value12), + getSample(float64(intVal1), msTime1), + getSample(float64(intVal2), msTime2)), + } + twoPointsDifferentTs = map[string]*prompb.TimeSeries{ + "1" + "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12: getTimeSeries(getPromLabels(label11, value11, label12, value12), + getSample(float64(intVal1), msTime1)), + "1" + "-" + label21 + "-" + value21 + "-" + label22 + "-" + value22: getTimeSeries(getPromLabels(label21, value21, label22, value22), + getSample(float64(intVal1), msTime2)), + } + bounds = []float64{0.1, 0.5, 0.99} + buckets = []uint64{1, 2, 3} + + quantileBounds = []float64{0.15, 0.9, 0.99} + quantileValues = []float64{7, 8, 9} + quantiles = getQuantiles(quantileBounds, quantileValues) + + validIntGauge = "valid_IntGauge" + validDoubleGauge = "valid_DoubleGauge" + validIntSum = "valid_IntSum" + validDoubleSum = "valid_DoubleSum" + validIntHistogram = "valid_IntHistogram" + validDoubleHistogram = "valid_DoubleHistogram" + validDoubleSummary = "valid_DoubleSummary" + + validIntGaugeDirty = "*valid_IntGauge$" + + unmatchedBoundBucketIntHist = "unmatchedBoundBucketIntHist" + unmatchedBoundBucketDoubleHist = 
"unmatchedBoundBucketDoubleHist" + + // valid metrics as input should not return error + validMetrics1 = map[string]*otlp.Metric{ + validIntGauge: { + Name: validIntGauge, + Data: &otlp.Metric_IntGauge{ + IntGauge: &otlp.IntGauge{ + DataPoints: []*otlp.IntDataPoint{ + getIntDataPoint(lbs1, intVal1, time1), + nil, + }, + }, + }, + }, + validDoubleGauge: { + Name: validDoubleGauge, + Data: &otlp.Metric_DoubleGauge{ + DoubleGauge: &otlp.DoubleGauge{ + DataPoints: []*otlp.DoubleDataPoint{ + getDoubleDataPoint(lbs1, floatVal1, time1), + nil, + }, + }, + }, + }, + validIntSum: { + Name: validIntSum, + Data: &otlp.Metric_IntSum{ + IntSum: &otlp.IntSum{ + DataPoints: []*otlp.IntDataPoint{ + getIntDataPoint(lbs1, intVal1, time1), + nil, + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validDoubleSum: { + Name: validDoubleSum, + Data: &otlp.Metric_DoubleSum{ + DoubleSum: &otlp.DoubleSum{ + DataPoints: []*otlp.DoubleDataPoint{ + getDoubleDataPoint(lbs1, floatVal1, time1), + nil, + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validIntHistogram: { + Name: validIntHistogram, + Data: &otlp.Metric_IntHistogram{ + IntHistogram: &otlp.IntHistogram{ + DataPoints: []*otlp.IntHistogramDataPoint{ + getIntHistogramDataPoint(lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), + nil, + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validDoubleHistogram: { + Name: validDoubleHistogram, + Data: &otlp.Metric_DoubleHistogram{ + DoubleHistogram: &otlp.DoubleHistogram{ + DataPoints: []*otlp.DoubleHistogramDataPoint{ + getDoubleHistogramDataPoint(lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), + nil, + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validDoubleSummary: { + Name: validDoubleSummary, + Data: &otlp.Metric_DoubleSummary{ + DoubleSummary: &otlp.DoubleSummary{ + DataPoints: []*otlp.DoubleSummaryDataPoint{ + getDoubleSummaryDataPoint(lbs1, time1, floatVal1, uint64(intVal1), quantiles), + nil, + }, + }, + }, + }, + } + validMetrics2 = map[string]*otlp.Metric{ + validIntGauge: { + Name: validIntGauge, + Data: &otlp.Metric_IntGauge{ + IntGauge: &otlp.IntGauge{ + DataPoints: []*otlp.IntDataPoint{ + getIntDataPoint(lbs2, intVal2, time2), + }, + }, + }, + }, + validDoubleGauge: { + Name: validDoubleGauge, + Data: &otlp.Metric_DoubleGauge{ + DoubleGauge: &otlp.DoubleGauge{ + DataPoints: []*otlp.DoubleDataPoint{ + getDoubleDataPoint(lbs2, floatVal2, time2), + }, + }, + }, + }, + validIntSum: { + Name: validIntSum, + Data: &otlp.Metric_IntSum{ + IntSum: &otlp.IntSum{ + DataPoints: []*otlp.IntDataPoint{ + getIntDataPoint(lbs2, intVal2, time2), + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validDoubleSum: { + Name: validDoubleSum, + Data: &otlp.Metric_DoubleSum{ + DoubleSum: &otlp.DoubleSum{ + DataPoints: []*otlp.DoubleDataPoint{ + getDoubleDataPoint(lbs2, floatVal2, time2), + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validIntHistogram: { + Name: validIntHistogram, + Data: &otlp.Metric_IntHistogram{ + IntHistogram: &otlp.IntHistogram{ + DataPoints: []*otlp.IntHistogramDataPoint{ + getIntHistogramDataPoint(lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), + }, + AggregationTemporality: 
otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validDoubleHistogram: { + Name: validDoubleHistogram, + Data: &otlp.Metric_DoubleHistogram{ + DoubleHistogram: &otlp.DoubleHistogram{ + DataPoints: []*otlp.DoubleHistogramDataPoint{ + getDoubleHistogramDataPoint(lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + validDoubleSummary: { + Name: validDoubleSummary, + Data: &otlp.Metric_DoubleSummary{ + DoubleSummary: &otlp.DoubleSummary{ + DataPoints: []*otlp.DoubleSummaryDataPoint{ + getDoubleSummaryDataPoint(lbs2, time2, floatVal2, uint64(intVal2), quantiles), + nil, + }, + }, + }, + }, + validIntGaugeDirty: { + Name: validIntGaugeDirty, + Data: &otlp.Metric_IntGauge{ + IntGauge: &otlp.IntGauge{ + DataPoints: []*otlp.IntDataPoint{ + getIntDataPoint(lbs1, intVal1, time1), + nil, + }, + }, + }, + }, + unmatchedBoundBucketIntHist: { + Name: unmatchedBoundBucketIntHist, + Data: &otlp.Metric_IntHistogram{ + IntHistogram: &otlp.IntHistogram{ + DataPoints: []*otlp.IntHistogramDataPoint{ + { + ExplicitBounds: []float64{0.1, 0.2, 0.3}, + BucketCounts: []uint64{1, 2}, + }, + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + unmatchedBoundBucketDoubleHist: { + Name: unmatchedBoundBucketDoubleHist, + Data: &otlp.Metric_DoubleHistogram{ + DoubleHistogram: &otlp.DoubleHistogram{ + DataPoints: []*otlp.DoubleHistogramDataPoint{ + { + ExplicitBounds: []float64{0.1, 0.2, 0.3}, + BucketCounts: []uint64{1, 2}, + }, + }, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + } + + nilMetric = "nil" + empty = "empty" + + // Category 1: type and data field doesn't match + notMatchIntGauge = "noMatchIntGauge" + notMatchDoubleGauge = "notMatchDoubleGauge" + notMatchIntSum = "notMatchIntSum" + notMatchDoubleSum = "notMatchDoubleSum" + notMatchIntHistogram = "notMatchIntHistogram" + notMatchDoubleHistogram = "notMatchDoubleHistogram" + notMatchDoubleSummary = "notMatchDoubleSummary" + + // Category 2: invalid type and temporality combination + invalidIntSum = "invalidIntSum" + invalidDoubleSum = "invalidDoubleSum" + invalidIntHistogram = "invalidIntHistogram" + invalidDoubleHistogram = "invalidDoubleHistogram" + + // Category 3: nil data points + nilDataPointIntGauge = "nilDataPointIntGauge" + nilDataPointDoubleGauge = "nilDataPointDoubleGauge" + nilDataPointIntSum = "nilDataPointIntSum" + nilDataPointDoubleSum = "nilDataPointDoubleSum" + nilDataPointIntHistogram = "nilDataPointIntHistogram" + nilDataPointDoubleHistogram = "nilDataPointDoubleHistogram" + nilDataPointDoubleSummary = "nilDataPointDoubleSummary" + + // different metrics that will not pass validate metrics + invalidMetrics = map[string]*otlp.Metric{ + // nil + nilMetric: nil, + // Data = nil + empty: {}, + notMatchIntGauge: { + Name: notMatchIntGauge, + Data: &otlp.Metric_IntGauge{}, + }, + notMatchDoubleGauge: { + Name: notMatchDoubleGauge, + Data: &otlp.Metric_DoubleGauge{}, + }, + notMatchIntSum: { + Name: notMatchIntSum, + Data: &otlp.Metric_IntSum{}, + }, + notMatchDoubleSum: { + Name: notMatchDoubleSum, + Data: &otlp.Metric_DoubleSum{}, + }, + notMatchIntHistogram: { + Name: notMatchIntHistogram, + Data: &otlp.Metric_IntHistogram{}, + }, + notMatchDoubleHistogram: { + Name: notMatchDoubleHistogram, + Data: &otlp.Metric_DoubleHistogram{}, + }, + notMatchDoubleSummary: { + Name: 
notMatchDoubleSummary, + Data: &otlp.Metric_DoubleSummary{}, + }, + invalidIntSum: { + Name: invalidIntSum, + Data: &otlp.Metric_IntSum{ + IntSum: &otlp.IntSum{ + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + }, + }, + }, + invalidDoubleSum: { + Name: invalidDoubleSum, + Data: &otlp.Metric_DoubleSum{ + DoubleSum: &otlp.DoubleSum{ + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + }, + }, + }, + invalidIntHistogram: { + Name: invalidIntHistogram, + Data: &otlp.Metric_IntHistogram{ + IntHistogram: &otlp.IntHistogram{ + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + }, + }, + }, + invalidDoubleHistogram: { + Name: invalidDoubleHistogram, + Data: &otlp.Metric_DoubleHistogram{ + DoubleHistogram: &otlp.DoubleHistogram{ + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + }, + }, + }, + } + + // different metrics that will cause the exporter to return an error + errorMetrics = map[string]*otlp.Metric{ + + nilDataPointIntGauge: { + Name: nilDataPointIntGauge, + Data: &otlp.Metric_IntGauge{ + IntGauge: &otlp.IntGauge{DataPoints: nil}, + }, + }, + nilDataPointDoubleGauge: { + Name: nilDataPointDoubleGauge, + Data: &otlp.Metric_DoubleGauge{ + DoubleGauge: &otlp.DoubleGauge{DataPoints: nil}, + }, + }, + nilDataPointIntSum: { + Name: nilDataPointIntSum, + Data: &otlp.Metric_IntSum{ + IntSum: &otlp.IntSum{ + DataPoints: nil, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + nilDataPointDoubleSum: { + Name: nilDataPointDoubleSum, + Data: &otlp.Metric_DoubleSum{ + DoubleSum: &otlp.DoubleSum{ + DataPoints: nil, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + nilDataPointIntHistogram: { + Name: nilDataPointIntHistogram, + Data: &otlp.Metric_IntHistogram{ + IntHistogram: &otlp.IntHistogram{ + DataPoints: nil, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + nilDataPointDoubleHistogram: { + Name: nilDataPointDoubleHistogram, + Data: &otlp.Metric_DoubleHistogram{ + DoubleHistogram: &otlp.DoubleHistogram{ + DataPoints: nil, + AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }, + }, + }, + nilDataPointDoubleSummary: { + Name: nilDataPointDoubleSummary, + Data: &otlp.Metric_DoubleSummary{ + DoubleSummary: &otlp.DoubleSummary{ + DataPoints: nil, + }, + }, + }, + } +) + +// OTLP metrics +// labels must come in pairs +func getLabels(labels ...string) []commonpb.StringKeyValue { + var set []commonpb.StringKeyValue + for i := 0; i < len(labels); i += 2 { + set = append(set, commonpb.StringKeyValue{ + Key: labels[i], + Value: labels[i+1], + }) + } + return set +} + +func getIntDataPoint(labels []commonpb.StringKeyValue, value int64, ts uint64) *otlp.IntDataPoint { + return &otlp.IntDataPoint{ + Labels: labels, + StartTimeUnixNano: 0, + TimeUnixNano: ts, + Value: value, + } +} + +func getDoubleDataPoint(labels []commonpb.StringKeyValue, value float64, ts uint64) *otlp.DoubleDataPoint { + return &otlp.DoubleDataPoint{ + Labels: labels, + StartTimeUnixNano: 0, + TimeUnixNano: ts, + Value: value, + } +} + +func getIntHistogramDataPoint(labels []commonpb.StringKeyValue, ts uint64, sum float64, count uint64, bounds []float64, + buckets []uint64) *otlp.IntHistogramDataPoint { + return &otlp.IntHistogramDataPoint{ + Labels: labels, + StartTimeUnixNano: 0, + TimeUnixNano: 
ts,
+		Count:          count,
+		Sum:            int64(sum),
+		BucketCounts:   buckets,
+		ExplicitBounds: bounds,
+		Exemplars:      nil,
+	}
+}
+
+func getDoubleHistogramDataPoint(labels []commonpb.StringKeyValue, ts uint64, sum float64, count uint64,
+	bounds []float64, buckets []uint64) *otlp.DoubleHistogramDataPoint {
+	return &otlp.DoubleHistogramDataPoint{
+		Labels:         labels,
+		TimeUnixNano:   ts,
+		Count:          count,
+		Sum:            sum,
+		BucketCounts:   buckets,
+		ExplicitBounds: bounds,
+	}
+}
+
+func getDoubleSummaryDataPoint(labels []commonpb.StringKeyValue, ts uint64, sum float64, count uint64,
+	quantiles []*otlp.DoubleSummaryDataPoint_ValueAtQuantile) *otlp.DoubleSummaryDataPoint {
+	return &otlp.DoubleSummaryDataPoint{
+		Labels:         labels,
+		TimeUnixNano:   ts,
+		Count:          count,
+		Sum:            sum,
+		QuantileValues: quantiles,
+	}
+}
+
+// Prometheus TimeSeries
+func getPromLabels(lbs ...string) []prompb.Label {
+	pbLbs := prompb.Labels{
+		Labels: []prompb.Label{},
+	}
+	for i := 0; i < len(lbs); i += 2 {
+		pbLbs.Labels = append(pbLbs.Labels, getLabel(lbs[i], lbs[i+1]))
+	}
+	return pbLbs.Labels
+}
+
+func getLabel(name string, value string) prompb.Label {
+	return prompb.Label{
+		Name:  name,
+		Value: value,
+	}
+}
+
+func getSample(v float64, t int64) prompb.Sample {
+	return prompb.Sample{
+		Value:     v,
+		Timestamp: t,
+	}
+}
+
+func getTimeSeries(labels []prompb.Label, samples ...prompb.Sample) *prompb.TimeSeries {
+	return &prompb.TimeSeries{
+		Labels:  labels,
+		Samples: samples,
+	}
+}
+
+func getQuantiles(bounds []float64, values []float64) []*otlp.DoubleSummaryDataPoint_ValueAtQuantile {
+	quantiles := make([]*otlp.DoubleSummaryDataPoint_ValueAtQuantile, len(bounds))
+	for i := 0; i < len(bounds); i++ {
+		quantiles[i] = &otlp.DoubleSummaryDataPoint_ValueAtQuantile{
+			Quantile: bounds[i],
+			Value:    values[i],
+		}
+	}
+	return quantiles
+}
+
+func getTimeseriesMap(timeseries []*prompb.TimeSeries) map[string]*prompb.TimeSeries {
+	tsMap := make(map[string]*prompb.TimeSeries)
+	for i, v := range timeseries {
+		tsMap[fmt.Sprintf("%s%d", "timeseries_name", i)] = v
+	}
+	return tsMap
+}
diff --git a/internal/otel_collector/exporter/zipkinexporter/README.md b/internal/otel_collector/exporter/zipkinexporter/README.md
new file mode 100644
index 00000000000..2afa2a42b22
--- /dev/null
+++ b/internal/otel_collector/exporter/zipkinexporter/README.md
@@ -0,0 +1,51 @@
+# Zipkin Exporter
+
+Exports data to a [Zipkin](https://zipkin.io/) back-end.
+By default, this exporter requires TLS and offers queued retry capabilities.
+
+Supported pipeline types: traces
+
+## Getting Started
+
+The following settings are required:
+
+- `endpoint` (no default): URL to which the exporter is going to send Zipkin trace data.
+- `format` (default = `JSON`): The format to send events in. Can be set to `JSON` or `proto`.
+
+By default, TLS is enabled:
+
+- `insecure` (default = `false`): whether to enable client transport security for
+  the exporter's connection.
+
+As a result, the following parameters are also required:
+
+- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+The following settings are optional:
+
+- `default_service_name` (default = ``): The service name to use for spans that are
+  missing this information.
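The stock example below only exercises `endpoint` and the TLS settings, so here is an additional sketch showing the `format` and `default_service_name` fields documented above (the exporter key and all values are illustrative):

```yaml
exporters:
  zipkin/proto:
    endpoint: "http://some.url:9411/api/v2/spans"
    format: proto
    default_service_name: unknown-service
```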
+ +Example: + +```yaml +exporters: + zipkin: + endpoint: "http://some.url:9411/api/v2/spans" + cert_file: file.cert + key_file: file.key + zipkin/2: + endpoint: "http://some.url:9411/api/v2/spans" + insecure: true +``` + +## Advanced Configuration + +Several helper files are leveraged to provide additional capabilities automatically: + +- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/confighttp/README.md) +- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md) +- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md) diff --git a/internal/otel_collector/exporter/zipkinexporter/config.go b/internal/otel_collector/exporter/zipkinexporter/config.go new file mode 100644 index 00000000000..280c31a932f --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/config.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinexporter + +import ( + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +// Config defines configuration settings for the Zipkin exporter. +type Config struct { + configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + exporterhelper.QueueSettings `mapstructure:"sending_queue"` + exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` + + // Configures the exporter client. + // The Endpoint to send the Zipkin trace data to (e.g.: http://some.url:9411/api/v2/spans). + confighttp.HTTPClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + + Format string `mapstructure:"format"` + + DefaultServiceName string `mapstructure:"default_service_name"` +} diff --git a/internal/otel_collector/exporter/zipkinexporter/config_test.go b/internal/otel_collector/exporter/zipkinexporter/config_test.go new file mode 100644 index 00000000000..c242ac3366b --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/config_test.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zipkinexporter + +import ( + "context" + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + e0 := cfg.Exporters["zipkin"] + + // URL doesn't have a default value so set it directly. + defaultCfg := factory.CreateDefaultConfig().(*Config) + defaultCfg.Endpoint = "http://some.location.org:9411/api/v2/spans" + assert.Equal(t, defaultCfg, e0) + assert.Equal(t, "json", e0.(*Config).Format) + + e1 := cfg.Exporters["zipkin/2"] + assert.Equal(t, &Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "zipkin/2", + TypeVal: "zipkin", + }, + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 10 * time.Second, + MaxInterval: 1 * time.Minute, + MaxElapsedTime: 10 * time.Minute, + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://somedest:1234/api/v2/spans", + WriteBufferSize: 524288, + Timeout: 5 * time.Second, + }, + Format: "proto", + DefaultServiceName: "test_name", + }, e1) + params := component.ExporterCreateParams{Logger: zap.NewNop()} + _, err = factory.CreateTracesExporter(context.Background(), params, e1) + require.NoError(t, err) +} diff --git a/internal/otel_collector/exporter/zipkinexporter/factory.go b/internal/otel_collector/exporter/zipkinexporter/factory.go new file mode 100644 index 00000000000..daf5d1b5411 --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/factory.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinexporter + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "zipkin" + + defaultTimeout = time.Second * 5 + + defaultFormat = "json" + + defaultServiceName string = "" +) + +// NewFactory creates a factory for Zipkin exporter. 
+func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTraceExporter)) +} + +func createDefaultConfig() configmodels.Exporter { + return &Config{ + ExporterSettings: configmodels.ExporterSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + HTTPClientSettings: confighttp.HTTPClientSettings{ + Timeout: defaultTimeout, + // We almost read 0 bytes, so no need to tune ReadBufferSize. + WriteBufferSize: 512 * 1024, + }, + Format: defaultFormat, + DefaultServiceName: defaultServiceName, + } +} + +func createTraceExporter( + _ context.Context, + params component.ExporterCreateParams, + cfg configmodels.Exporter, +) (component.TracesExporter, error) { + zc := cfg.(*Config) + + if zc.Endpoint == "" { + // TODO https://github.com/open-telemetry/opentelemetry-collector/issues/215 + return nil, errors.New("exporter config requires a non-empty 'endpoint'") + } + + ze, err := createZipkinExporter(zc) + if err != nil { + return nil, err + } + return exporterhelper.NewTraceExporter( + zc, + params.Logger, + ze.pushTraceData, + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithQueue(zc.QueueSettings), + exporterhelper.WithRetry(zc.RetrySettings)) +} diff --git a/internal/otel_collector/exporter/zipkinexporter/factory_test.go b/internal/otel_collector/exporter/zipkinexporter/factory_test.go new file mode 100644 index 00000000000..861287e333b --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/factory_test.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateInstanceViaFactory(t *testing.T) { + cfg := createDefaultConfig() + + // Default config doesn't have default endpoint so creating from it should + // fail. + ze, err := createTraceExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + assert.Error(t, err) + assert.Nil(t, ze) + + // URL doesn't have a default value so set it directly. 
+ zeCfg := cfg.(*Config) + zeCfg.Endpoint = "http://some.location.org:9411/api/v2/spans" + ze, err = createTraceExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) + assert.NoError(t, err) + assert.NotNil(t, ze) +} diff --git a/internal/otel_collector/exporter/zipkinexporter/testdata/config.yaml b/internal/otel_collector/exporter/zipkinexporter/testdata/config.yaml new file mode 100644 index 00000000000..0a68945d00f --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/testdata/config.yaml @@ -0,0 +1,29 @@ +receivers: + examplereceiver: + +processors: + exampleprocessor: + +exporters: + zipkin: + endpoint: "http://some.location.org:9411/api/v2/spans" + zipkin/2: + endpoint: "https://somedest:1234/api/v2/spans" + format: proto + default_service_name: test_name + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [zipkin, zipkin/2] diff --git a/internal/otel_collector/exporter/zipkinexporter/testutils_test.go b/internal/otel_collector/exporter/zipkinexporter/testutils_test.go new file mode 100644 index 00000000000..baee904b9ee --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/testutils_test.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinexporter + +import ( + "encoding/json" + "testing" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + "github.com/stretchr/testify/require" +) + +func unmarshalZipkinSpanArrayToMap(t *testing.T, jsonStr string) map[zipkinmodel.ID]*zipkinmodel.SpanModel { + var i interface{} + + err := json.Unmarshal([]byte(jsonStr), &i) + require.NoError(t, err) + + results := make(map[zipkinmodel.ID]*zipkinmodel.SpanModel) + + switch x := i.(type) { + case []interface{}: + for _, j := range x { + span := jsonToSpan(t, j) + results[span.ID] = span + } + default: + span := jsonToSpan(t, x) + results[span.ID] = span + } + return results +} + +func jsonToSpan(t *testing.T, j interface{}) *zipkinmodel.SpanModel { + b, err := json.Marshal(j) + require.NoError(t, err) + span := &zipkinmodel.SpanModel{} + err = span.UnmarshalJSON(b) + require.NoError(t, err) + return span +} diff --git a/internal/otel_collector/exporter/zipkinexporter/zipkin.go b/internal/otel_collector/exporter/zipkinexporter/zipkin.go new file mode 100644 index 00000000000..8054a2244a7 --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/zipkin.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinexporter + +import ( + "bytes" + "context" + "fmt" + "net/http" + + "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" + zipkinreporter "github.com/openzipkin/zipkin-go/reporter" + + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/trace/zipkin" +) + +// zipkinExporter is a multiplexing exporter that spawns a new OpenCensus-Go Zipkin +// exporter per unique node encountered. This is because serviceNames per node define +// unique services, alongside their IPs. Also it is useful to receive traffic from +// Zipkin servers and then transform them back to the final form when creating an +// OpenCensus spandata. +type zipkinExporter struct { + defaultServiceName string + + url string + client *http.Client + serializer zipkinreporter.SpanSerializer +} + +func createZipkinExporter(cfg *Config) (*zipkinExporter, error) { + client, err := cfg.HTTPClientSettings.ToClient() + if err != nil { + return nil, err + } + + ze := &zipkinExporter{ + defaultServiceName: cfg.DefaultServiceName, + url: cfg.Endpoint, + client: client, + } + + switch cfg.Format { + case "json": + ze.serializer = zipkinreporter.JSONSerializer{} + case "proto": + ze.serializer = zipkin_proto3.SpanSerializer{} + default: + return nil, fmt.Errorf("%s is not one of json or proto", cfg.Format) + } + + return ze, nil +} + +func (ze *zipkinExporter) pushTraceData(ctx context.Context, td pdata.Traces) (int, error) { + tbatch, err := zipkin.InternalTracesToZipkinSpans(td) + if err != nil { + return td.SpanCount(), consumererror.Permanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err)) + } + + body, err := ze.serializer.Serialize(tbatch) + if err != nil { + return td.SpanCount(), consumererror.Permanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err)) + } + + req, err := http.NewRequestWithContext(ctx, "POST", ze.url, bytes.NewReader(body)) + if err != nil { + return td.SpanCount(), fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err) + } + req.Header.Set("Content-Type", ze.serializer.ContentType()) + + resp, err := ze.client.Do(req) + if err != nil { + return td.SpanCount(), fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err) + } + _ = resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return td.SpanCount(), fmt.Errorf("failed the request with status code %d", resp.StatusCode) + } + return 0, nil +} diff --git a/internal/otel_collector/exporter/zipkinexporter/zipkin_test.go b/internal/otel_collector/exporter/zipkinexporter/zipkin_test.go new file mode 100644 index 00000000000..f9d81c06793 --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/zipkin_test.go @@ -0,0 +1,359 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinexporter + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" + zipkinreporter "github.com/openzipkin/zipkin-go/reporter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/receiver/zipkinreceiver" + "go.opentelemetry.io/collector/testutil" +) + +// This function tests that Zipkin spans that are received then processed roundtrip +// back to almost the same JSON with differences: +// a) Go's net.IP.String intentional shortens 0s with "::" but also converts to hex values +// so +// "7::0.128.128.127" +// becomes +// "7::80:807f" +// +// The rest of the fields should match up exactly +func TestZipkinExporter_roundtripJSON(t *testing.T) { + buf := new(bytes.Buffer) + var sizes []int64 + cst := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s, _ := io.Copy(buf, r.Body) + sizes = append(sizes, s) + r.Body.Close() + })) + defer cst.Close() + + config := &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: cst.URL, + }, + Format: "json", + } + zexp, err := NewFactory().CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, config) + assert.NoError(t, err) + require.NotNil(t, zexp) + + // The test requires the spans from zipkinSpansJSONJavaLibrary to be sent in a single batch, use + // a mock to ensure that this happens as intended. + mzr := newMockZipkinReporter(cst.URL) + + // Run the Zipkin receiver to "receive spans upload from a client application" + addr := testutil.GetAvailableLocalAddress(t) + cfg := &zipkinreceiver.Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: "zipkin_receiver", + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: addr, + }, + } + zi, err := zipkinreceiver.New(cfg, zexp) + assert.NoError(t, err) + require.NotNil(t, zi) + + require.NoError(t, zi.Start(context.Background(), componenttest.NewNopHost())) + defer zi.Shutdown(context.Background()) + + // Let the receiver receive "uploaded Zipkin spans from a Java client application" + req, _ := http.NewRequest("POST", "https://tld.org/", strings.NewReader(zipkinSpansJSONJavaLibrary)) + responseWriter := httptest.NewRecorder() + zi.ServeHTTP(responseWriter, req) + + // Use the mock zipkin reporter to ensure all expected spans in a single batch. Since Flush waits for + // server response there is no need for further synchronization. 
+ require.NoError(t, mzr.Flush()) + + // We expect back the exact JSON that was received + wants := []string{` + [{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385","parentId": "86154a4ba6e91385","id": "4d1e00c0db9010db", + "kind": "CLIENT","name": "get", + "timestamp": 1472470996199000,"duration": 207000, + "localEndpoint": {"serviceName": "frontend","ipv6": "7::80:807f"}, + "remoteEndpoint": {"serviceName": "backend","ipv4": "192.168.99.101","port": 9000}, + "annotations": [ + {"timestamp": 1472470996238000,"value": "foo"}, + {"timestamp": 1472470996403000,"value": "bar"} + ], + "tags": {"http.path": "/api","clnt/finagle.version": "6.45.0"} + }, + { + "traceId": "4d1e00c0db9010db86154a4ba6e91385","parentId": "86154a4ba6e91386","id": "4d1e00c0db9010dc", + "kind": "SERVER","name": "put", + "timestamp": 1472470996199000,"duration": 207000, + "localEndpoint": {"serviceName": "frontend","ipv6": "7::80:807f"}, + "remoteEndpoint": {"serviceName": "frontend", "ipv4": "192.168.99.101","port": 9000}, + "annotations": [ + {"timestamp": 1472470996238000,"value": "foo"}, + {"timestamp": 1472470996403000,"value": "bar"} + ], + "tags": {"http.path": "/api","clnt/finagle.version": "6.45.0"} + }, + { + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91386", + "id": "4d1e00c0db9010dd", + "kind": "SERVER", + "name": "put", + "timestamp": 1472470996199000, + "duration": 207000 + }] + `} + for i, s := range wants { + want := unmarshalZipkinSpanArrayToMap(t, s) + gotBytes := buf.Next(int(sizes[i])) + got := unmarshalZipkinSpanArrayToMap(t, string(gotBytes)) + for id, expected := range want { + actual, ok := got[id] + assert.True(t, ok) + assert.Equal(t, expected.ID, actual.ID) + assert.Equal(t, expected.Name, actual.Name) + assert.Equal(t, expected.TraceID, actual.TraceID) + assert.Equal(t, expected.Timestamp, actual.Timestamp) + assert.Equal(t, expected.Duration, actual.Duration) + assert.Equal(t, expected.Kind, actual.Kind) + } + } +} + +type mockZipkinReporter struct { + url string + client *http.Client + batch []*zipkinmodel.SpanModel + serializer zipkinreporter.SpanSerializer +} + +var _ zipkinreporter.Reporter = (*mockZipkinReporter)(nil) + +func (r *mockZipkinReporter) Send(span zipkinmodel.SpanModel) { + r.batch = append(r.batch, &span) +} +func (r *mockZipkinReporter) Close() error { + return nil +} + +func newMockZipkinReporter(url string) *mockZipkinReporter { + return &mockZipkinReporter{ + url: url, + client: &http.Client{}, + serializer: zipkinreporter.JSONSerializer{}, + } +} + +func (r *mockZipkinReporter) Flush() error { + sendBatch := r.batch + r.batch = nil + + if len(sendBatch) == 0 { + return nil + } + + body, err := r.serializer.Serialize(sendBatch) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", r.url, bytes.NewReader(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", r.serializer.ContentType()) + + resp, err := r.client.Do(req) + if err != nil { + return err + } + _ = resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("http request failed with status code %d", resp.StatusCode) + } + + return nil +} + +const zipkinSpansJSONJavaLibrary = ` +[{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", 
+ "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91386", + "id": "4d1e00c0db9010dc", + "kind": "SERVER", + "name": "put", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "frontend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91386", + "id": "4d1e00c0db9010dd", + "kind": "SERVER", + "name": "put", + "timestamp": 1472470996199000, + "duration": 207000 +}] +` + +func TestZipkinExporter_invalidFormat(t *testing.T) { + config := &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "1.2.3.4", + }, + Format: "foobar", + } + f := NewFactory() + params := component.ExporterCreateParams{Logger: zap.NewNop()} + _, err := f.CreateTracesExporter(context.Background(), params, config) + require.Error(t, err) +} + +// The rest of the fields should match up exactly +func TestZipkinExporter_roundtripProto(t *testing.T) { + buf := new(bytes.Buffer) + var contentType string + cst := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.Copy(buf, r.Body) + contentType = r.Header.Get("Content-Type") + r.Body.Close() + })) + defer cst.Close() + + config := &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: cst.URL, + }, + Format: "proto", + } + zexp, err := NewFactory().CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, config) + require.NoError(t, err) + + // The test requires the spans from zipkinSpansJSONJavaLibrary to be sent in a single batch, use + // a mock to ensure that this happens as intended. + mzr := newMockZipkinReporter(cst.URL) + + mzr.serializer = zipkin_proto3.SpanSerializer{} + + // Run the Zipkin receiver to "receive spans upload from a client application" + port := testutil.GetAvailablePort(t) + cfg := &zipkinreceiver.Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: "zipkin_receiver", + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: fmt.Sprintf(":%d", port), + }, + } + zi, err := zipkinreceiver.New(cfg, zexp) + require.NoError(t, err) + + err = zi.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + defer zi.Shutdown(context.Background()) + + // Let the receiver receive "uploaded Zipkin spans from a Java client application" + req, _ := http.NewRequest("POST", "https://tld.org/", strings.NewReader(zipkinSpansJSONJavaLibrary)) + responseWriter := httptest.NewRecorder() + zi.ServeHTTP(responseWriter, req) + + // Use the mock zipkin reporter to ensure all expected spans in a single batch. Since Flush waits for + // server response there is no need for further synchronization. 
+	err = mzr.Flush()
+	require.NoError(t, err)
+
+	require.Equal(t, zipkin_proto3.SpanSerializer{}.ContentType(), contentType)
+	// Finally we need to inspect the output
+	gotBytes, err := ioutil.ReadAll(buf)
+	require.NoError(t, err)
+
+	_, err = zipkin_proto3.ParseSpans(gotBytes, false)
+	require.NoError(t, err)
+}
diff --git a/internal/otel_collector/extension/README.md b/internal/otel_collector/extension/README.md
new file mode 100644
index 00000000000..8aaa8286049
--- /dev/null
+++ b/internal/otel_collector/extension/README.md
@@ -0,0 +1,107 @@
+# General Information
+
+Extensions provide capabilities on top of the primary functionality of the
+collector. Generally, extensions are used for implementing components that can
+be added to the Collector, but which do not require direct access to telemetry
+data and are not part of the pipelines (like receivers, processors or
+exporters). Example extensions are: the Health Check extension that responds to
+health check requests, or the PProf extension that allows fetching the Collector's
+performance profile.
+
+Supported service extensions (sorted alphabetically):
+
+- [Health Check](healthcheckextension/README.md)
+- [Performance Profiler](pprofextension/README.md)
+- [zPages](zpagesextension/README.md)
+
+The [contributors
+repository](https://github.com/open-telemetry/opentelemetry-collector-contrib)
+may have more extensions that can be added to custom builds of the Collector.
+
+## Ordering Extensions
+
+The order in which extensions are specified for the service is important: it is the
+order in which each extension is started, and the reverse of the order in which they
+are shut down. The ordering is determined by the `extensions` tag under the
+`service` tag in the configuration file, for example:
+
+```yaml
+service:
+  # Extensions specified below are going to be loaded by the service in the
+  # order given below, and shut down in reverse order.
+  extensions: [health_check, pprof, zpages]
+```
+
+# Extensions
+
+## Health Check
+The Health Check extension enables an HTTP URL that can be probed to check the
+status of the OpenTelemetry Collector. This extension can be used as a
+liveness and/or readiness probe on Kubernetes.
+
+The following settings are required:
+
+- `port` (default = 13133): The port on which to expose HTTP health information.
+
+Example:
+
+```yaml
+extensions:
+  health_check:
+```
+
+The full list of settings exposed for this extension is documented [here](healthcheckextension/config.go)
+with detailed sample configurations [here](healthcheckextension/testdata/config.yaml).
+
+## Performance Profiler
+
+The Performance Profiler extension enables the golang `net/http/pprof` endpoint.
+This is typically used by developers to collect performance profiles and
+investigate issues with the service.
+
+The following settings are required:
+
+- `endpoint` (default = localhost:1777): The endpoint on which pprof listens.
+- `block_profile_fraction` (default = 0): Fraction of blocking events that
+are profiled. A value <= 0 disables profiling. See
+https://golang.org/pkg/runtime/#SetBlockProfileRate for details.
+- `mutex_profile_fraction` (default = 0): Fraction of mutex contention
+events that are profiled. A value <= 0 disables profiling. See
+https://golang.org/pkg/runtime/#SetMutexProfileFraction for details.
+
+The following settings can be optionally configured:
+
+- `save_to_file`: File name to save the CPU profile to. Profiling starts when the
+Collector starts, and the profile is written to the file when the Collector terminates.
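Before the minimal example that follows, a sketch of a pprof configuration exercising the settings documented above (all values are illustrative):

```yaml
extensions:
  pprof:
    endpoint: "localhost:1777"
    block_profile_fraction: 3
    mutex_profile_fraction: 5
    save_to_file: /var/log/collector.pprof
```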
+
+Example:
+
+```yaml
+extensions:
+  pprof:
+```
+
+The full list of settings exposed for this extension is documented [here](pprofextension/config.go)
+with detailed sample configurations [here](pprofextension/testdata/config.yaml).
+
+## zPages
+
+Enables an extension that serves zPages, an HTTP endpoint that provides live
+data for debugging the components that have been instrumented to expose it.
+All core exporters and receivers provide some zPages instrumentation.
+
+The following settings are required:
+
+- `endpoint` (default = localhost:55679): Specifies the HTTP endpoint that serves
+zPages.
+
+Example:
+
+```yaml
+extensions:
+  zpages:
+```
+
+The full list of settings exposed for this extension is documented [here](zpagesextension/config.go)
+with detailed sample configurations [here](zpagesextension/testdata/config.yaml).
diff --git a/internal/otel_collector/extension/extensionhelper/factory.go b/internal/otel_collector/extension/extensionhelper/factory.go
new file mode 100644
index 00000000000..125901ba598
--- /dev/null
+++ b/internal/otel_collector/extension/extensionhelper/factory.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extensionhelper
+
+import (
+	"context"
+
+	"github.com/spf13/viper"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// FactoryOption applies changes to the factory being constructed.
+type FactoryOption func(o *factory)
+
+// CreateDefaultConfig is the equivalent of component.ExtensionFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() configmodels.Extension
+
+// CreateServiceExtension is the equivalent of component.ExtensionFactory.CreateExtension()
+type CreateServiceExtension func(context.Context, component.ExtensionCreateParams, configmodels.Extension) (component.ServiceExtension, error)
+
+type factory struct {
+	cfgType                configmodels.Type
+	customUnmarshaler      component.CustomUnmarshaler
+	createDefaultConfig    CreateDefaultConfig
+	createServiceExtension CreateServiceExtension
+}
+
+// WithCustomUnmarshaler makes the returned factory implement
+// component.ConfigUnmarshaler using the given custom unmarshaler.
+func WithCustomUnmarshaler(customUnmarshaler component.CustomUnmarshaler) FactoryOption {
+	return func(o *factory) {
+		o.customUnmarshaler = customUnmarshaler
+	}
+}
+
+// NewFactory returns a component.ExtensionFactory.
+func NewFactory(
+	cfgType configmodels.Type,
+	createDefaultConfig CreateDefaultConfig,
+	createServiceExtension CreateServiceExtension,
+	options ...FactoryOption) component.ExtensionFactory {
+	f := &factory{
+		cfgType:                cfgType,
+		createDefaultConfig:    createDefaultConfig,
+		createServiceExtension: createServiceExtension,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	var ret component.ExtensionFactory
+	if f.customUnmarshaler != nil {
+		ret = &factoryWithUnmarshaler{f}
+	} else {
+		ret = f
+	}
+	return ret
+}
+
+// Type gets the type of the Extension config created by this factory.
+func (f *factory) Type() configmodels.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the extension.
+func (f *factory) CreateDefaultConfig() configmodels.Extension {
+	return f.createDefaultConfig()
+}
+
+// CreateExtension creates a component.ServiceExtension based on this config.
+func (f *factory) CreateExtension(
+	ctx context.Context,
+	params component.ExtensionCreateParams,
+	cfg configmodels.Extension) (component.ServiceExtension, error) {
+	return f.createServiceExtension(ctx, params, cfg)
+}
+
+var _ component.ConfigUnmarshaler = (*factoryWithUnmarshaler)(nil)
+
+type factoryWithUnmarshaler struct {
+	*factory
+}
+
+// Unmarshal unmarshals the config using the provided custom unmarshaler.
+func (f *factoryWithUnmarshaler) Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	return f.customUnmarshaler(componentViperSection, intoCfg)
+}
diff --git a/internal/otel_collector/extension/extensionhelper/factory_test.go b/internal/otel_collector/extension/extensionhelper/factory_test.go
new file mode 100644
index 00000000000..b5d0f079ec1
--- /dev/null
+++ b/internal/otel_collector/extension/extensionhelper/factory_test.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package extensionhelper + +import ( + "context" + "errors" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" +) + +const typeStr = "test" + +var ( + defaultCfg = &configmodels.ExtensionSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + nopExtensionInstance = new(nopExtension) +) + +func TestNewFactory(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig, + createExtension) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + ext, err := factory.CreateExtension(context.Background(), component.ExtensionCreateParams{}, defaultCfg) + assert.NoError(t, err) + assert.Same(t, nopExtensionInstance, ext) +} + +func TestNewFactory_WithConstructors(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig, + createExtension, + WithCustomUnmarshaler(customUnmarshaler)) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + + fu, ok := factory.(component.ConfigUnmarshaler) + assert.True(t, ok) + assert.Equal(t, errors.New("my error"), fu.Unmarshal(nil, nil)) + + ext, err := factory.CreateExtension(context.Background(), component.ExtensionCreateParams{}, defaultCfg) + assert.NoError(t, err) + assert.Same(t, nopExtensionInstance, ext) +} + +func defaultConfig() configmodels.Extension { + return defaultCfg +} + +func createExtension(context.Context, component.ExtensionCreateParams, configmodels.Extension) (component.ServiceExtension, error) { + return nopExtensionInstance, nil +} + +func customUnmarshaler(*viper.Viper, interface{}) error { + return errors.New("my error") +} + +type nopExtension struct { +} + +func (ne *nopExtension) Start(context.Context, component.Host) error { + return nil +} + +// Shutdown stops the exporter and is invoked during shutdown. +func (ne *nopExtension) Shutdown(context.Context) error { + return nil +} diff --git a/internal/otel_collector/extension/fluentbitextension/README.md b/internal/otel_collector/extension/fluentbitextension/README.md new file mode 100644 index 00000000000..27e43d4ba7c --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/README.md @@ -0,0 +1,65 @@ +# FluentBit Subprocess Extension + +**This extension is experimental and may receive breaking changes or be removed +at any time.** + +The `fluentbit` extension facilitates running a FluentBit subprocess of the +collector. This is meant to be used in conjunction with the `fluentforward` +receiver such that the FluentBit subprocess will be configured to send to the +TCP socket opened by the `fluentforward` receiver. This extension does not +actually listen for the logs from FluentBit, it just starts a FluentBit +subprocess that will generally send to a `fluentforward` receiver, which must +be configured separately. + +You are responsible for providing a configuration to FluentBit via the `config` +config option. This will be provided to the subprocess, along with a few other +config options to enhance the integration with the collector. + +**As of now, this extension is only targeted for Linux environments. 
It does not
+work on Windows or macOS.**
+
+
+## Example Config
+
+```yaml
+extensions:
+  health_check:
+  fluentbit:
+    executable_path: /usr/src/fluent-bit/build/bin/fluent-bit
+    tcp_endpoint: 127.0.0.1:8006
+    config: |
+      [SERVICE]
+        parsers_file /usr/src/fluent-bit/conf/parsers.conf
+      [INPUT]
+        name tail
+        path /var/log/mylog
+        parser apache
+receivers:
+  fluentforward:
+    endpoint: 0.0.0.0:8006
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: 'otel-collector'
+          scrape_interval: 1s
+          static_configs:
+            - targets: ['127.0.0.1:8888']
+        # This will connect to the Fluent Bit subprocess's built-in HTTP
+        # monitoring server to grab Prometheus metrics.
+        - job_name: 'fluentbit'
+          scrape_interval: 1s
+          metrics_path: '/api/v1/metrics/prometheus'
+          static_configs:
+            - targets: ['127.0.0.1:2020']
+service:
+  pipelines:
+    logs:
+      receivers: [fluentforward]
+      processors: []
+      exporters: [mylogsexporter]
+    metrics:
+      receivers: [prometheus]
+      processors: [batch]
+      exporters: [mymetricsexporter]
+  extensions: [health_check, zpages, fluentbit, pprof]
+```
diff --git a/internal/otel_collector/extension/fluentbitextension/config.go b/internal/otel_collector/extension/fluentbitextension/config.go
new file mode 100644
index 00000000000..5696c0f0553
--- /dev/null
+++ b/internal/otel_collector/extension/fluentbitextension/config.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentbitextension
+
+import (
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config has the configuration for the fluentbit extension.
+type Config struct {
+	configmodels.ExtensionSettings `mapstructure:",squash"`
+
+	// The TCP `host:port` to which the subprocess should send log entries.
+	// This is required unless you are overriding `args` and providing the
+	// output configuration yourself either in `args` or `config`.
+	TCPEndpoint string `mapstructure:"tcp_endpoint"`
+
+	// The path to the executable for FluentBit. This should ideally be an
+	// absolute path, since the CWD of the collector is not guaranteed to be stable.
+	ExecutablePath string `mapstructure:"executable_path"`
+
+	// Exec arguments to the FluentBit process. If you provide this, none of
+	// the standard args will be set, and only these provided args will be
+	// passed to FluentBit. The standard args will set the flush interval to 1
+	// second, configure the forward output with the given `tcp_endpoint`
+	// option, enable the HTTP monitoring server in FluentBit, and set the
+	// config file to stdin. The only required arg is `--config=/dev/stdin`,
+	// since this extension passes the provided config to FluentBit via stdin.
+	// If you set args manually, you will be responsible for setting the
+	// forward output to the right port for the fluentforward receiver. See
+	// `process.go#constructArgs` of this extension source to see the current
+	// default args.
+	Args []string `mapstructure:"args"`
+
+	// A configuration for FluentBit.
This is the text content of the config + // itself, not a path to a config file. + Config string `mapstructure:"config"` +} diff --git a/internal/otel_collector/extension/fluentbitextension/config_test.go b/internal/otel_collector/extension/fluentbitextension/config_test.go new file mode 100644 index 00000000000..35d81d76766 --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/config_test.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentbitextension + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Extensions[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.Nil(t, err) + require.NotNil(t, cfg) + + ext0 := cfg.Extensions["fluentbit"] + assert.Equal(t, factory.CreateDefaultConfig(), ext0) + + ext1 := cfg.Extensions["fluentbit/1"] + assert.Equal(t, + &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: "fluentbit", + NameVal: "fluentbit/1", + }, + ExecutablePath: "/usr/local/bin/fluent-bit", + }, + ext1) + + assert.Equal(t, 1, len(cfg.Service.Extensions)) + assert.Equal(t, "fluentbit/1", cfg.Service.Extensions[0]) +} diff --git a/internal/otel_collector/extension/fluentbitextension/factory.go b/internal/otel_collector/extension/fluentbitextension/factory.go new file mode 100644 index 00000000000..e4a40adf147 --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/factory.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentbitextension + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/extension/extensionhelper" +) + +const ( + // The value of extension "type" in configuration. + typeStr = "fluentbit" +) + +// NewFactory creates a factory for FluentBit extension. 
+func NewFactory() component.ExtensionFactory { + return extensionhelper.NewFactory( + typeStr, + createDefaultConfig, + createExtension) +} + +func createDefaultConfig() configmodels.Extension { + return &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + } +} + +func createExtension(_ context.Context, params component.ExtensionCreateParams, cfg configmodels.Extension) (component.ServiceExtension, error) { + config := cfg.(*Config) + return newProcessManager(config, params.Logger), nil +} diff --git a/internal/otel_collector/extension/fluentbitextension/factory_test.go b/internal/otel_collector/extension/fluentbitextension/factory_test.go new file mode 100644 index 00000000000..0b22c0e8743 --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/factory_test.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentbitextension + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" +) + +func TestFactory_CreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.Equal(t, &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + NameVal: typeStr, + TypeVal: typeStr, + }, + }, + cfg) + + assert.NoError(t, configcheck.ValidateConfig(cfg)) + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) +} + +func TestFactory_CreateExtension(t *testing.T) { + cfg := createDefaultConfig().(*Config) + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) +} diff --git a/internal/otel_collector/extension/fluentbitextension/process.go b/internal/otel_collector/extension/fluentbitextension/process.go new file mode 100644 index 00000000000..c16aa2bec08 --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/process.go @@ -0,0 +1,224 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fluentbitextension + +import ( + "bufio" + "context" + "io" + "os" + "os/exec" + "syscall" + "time" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +type processManager struct { + cancel context.CancelFunc + conf *Config + logger *zap.Logger + shutdownSignal chan struct{} +} + +func newProcessManager(conf *Config, logger *zap.Logger) *processManager { + return &processManager{ + conf: conf, + logger: logger, + shutdownSignal: make(chan struct{}), + } +} + +type procState string + +// A global var that is available only for testing +var restartDelay = 10 * time.Second + +const ( + Starting procState = "starting" + Running procState = "running" + ShuttingDown procState = "shutting-down" + Stopped procState = "stopped" + Restarting procState = "restarting" + Errored procState = "errored" +) + +func constructArgs(tcpEndpoint string) []string { + return []string{ + "--config=/dev/stdin", + "--http", + "--port=2020", + "--flush=1", + "-o", "forward://" + tcpEndpoint, + "--match=*", + } +} + +func (pm *processManager) Start(ctx context.Context, _ component.Host) error { + childCtx, cancel := context.WithCancel(ctx) + pm.cancel = cancel + + args := pm.conf.Args + if len(args) == 0 { + args = constructArgs(pm.conf.TCPEndpoint) + } + go func() { + run(childCtx, pm.conf.ExecutablePath, args, pm.conf.Config, pm.logger) + close(pm.shutdownSignal) + }() + return nil +} + +// Shutdown is invoked during service shutdown. +func (pm *processManager) Shutdown(context.Context) error { + pm.cancel() + t := time.NewTimer(5 * time.Second) + + // Wait for either the FluentBit process to terminate or the timeout + // period, whichever comes first. + select { + case <-pm.shutdownSignal: + case <-t.C: + } + + return nil +} + +func run(ctx context.Context, execPath string, args []string, config string, logger *zap.Logger) { + state := Starting + + var cmd *exec.Cmd + var err error + var stdin io.WriteCloser + var stdout io.ReadCloser + // procWait is guaranteed to be sent exactly one message per successful process start + procWait := make(chan error) + + // A state machine makes the management easier to understand and account + // for all of the edge cases when managing a subprocess. + for { + logger.Debug("Fluent extension changed state", zap.String("state", string(state))) + + switch state { + case Errored: + logger.Error("FluentBit process died", zap.Error(err)) + state = Restarting + + case Starting: + cmd, stdin, stdout = createCommand(execPath, args) + + logger.Debug("Starting fluent subprocess", zap.String("command", cmd.String())) + err = cmd.Start() + if err != nil { + state = Errored + continue + } + + go signalWhenProcessDone(cmd, procWait) + + state = Running + + case Running: + go collectOutput(stdout, logger) + + err = renderConfig(config, stdin) + stdin.Close() + if err != nil { + state = Errored + continue + } + + select { + case err = <-procWait: + if ctx.Err() == nil { + // We aren't supposed to shutdown yet so this is an error + // state. + state = Errored + continue + } + state = Stopped + case <-ctx.Done(): + state = ShuttingDown + } + + case ShuttingDown: + _ = cmd.Process.Signal(syscall.SIGTERM) + <-procWait + stdout.Close() + state = Stopped + + case Restarting: + _ = stdout.Close() + _ = stdin.Close() + + // Sleep for a bit so we don't have a hot loop on repeated failures. 
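+			// restartDelay defaults to 10 seconds; it is a package-level
+			// variable (rather than a constant) specifically so tests can
+			// shorten it.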
+ time.Sleep(restartDelay) + state = Starting + + case Stopped: + return + } + } +} + +func signalWhenProcessDone(cmd *exec.Cmd, procWait chan<- error) { + err := cmd.Wait() + procWait <- err +} + +func renderConfig(config string, writer io.Writer) error { + if config == "" { + return nil + } + + _, err := writer.Write([]byte(config)) + return err +} + +func createCommand(execPath string, args []string) (*exec.Cmd, io.WriteCloser, io.ReadCloser) { + cmd := exec.Command(execPath, args...) + + inReader, inWriter, err := os.Pipe() + if err != nil { + panic("Input pipe could not be created for subprocess") + } + + cmd.Stdin = inReader + + outReader, outWriter, err := os.Pipe() + // If this errors things are really wrong with the system + if err != nil { + panic("Output pipe could not be created for subprocess") + } + cmd.Stdout = outWriter + cmd.Stderr = outWriter + + cmd.Env = os.Environ() + + applyOSSpecificCmdModifications(cmd) + + return cmd, inWriter, outReader +} + +func collectOutput(stdout io.Reader, logger *zap.Logger) { + scanner := bufio.NewScanner(stdout) + + for scanner.Scan() { + logger.Debug(scanner.Text()) + } + // Returns when stdout is closed when the process ends +} diff --git a/internal/otel_collector/extension/fluentbitextension/process_linux.go b/internal/otel_collector/extension/fluentbitextension/process_linux.go new file mode 100644 index 00000000000..f5e945a7dd6 --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/process_linux.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fluentbitextension + +import ( + "os/exec" + "syscall" +) + +func applyOSSpecificCmdModifications(cmd *exec.Cmd) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + // This is Linux-specific and will cause the subprocess to be killed by the OS if + // the collector dies + Pdeathsig: syscall.SIGTERM, + } +} diff --git a/internal/otel_collector/extension/fluentbitextension/process_linux_test.go b/internal/otel_collector/extension/fluentbitextension/process_linux_test.go new file mode 100644 index 00000000000..5862f85f7bc --- /dev/null +++ b/internal/otel_collector/extension/fluentbitextension/process_linux_test.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fluentbitextension + +import ( + "context" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/shirou/gopsutil/process" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +const mockScript = `#!/bin/sh + +echo "Config:" 1>&2 +cat - + +sleep 100 + +` + +func setup(t *testing.T, conf *Config) (*processManager, **process.Process, func() bool, func()) { + logCore, logObserver := observer.New(zap.DebugLevel) + logger := zap.New(logCore) + + mockScriptFile, err := ioutil.TempFile("", "mocksubproc") + require.Nil(t, err) + + cleanup := func() { + spew.Dump(logObserver.All()) + os.Remove(mockScriptFile.Name()) + } + + _, err = mockScriptFile.Write([]byte(mockScript)) + require.Nil(t, err) + + err = mockScriptFile.Chmod(0700) + require.Nil(t, err) + + require.NoError(t, mockScriptFile.Close()) + + conf.ExecutablePath = mockScriptFile.Name() + pm := newProcessManager(conf, logger) + + var mockProc *process.Process + findSubproc := func() bool { + selfPid := os.Getpid() + procs, _ := process.Processes() + for _, proc := range procs { + if ppid, _ := proc.Ppid(); ppid == int32(selfPid) { + cmdline, _ := proc.Cmdline() + if strings.HasPrefix(cmdline, "/bin/sh "+mockScriptFile.Name()) { + mockProc = proc + return true + } + } + } + return false + } + + return pm, &mockProc, findSubproc, cleanup +} + +func TestProcessManager(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pm, mockProc, findSubproc, cleanup := setup(t, &Config{ + TCPEndpoint: "127.0.0.1:8000", + Config: "example config", + }) + defer cleanup() + + pm.Start(ctx, nil) + defer pm.Shutdown(ctx) + + require.Eventually(t, findSubproc, 12*time.Second, 100*time.Millisecond) + require.NotNil(t, *mockProc) + + cmdline, err := (*mockProc).Cmdline() + require.Nil(t, err) + require.Equal(t, + "/bin/sh "+pm.conf.ExecutablePath+ + " --config=/dev/stdin --http --port=2020 --flush=1 -o forward://127.0.0.1:8000 --match=*", + cmdline) + + oldProcPid := (*mockProc).Pid + err = (*mockProc).Kill() + require.NoError(t, err) + + // Should be restarted + require.Eventually(t, findSubproc, restartDelay+3*time.Second, 100*time.Millisecond) + require.NotNil(t, *mockProc) + + require.NotEqual(t, (*mockProc).Pid, oldProcPid) +} + +func TestProcessManagerArgs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pm, mockProc, findSubproc, cleanup := setup(t, &Config{ + TCPEndpoint: "127.0.0.1:8000", + Config: "example config", + Args: []string{"--http"}, + }) + defer cleanup() + + pm.Start(ctx, nil) + defer pm.Shutdown(ctx) + + require.Eventually(t, findSubproc, 12*time.Second, 100*time.Millisecond) + require.NotNil(t, *mockProc) + + cmdline, err := (*mockProc).Cmdline() + require.Nil(t, err) + require.Equal(t, + "/bin/sh "+pm.conf.ExecutablePath+ + " --http", + cmdline) +} + +func TestProcessManagerBadExec(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logCore, logObserver := observer.New(zap.DebugLevel) + logger := zap.New(logCore) + + pm := newProcessManager(&Config{ + ExecutablePath: "/does/not/exist", + TCPEndpoint: "127.0.0.1:8000", + Config: "example config", + }, logger) + + pm.Start(ctx, nil) + defer pm.Shutdown(ctx) + + time.Sleep(restartDelay + 2*time.Second) + require.Len(t, logObserver.FilterMessage("FluentBit process died").All(), 2) +} + +func TestProcessManagerEmptyConfig(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background())
+	defer cancel()
+
+	pm, mockProc, findSubproc, cleanup := setup(t, &Config{
+		TCPEndpoint: "127.0.0.1:8000",
+		Config:      "",
+	})
+	defer cleanup()
+
+	pm.Start(ctx, nil)
+	defer pm.Shutdown(ctx)
+
+	require.Eventually(t, findSubproc, 15*time.Second, 100*time.Millisecond)
+	require.NotNil(t, *mockProc)
+}
diff --git a/internal/otel_collector/extension/fluentbitextension/process_others.go b/internal/otel_collector/extension/fluentbitextension/process_others.go
new file mode 100644
index 00000000000..f6edff2faba
--- /dev/null
+++ b/internal/otel_collector/extension/fluentbitextension/process_others.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package fluentbitextension
+
+import (
+	"os/exec"
+)
+
+func applyOSSpecificCmdModifications(_ *exec.Cmd) {}
diff --git a/internal/otel_collector/extension/fluentbitextension/testdata/config.yaml b/internal/otel_collector/extension/fluentbitextension/testdata/config.yaml
new file mode 100644
index 00000000000..a7ff30fbe48
--- /dev/null
+++ b/internal/otel_collector/extension/fluentbitextension/testdata/config.yaml
@@ -0,0 +1,20 @@
+extensions:
+  fluentbit:
+  fluentbit/1:
+    executable_path: /usr/local/bin/fluent-bit
+
+service:
+  extensions: [fluentbit/1]
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
+
+# Data pipeline is required to load the config.
+receivers:
+  examplereceiver:
+processors:
+  exampleprocessor:
+exporters:
+  exampleexporter:
diff --git a/internal/otel_collector/extension/healthcheckextension/README.md b/internal/otel_collector/extension/healthcheckextension/README.md
new file mode 100644
index 00000000000..a57ab881dae
--- /dev/null
+++ b/internal/otel_collector/extension/healthcheckextension/README.md
@@ -0,0 +1,19 @@
+# Health Check
+
+Health Check extension enables an HTTP URL that can be probed to check the
+status of the OpenTelemetry Collector. This extension can be used as a
+liveness and/or readiness probe on Kubernetes.
+
+The following settings are required:
+
+- `port` (default = 13133): The port on which to expose HTTP health information.
+
+Example:
+
+```yaml
+extensions:
+  health_check:
+```
+
+The full list of settings exposed for this extension is documented [here](./config.go)
+with detailed sample configurations [here](./testdata/config.yaml).
diff --git a/internal/otel_collector/extension/healthcheckextension/config.go b/internal/otel_collector/extension/healthcheckextension/config.go
new file mode 100644
index 00000000000..9369260a01b
--- /dev/null
+++ b/internal/otel_collector/extension/healthcheckextension/config.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheckextension + +import ( + "go.opentelemetry.io/collector/config/configmodels" +) + +// Config has the configuration for the extension enabling the health check +// extension, used to report the health status of the service. +type Config struct { + configmodels.ExtensionSettings `mapstructure:",squash"` + + // Port is the port used to publish the health check status. + // The default value is 13133. + Port uint16 `mapstructure:"port"` +} diff --git a/internal/otel_collector/extension/healthcheckextension/config_test.go b/internal/otel_collector/extension/healthcheckextension/config_test.go new file mode 100644 index 00000000000..76c3c2c4272 --- /dev/null +++ b/internal/otel_collector/extension/healthcheckextension/config_test.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheckextension + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Extensions[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.Nil(t, err) + require.NotNil(t, cfg) + + ext0 := cfg.Extensions["health_check"] + assert.Equal(t, factory.CreateDefaultConfig(), ext0) + + ext1 := cfg.Extensions["health_check/1"] + assert.Equal(t, + &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: "health_check", + NameVal: "health_check/1", + }, + Port: 13, + }, + ext1) + + assert.Equal(t, 1, len(cfg.Service.Extensions)) + assert.Equal(t, "health_check/1", cfg.Service.Extensions[0]) +} diff --git a/internal/otel_collector/extension/healthcheckextension/doc.go b/internal/otel_collector/extension/healthcheckextension/doc.go new file mode 100644 index 00000000000..21f44479696 --- /dev/null +++ b/internal/otel_collector/extension/healthcheckextension/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package healthcheckextension implements an extension that enables an HTTP +// endpoint that can be used to check the overall health and status of the +// service. +package healthcheckextension diff --git a/internal/otel_collector/extension/healthcheckextension/factory.go b/internal/otel_collector/extension/healthcheckextension/factory.go new file mode 100644 index 00000000000..6d29f92d338 --- /dev/null +++ b/internal/otel_collector/extension/healthcheckextension/factory.go @@ -0,0 +1,73 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheckextension + +import ( + "context" + "errors" + "sync/atomic" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/extension/extensionhelper" +) + +const ( + // The value of extension "type" in configuration. + typeStr = "health_check" +) + +// NewFactory creates a factory for HealthCheck extension. +func NewFactory() component.ExtensionFactory { + return extensionhelper.NewFactory( + typeStr, + createDefaultConfig, + createExtension) +} + +func createDefaultConfig() configmodels.Extension { + return &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Port: 13133, + } +} + +func createExtension(_ context.Context, params component.ExtensionCreateParams, cfg configmodels.Extension) (component.ServiceExtension, error) { + config := cfg.(*Config) + + // The runtime settings are global to the application, so while in principle it + // is possible to have more than one instance, running multiple does not bring + // any value to the service. + // In order to avoid this issue we will allow the creation of a single + // instance once per process while keeping the private function that allow + // the creation of multiple instances for unit tests. Summary: only a single + // instance can be created via the factory. + if !atomic.CompareAndSwapInt32(&instanceState, instanceNotCreated, instanceCreated) { + return nil, errors.New("only a single health check extension instance can be created per process") + } + + return newServer(*config, params.Logger), nil +} + +// See comment in createExtension how these are used. 
+var instanceState int32 + +const ( + instanceNotCreated int32 = 0 + instanceCreated int32 = 1 +) diff --git a/internal/otel_collector/extension/healthcheckextension/factory_test.go b/internal/otel_collector/extension/healthcheckextension/factory_test.go new file mode 100644 index 00000000000..a23b433fb5e --- /dev/null +++ b/internal/otel_collector/extension/healthcheckextension/factory_test.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheckextension + +import ( + "context" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/testutil" +) + +func TestFactory_CreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.Equal(t, &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + NameVal: typeStr, + TypeVal: typeStr, + }, + Port: 13133, + }, + cfg) + + assert.NoError(t, configcheck.ValidateConfig(cfg)) + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) + + // Restore instance tracking from factory, for other tests. + atomic.StoreInt32(&instanceState, instanceNotCreated) +} + +func TestFactory_CreateExtension(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Port = testutil.GetAvailablePort(t) + + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) + + // Restore instance tracking from factory, for other tests. + atomic.StoreInt32(&instanceState, instanceNotCreated) +} + +func TestFactory_CreateExtensionOnlyOnce(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Port = testutil.GetAvailablePort(t) + + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) + + ext1, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.Error(t, err) + require.Nil(t, ext1) + + // Restore instance tracking from factory, for other tests. + atomic.StoreInt32(&instanceState, instanceNotCreated) +} diff --git a/internal/otel_collector/extension/healthcheckextension/healthcheckextension.go b/internal/otel_collector/extension/healthcheckextension/healthcheckextension.go new file mode 100644 index 00000000000..29e568f4301 --- /dev/null +++ b/internal/otel_collector/extension/healthcheckextension/healthcheckextension.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheckextension + +import ( + "context" + "net" + "net/http" + "strconv" + + "github.com/jaegertracing/jaeger/pkg/healthcheck" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +type healthCheckExtension struct { + config Config + logger *zap.Logger + state *healthcheck.HealthCheck + server http.Server +} + +var _ component.PipelineWatcher = (*healthCheckExtension)(nil) + +func (hc *healthCheckExtension) Start(_ context.Context, host component.Host) error { + + hc.logger.Info("Starting health_check extension", zap.Any("config", hc.config)) + + // Initialize listener + portStr := ":" + strconv.Itoa(int(hc.config.Port)) + ln, err := net.Listen("tcp", portStr) + if err != nil { + host.ReportFatalError(err) + return nil + } + + // Mount HC handler + hc.server.Handler = hc.state.Handler() + + go func() { + // The listener ownership goes to the server. + if err := hc.server.Serve(ln); err != http.ErrServerClosed && err != nil { + host.ReportFatalError(err) + } + }() + + return nil +} + +func (hc *healthCheckExtension) Shutdown(context.Context) error { + return hc.server.Close() +} + +func (hc *healthCheckExtension) Ready() error { + hc.state.Set(healthcheck.Ready) + return nil +} + +func (hc *healthCheckExtension) NotReady() error { + hc.state.Set(healthcheck.Unavailable) + return nil +} + +func newServer(config Config, logger *zap.Logger) *healthCheckExtension { + hc := &healthCheckExtension{ + config: config, + logger: logger, + state: healthcheck.New(), + server: http.Server{}, + } + + hc.state.SetLogger(logger) + + return hc +} diff --git a/internal/otel_collector/extension/healthcheckextension/healthcheckextension_test.go b/internal/otel_collector/extension/healthcheckextension/healthcheckextension_test.go new file mode 100644 index 00000000000..603756e1f3b --- /dev/null +++ b/internal/otel_collector/extension/healthcheckextension/healthcheckextension_test.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package healthcheckextension + +import ( + "context" + "net" + "net/http" + "runtime" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/testutil" +) + +func TestHealthCheckExtensionUsage(t *testing.T) { + config := Config{ + Port: testutil.GetAvailablePort(t), + } + + hcExt := newServer(config, zap.NewNop()) + require.NotNil(t, hcExt) + + require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost())) + defer hcExt.Shutdown(context.Background()) + + // Give a chance for the server goroutine to run. + runtime.Gosched() + + client := &http.Client{} + url := "http://localhost:" + strconv.Itoa(int(config.Port)) + resp0, err := client.Get(url) + require.NoError(t, err) + defer resp0.Body.Close() + + require.Equal(t, http.StatusServiceUnavailable, resp0.StatusCode) + + hcExt.Ready() + resp1, err := client.Get(url) + require.NoError(t, err) + defer resp1.Body.Close() + require.Equal(t, http.StatusOK, resp1.StatusCode) + + hcExt.NotReady() + resp2, err := client.Get(url) + require.NoError(t, err) + defer resp2.Body.Close() + require.Equal(t, http.StatusServiceUnavailable, resp2.StatusCode) +} + +func TestHealthCheckExtensionPortAlreadyInUse(t *testing.T) { + endpoint := testutil.GetAvailableLocalAddress(t) + _, portStr, err := net.SplitHostPort(endpoint) + require.NoError(t, err) + + // This needs to be ":port" because health checks also tries to connect to ":port". + // To avoid the pop-up "accept incoming network connections" health check should be changed + // to accept an address. + ln, err := net.Listen("tcp", ":"+portStr) + require.NoError(t, err) + defer ln.Close() + + port, err := strconv.Atoi(portStr) + require.NoError(t, err) + + config := Config{ + Port: uint16(port), + } + hcExt := newServer(config, zap.NewNop()) + require.NotNil(t, hcExt) + + // Health check will report port already in use in a goroutine, use the error waiting + // host to get it. + mh := componenttest.NewErrorWaitingHost() + require.NoError(t, hcExt.Start(context.Background(), mh)) + + receivedError, receivedErr := mh.WaitForFatalError(500 * time.Millisecond) + require.True(t, receivedError) + require.Error(t, receivedErr) +} + +func TestHealthCheckMultipleStarts(t *testing.T) { + config := Config{ + Port: testutil.GetAvailablePort(t), + } + + hcExt := newServer(config, zap.NewNop()) + require.NotNil(t, hcExt) + + mh := componenttest.NewErrorWaitingHost() + require.NoError(t, hcExt.Start(context.Background(), mh)) + defer hcExt.Shutdown(context.Background()) + + // Health check will report already in use in a goroutine, use the error waiting + // host to get it. 
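+	// Note that the second Start below still returns nil: the bind failure
+	// happens in the server goroutine and is surfaced through
+	// host.ReportFatalError rather than as a return value.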
+	require.NoError(t, hcExt.Start(context.Background(), mh))
+
+	receivedError, receivedErr := mh.WaitForFatalError(500 * time.Millisecond)
+	require.True(t, receivedError)
+	require.Error(t, receivedErr)
+}
+
+func TestHealthCheckMultipleShutdowns(t *testing.T) {
+	config := Config{
+		Port: testutil.GetAvailablePort(t),
+	}
+
+	hcExt := newServer(config, zap.NewNop())
+	require.NotNil(t, hcExt)
+
+	require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost()))
+	require.NoError(t, hcExt.Shutdown(context.Background()))
+	require.NoError(t, hcExt.Shutdown(context.Background()))
+}
+
+func TestHealthCheckShutdownWithoutStart(t *testing.T) {
+	config := Config{
+		Port: testutil.GetAvailablePort(t),
+	}
+
+	hcExt := newServer(config, zap.NewNop())
+	require.NotNil(t, hcExt)
+
+	require.NoError(t, hcExt.Shutdown(context.Background()))
+}
diff --git a/internal/otel_collector/extension/healthcheckextension/testdata/config.yaml b/internal/otel_collector/extension/healthcheckextension/testdata/config.yaml
new file mode 100644
index 00000000000..79b7d9758c1
--- /dev/null
+++ b/internal/otel_collector/extension/healthcheckextension/testdata/config.yaml
@@ -0,0 +1,20 @@
+extensions:
+  health_check:
+  health_check/1:
+    port: 13
+
+service:
+  extensions: [health_check/1]
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
+
+# Data pipeline is required to load the config.
+receivers:
+  examplereceiver:
+processors:
+  exampleprocessor:
+exporters:
+  exampleexporter:
diff --git a/internal/otel_collector/extension/pprofextension/README.md b/internal/otel_collector/extension/pprofextension/README.md
new file mode 100644
index 00000000000..d69bcf1bc76
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/README.md
@@ -0,0 +1,32 @@
+# Performance Profiler
+
+Performance Profiler extension enables the golang `net/http/pprof` endpoint.
+This is typically used by developers to collect performance profiles and
+investigate issues with the service.
+
+The following settings are required:
+
+- `endpoint` (default = localhost:1777): The endpoint on which pprof listens.
+Use `localhost:<port>` to make it available only locally, or `":<port>"` to
+make it available on all network interfaces.
+- `block_profile_fraction` (default = 0): Fraction of blocking events that
+are profiled. A value <= 0 disables profiling. See
+https://golang.org/pkg/runtime/#SetBlockProfileRate for details.
+- `mutex_profile_fraction` (default = 0): Fraction of mutex contention
+events that are profiled. A value <= 0 disables profiling. See
+https://golang.org/pkg/runtime/#SetMutexProfileFraction for details.
+
+The following settings can be optionally configured:
+
+- `save_to_file`: File name to save the CPU profile to. Profiling starts when the
+Collector starts, and the profile is saved to the file when the Collector is terminated.
+
+Example:
+
+```yaml
+extensions:
+  pprof:
+```
+
+The full list of settings exposed for this extension is documented [here](./config.go)
+with detailed sample configurations [here](./testdata/config.yaml).
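+
+For a quick sanity check of the endpoint, a small client like the sketch below
+can be used. This is illustrative only: it assumes a locally running Collector
+with the default `localhost:1777` endpoint, and the output file name is
+arbitrary.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// The extension serves the standard net/http/pprof handlers, so the usual
+	// /debug/pprof/ paths are available while the Collector is running.
+	// Assumes the default endpoint, localhost:1777.
+	resp, err := http.Get("http://localhost:1777/debug/pprof/heap")
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "pprof endpoint not reachable:", err)
+		os.Exit(1)
+	}
+	defer resp.Body.Close()
+
+	// Save the profile so it can be inspected with `go tool pprof heap.pb.gz`.
+	out, err := os.Create("heap.pb.gz")
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	defer out.Close()
+	if _, err := io.Copy(out, resp.Body); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	fmt.Println("wrote heap profile to heap.pb.gz")
+}
+```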
diff --git a/internal/otel_collector/extension/pprofextension/config.go b/internal/otel_collector/extension/pprofextension/config.go
new file mode 100644
index 00000000000..0eea6de88c1
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/config.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pprofextension
+
+import (
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config has the configuration for the extension enabling the golang
+// net/http/pprof (Performance Profiler) endpoint.
+type Config struct {
+	configmodels.ExtensionSettings `mapstructure:",squash"`
+
+	// Endpoint is the address and port on which pprof listens.
+	// Use localhost:<port> to make it available only locally, or ":<port>" to
+	// make it available on all network interfaces.
+	Endpoint string `mapstructure:"endpoint"`
+
+	// Fraction of blocking events that are profiled. A value <= 0 disables
+	// profiling. See https://golang.org/pkg/runtime/#SetBlockProfileRate for details.
+	BlockProfileFraction int `mapstructure:"block_profile_fraction"`
+
+	// Fraction of mutex contention events that are profiled. A value <= 0
+	// disables profiling. See https://golang.org/pkg/runtime/#SetMutexProfileFraction
+	// for details.
+	MutexProfileFraction int `mapstructure:"mutex_profile_fraction"`
+
+	// Optional file name to save the CPU profile to. Profiling starts when the
+	// Collector starts, and the profile is saved to the file when the Collector
+	// is terminated.
+	SaveToFile string `mapstructure:"save_to_file"`
+}
diff --git a/internal/otel_collector/extension/pprofextension/config_test.go b/internal/otel_collector/extension/pprofextension/config_test.go
new file mode 100644
index 00000000000..a0fe6df4a59
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/config_test.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package pprofextension + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Extensions[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.Nil(t, err) + require.NotNil(t, cfg) + + ext0 := cfg.Extensions["pprof"] + assert.Equal(t, factory.CreateDefaultConfig(), ext0) + + ext1 := cfg.Extensions["pprof/1"] + assert.Equal(t, + &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: "pprof", + NameVal: "pprof/1", + }, + Endpoint: "0.0.0.0:1777", + BlockProfileFraction: 3, + MutexProfileFraction: 5, + }, + ext1) + + assert.Equal(t, 1, len(cfg.Service.Extensions)) + assert.Equal(t, "pprof/1", cfg.Service.Extensions[0]) +} diff --git a/internal/otel_collector/extension/pprofextension/doc.go b/internal/otel_collector/extension/pprofextension/doc.go new file mode 100644 index 00000000000..a16e8b7624e --- /dev/null +++ b/internal/otel_collector/extension/pprofextension/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pprofextension implements an extension that exposes the golang +// net/http/pprof (Performance Profiler) in a HTTP endpoint. +package pprofextension diff --git a/internal/otel_collector/extension/pprofextension/factory.go b/internal/otel_collector/extension/pprofextension/factory.go new file mode 100644 index 00000000000..c6166dca0be --- /dev/null +++ b/internal/otel_collector/extension/pprofextension/factory.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pprofextension + +import ( + "context" + "errors" + "sync/atomic" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/extension/extensionhelper" +) + +const ( + // The value of extension "type" in configuration. + typeStr = "pprof" +) + +// NewFactory creates a factory for pprof extension. 
+func NewFactory() component.ExtensionFactory {
+    return extensionhelper.NewFactory(
+        typeStr,
+        createDefaultConfig,
+        createExtension)
+}
+
+func createDefaultConfig() configmodels.Extension {
+    return &Config{
+        ExtensionSettings: configmodels.ExtensionSettings{
+            TypeVal: typeStr,
+            NameVal: typeStr,
+        },
+        Endpoint: "localhost:1777",
+    }
+}
+
+func createExtension(_ context.Context, params component.ExtensionCreateParams, cfg configmodels.Extension) (component.ServiceExtension, error) {
+    config := cfg.(*Config)
+    if config.Endpoint == "" {
+        return nil, errors.New("\"endpoint\" is required when using the \"pprof\" extension")
+    }
+
+    // The runtime settings are global to the application, so while in principle it
+    // is possible to have more than one instance, running multiple would mean that
+    // the settings of the last started instance prevail. To avoid this, only a
+    // single instance may be created per process, while the private function that
+    // allows the creation of multiple instances is kept for unit tests. Summary:
+    // only a single instance can be created via the factory.
+    // TODO: Move this as an option to extensionhelper.
+    if !atomic.CompareAndSwapInt32(&instanceState, instanceNotCreated, instanceCreated) {
+        return nil, errors.New("only a single pprof extension instance can be created per process")
+    }
+
+    return newServer(*config, params.Logger), nil
+}
+
+// See the comment in createExtension for how these are used.
+var instanceState int32
+
+const (
+    instanceNotCreated int32 = 0
+    instanceCreated    int32 = 1
+)
diff --git a/internal/otel_collector/extension/pprofextension/factory_test.go b/internal/otel_collector/extension/pprofextension/factory_test.go
new file mode 100644
index 00000000000..5ed29bf8ca8
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/factory_test.go
@@ -0,0 +1,78 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pprofextension
+
+import (
+    "context"
+    "sync/atomic"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/config/configcheck"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/testutil"
+)
+
+func TestFactory_CreateDefaultConfig(t *testing.T) {
+    cfg := createDefaultConfig()
+    assert.Equal(t, &Config{
+        ExtensionSettings: configmodels.ExtensionSettings{
+            NameVal: typeStr,
+            TypeVal: typeStr,
+        },
+        Endpoint: "localhost:1777",
+    },
+        cfg)
+
+    assert.NoError(t, configcheck.ValidateConfig(cfg))
+    ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg)
+    require.NoError(t, err)
+    require.NotNil(t, ext)
+
+    // Restore instance tracking from factory, for other tests.
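+    // (createExtension flipped instanceState with CompareAndSwap above, so it
+    // must be reset here before the next test runs.)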
+    atomic.StoreInt32(&instanceState, instanceNotCreated)
+}
+
+func TestFactory_CreateExtension(t *testing.T) {
+    cfg := createDefaultConfig().(*Config)
+    cfg.Endpoint = testutil.GetAvailableLocalAddress(t)
+
+    ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg)
+    require.NoError(t, err)
+    require.NotNil(t, ext)
+
+    // Restore instance tracking from factory, for other tests.
+    atomic.StoreInt32(&instanceState, instanceNotCreated)
+}
+
+func TestFactory_CreateExtensionOnlyOnce(t *testing.T) {
+    cfg := createDefaultConfig().(*Config)
+    cfg.Endpoint = testutil.GetAvailableLocalAddress(t)
+
+    ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg)
+    require.NoError(t, err)
+    require.NotNil(t, ext)
+
+    ext1, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg)
+    require.Error(t, err)
+    require.Nil(t, ext1)
+
+    // Restore instance tracking from factory, for other tests.
+    atomic.StoreInt32(&instanceState, instanceNotCreated)
+}
diff --git a/internal/otel_collector/extension/pprofextension/pprofextension.go b/internal/otel_collector/extension/pprofextension/pprofextension.go
new file mode 100644
index 00000000000..bb9db577c76
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/pprofextension.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pprofextension
+
+import (
+    "context"
+    "net"
+    "net/http"
+    _ "net/http/pprof" // #nosec Needed to enable the performance profiler
+    "os"
+    "runtime"
+    "runtime/pprof"
+
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component"
+)
+
+type pprofExtension struct {
+    config Config
+    logger *zap.Logger
+    server http.Server
+}
+
+func (p *pprofExtension) Start(_ context.Context, host component.Host) error {
+    // Start the listener here so we can fail early if the port is already in use.
+    ln, err := net.Listen("tcp", p.config.Endpoint)
+    if err != nil {
+        return err
+    }
+
+    runtime.SetBlockProfileRate(p.config.BlockProfileFraction)
+    runtime.SetMutexProfileFraction(p.config.MutexProfileFraction)
+
+    p.logger.Info("Starting net/http/pprof server", zap.Any("config", p.config))
+    go func() {
+        // Ownership of the listener passes to the server.
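+        // (http.Server.Serve closes the listener when it returns; after Close
+        // or Shutdown it returns http.ErrServerClosed, which is treated as a
+        // clean exit below.)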
+        if err := p.server.Serve(ln); err != nil && err != http.ErrServerClosed {
+            host.ReportFatalError(err)
+        }
+    }()
+
+    if p.config.SaveToFile != "" {
+        f, err := os.Create(p.config.SaveToFile)
+        if err != nil {
+            return err
+        }
+        return pprof.StartCPUProfile(f)
+    }
+
+    return nil
+}
+
+func (p *pprofExtension) Shutdown(context.Context) error {
+    if p.config.SaveToFile != "" {
+        pprof.StopCPUProfile()
+    }
+    return p.server.Close()
+}
+
+func newServer(config Config, logger *zap.Logger) *pprofExtension {
+    return &pprofExtension{
+        config: config,
+        logger: logger,
+    }
+}
diff --git a/internal/otel_collector/extension/pprofextension/pprofextension_test.go b/internal/otel_collector/extension/pprofextension/pprofextension_test.go
new file mode 100644
index 00000000000..460fe6983e4
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/pprofextension_test.go
@@ -0,0 +1,110 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pprofextension
+
+import (
+    "context"
+    "net"
+    "net/http"
+    "runtime"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/testutil"
+)
+
+func TestPerformanceProfilerExtensionUsage(t *testing.T) {
+    config := Config{
+        Endpoint:             testutil.GetAvailableLocalAddress(t),
+        BlockProfileFraction: 3,
+        MutexProfileFraction: 5,
+    }
+
+    pprofExt := newServer(config, zap.NewNop())
+    require.NotNil(t, pprofExt)
+
+    require.NoError(t, pprofExt.Start(context.Background(), componenttest.NewNopHost()))
+    defer pprofExt.Shutdown(context.Background())
+
+    // Give a chance for the server goroutine to run.
+    runtime.Gosched()
+
+    _, pprofPort, err := net.SplitHostPort(config.Endpoint)
+    require.NoError(t, err)
+
+    client := &http.Client{}
+    resp, err := client.Get("http://localhost:" + pprofPort + "/debug/pprof")
+    require.NoError(t, err)
+    defer resp.Body.Close()
+
+    require.Equal(t, http.StatusOK, resp.StatusCode)
+}
+
+func TestPerformanceProfilerExtensionPortAlreadyInUse(t *testing.T) {
+    endpoint := testutil.GetAvailableLocalAddress(t)
+    ln, err := net.Listen("tcp", endpoint)
+    require.NoError(t, err)
+    defer ln.Close()
+
+    config := Config{
+        Endpoint: endpoint,
+    }
+    pprofExt := newServer(config, zap.NewNop())
+    require.NotNil(t, pprofExt)
+
+    require.Error(t, pprofExt.Start(context.Background(), componenttest.NewNopHost()))
+}
+
+func TestPerformanceProfilerMultipleStarts(t *testing.T) {
+    config := Config{
+        Endpoint: testutil.GetAvailableLocalAddress(t),
+    }
+
+    pprofExt := newServer(config, zap.NewNop())
+    require.NotNil(t, pprofExt)
+
+    require.NoError(t, pprofExt.Start(context.Background(), componenttest.NewNopHost()))
+    defer pprofExt.Shutdown(context.Background())
+
+    // Try to start it again; it will fail since it is bound to the same endpoint.
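+    // (The second net.Listen on the already-bound address fails, so Start
+    // returns that error.)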
+    require.Error(t, pprofExt.Start(context.Background(), componenttest.NewNopHost()))
+}
+
+func TestPerformanceProfilerMultipleShutdowns(t *testing.T) {
+    config := Config{
+        Endpoint: testutil.GetAvailableLocalAddress(t),
+    }
+
+    pprofExt := newServer(config, zap.NewNop())
+    require.NotNil(t, pprofExt)
+
+    require.NoError(t, pprofExt.Start(context.Background(), componenttest.NewNopHost()))
+    require.NoError(t, pprofExt.Shutdown(context.Background()))
+    require.NoError(t, pprofExt.Shutdown(context.Background()))
+}
+
+func TestPerformanceProfilerShutdownWithoutStart(t *testing.T) {
+    config := Config{
+        Endpoint: testutil.GetAvailableLocalAddress(t),
+    }
+
+    pprofExt := newServer(config, zap.NewNop())
+    require.NotNil(t, pprofExt)
+
+    require.NoError(t, pprofExt.Shutdown(context.Background()))
+}
diff --git a/internal/otel_collector/extension/pprofextension/testdata/config.yaml b/internal/otel_collector/extension/pprofextension/testdata/config.yaml
new file mode 100644
index 00000000000..9b332d8fa6c
--- /dev/null
+++ b/internal/otel_collector/extension/pprofextension/testdata/config.yaml
@@ -0,0 +1,22 @@
+extensions:
+  pprof:
+  pprof/1:
+    endpoint: "0.0.0.0:1777"
+    block_profile_fraction: 3
+    mutex_profile_fraction: 5
+
+service:
+  extensions: [pprof/1]
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
+
+# Data pipeline is required to load the config.
+receivers:
+  examplereceiver:
+processors:
+  exampleprocessor:
+exporters:
+  exampleexporter:
diff --git a/internal/otel_collector/extension/zpagesextension/README.md b/internal/otel_collector/extension/zpagesextension/README.md
new file mode 100644
index 00000000000..338e1d6f838
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/README.md
@@ -0,0 +1,20 @@
+# zPages
+
+Enables an extension that serves zPages, an HTTP endpoint that provides live
+debugging data for components that are properly instrumented to expose it.
+All core exporters and receivers provide some zPage instrumentation.
+
+The following settings are required:
+
+- `endpoint` (default = localhost:55679): Specifies the HTTP endpoint that serves
+zPages. Use localhost:<port> to make it available only locally, or ":<port>" to
+make it available on all network interfaces.
+
+Example:
+```yaml
+extensions:
+  zpages:
+```
+
+The full list of settings exposed for this extension is documented [here](./config.go),
+with detailed sample configurations [here](./testdata/config.yaml).
diff --git a/internal/otel_collector/extension/zpagesextension/config.go b/internal/otel_collector/extension/zpagesextension/config.go
new file mode 100644
index 00000000000..2449ca139d2
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/config.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+    "go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config has the configuration for the zPages extension.
+type Config struct {
+    configmodels.ExtensionSettings `mapstructure:",squash"`
+
+    // Endpoint is the address and port on which the zPages will be served.
+    // Use localhost:<port> to make it available only locally, or ":<port>" to
+    // make it available on all network interfaces.
+    Endpoint string `mapstructure:"endpoint"`
+}
diff --git a/internal/otel_collector/extension/zpagesextension/config_test.go b/internal/otel_collector/extension/zpagesextension/config_test.go
new file mode 100644
index 00000000000..ada938baca1
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/config_test.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+    "path"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/config/configtest"
+)
+
+func TestLoadConfig(t *testing.T) {
+    factories, err := componenttest.ExampleComponents()
+    assert.NoError(t, err)
+
+    factory := NewFactory()
+    factories.Extensions[typeStr] = factory
+    cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+
+    require.Nil(t, err)
+    require.NotNil(t, cfg)
+
+    ext0 := cfg.Extensions["zpages"]
+    assert.Equal(t, factory.CreateDefaultConfig(), ext0)
+
+    ext1 := cfg.Extensions["zpages/1"]
+    assert.Equal(t,
+        &Config{
+            ExtensionSettings: configmodels.ExtensionSettings{
+                TypeVal: "zpages",
+                NameVal: "zpages/1",
+            },
+            Endpoint: "localhost:56888",
+        },
+        ext1)
+
+    assert.Equal(t, 1, len(cfg.Service.Extensions))
+    assert.Equal(t, "zpages/1", cfg.Service.Extensions[0])
+}
diff --git a/internal/otel_collector/extension/zpagesextension/doc.go b/internal/otel_collector/extension/zpagesextension/doc.go
new file mode 100644
index 00000000000..7b312a5e9d2
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package zpagesextension implements an extension that exposes zPages of
+// properly instrumented components.
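+// The zPages are served from the HTTP endpoint configured in Config (the
+// factory default is localhost:55679).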
+package zpagesextension
diff --git a/internal/otel_collector/extension/zpagesextension/factory.go b/internal/otel_collector/extension/zpagesextension/factory.go
new file mode 100644
index 00000000000..035a23f1e62
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/factory.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+    "context"
+    "errors"
+    "sync/atomic"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/extension/extensionhelper"
+)
+
+const (
+    // The value of extension "type" in configuration.
+    typeStr = "zpages"
+)
+
+// NewFactory creates a factory for the zPages extension.
+func NewFactory() component.ExtensionFactory {
+    return extensionhelper.NewFactory(
+        typeStr,
+        createDefaultConfig,
+        createExtension)
+}
+
+func createDefaultConfig() configmodels.Extension {
+    return &Config{
+        ExtensionSettings: configmodels.ExtensionSettings{
+            TypeVal: typeStr,
+            NameVal: typeStr,
+        },
+        Endpoint: "localhost:55679",
+    }
+}
+
+// createExtension creates the extension based on this config.
+func createExtension(_ context.Context, params component.ExtensionCreateParams, cfg configmodels.Extension) (component.ServiceExtension, error) {
+    config := cfg.(*Config)
+    if config.Endpoint == "" {
+        return nil, errors.New("\"endpoint\" is required when using the \"zpages\" extension")
+    }
+
+    // The runtime settings are global to the application, so while in principle it
+    // is possible to have more than one instance, running multiple brings no value
+    // to the service. Only a single instance may therefore be created per process,
+    // while the private function that allows the creation of multiple instances is
+    // kept for unit tests. Summary: only a single instance can be created via the
+    // factory.
+    if !atomic.CompareAndSwapInt32(&instanceState, instanceNotCreated, instanceCreated) {
+        return nil, errors.New("only a single zpages extension instance can be created per process")
+    }
+
+    return newServer(*config, params.Logger), nil
+}
+
+// See the comment in createExtension for how these are used.
+var instanceState int32
+
+const (
+    instanceNotCreated int32 = 0
+    instanceCreated    int32 = 1
+)
diff --git a/internal/otel_collector/extension/zpagesextension/factory_test.go b/internal/otel_collector/extension/zpagesextension/factory_test.go
new file mode 100644
index 00000000000..ff5425a2c6a
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/factory_test.go
@@ -0,0 +1,78 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zpagesextension + +import ( + "context" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/testutil" +) + +func TestFactory_CreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.Equal(t, &Config{ + ExtensionSettings: configmodels.ExtensionSettings{ + NameVal: typeStr, + TypeVal: typeStr, + }, + Endpoint: "localhost:55679", + }, + cfg) + + assert.NoError(t, configcheck.ValidateConfig(cfg)) + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) + + // Restore instance tracking from factory, for other tests. + atomic.StoreInt32(&instanceState, instanceNotCreated) +} + +func TestFactory_CreateExtension(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = testutil.GetAvailableLocalAddress(t) + + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) + + // Restore instance tracking from factory, for other tests. + atomic.StoreInt32(&instanceState, instanceNotCreated) +} + +func TestFactory_CreateExtensionOnlyOnce(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = testutil.GetAvailableLocalAddress(t) + + ext, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.NoError(t, err) + require.NotNil(t, ext) + + ext1, err := createExtension(context.Background(), component.ExtensionCreateParams{Logger: zap.NewNop()}, cfg) + require.Error(t, err) + require.Nil(t, ext1) + + // Restore instance tracking from factory, for other tests. + atomic.StoreInt32(&instanceState, instanceNotCreated) +} diff --git a/internal/otel_collector/extension/zpagesextension/testdata/config.yaml b/internal/otel_collector/extension/zpagesextension/testdata/config.yaml new file mode 100644 index 00000000000..6819540571b --- /dev/null +++ b/internal/otel_collector/extension/zpagesextension/testdata/config.yaml @@ -0,0 +1,20 @@ +extensions: + zpages: + zpages/1: + endpoint: "localhost:56888" + +service: + extensions: [zpages/1] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + +# Data pipeline is required to load the config. 
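+# The example receiver, processor and exporter below provide that pipeline.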
+receivers:
+  examplereceiver:
+processors:
+  exampleprocessor:
+exporters:
+  exampleexporter:
diff --git a/internal/otel_collector/extension/zpagesextension/zpagesextension.go b/internal/otel_collector/extension/zpagesextension/zpagesextension.go
new file mode 100644
index 00000000000..98eeb50ac99
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/zpagesextension.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+    "context"
+    "net"
+    "net/http"
+
+    "go.opencensus.io/zpages"
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component"
+)
+
+type zpagesExtension struct {
+    config Config
+    logger *zap.Logger
+    server http.Server
+}
+
+func (zpe *zpagesExtension) Start(_ context.Context, host component.Host) error {
+    zPagesMux := http.NewServeMux()
+    zpages.Handle(zPagesMux, "/debug")
+
+    hostZPages, ok := host.(interface {
+        RegisterZPages(mux *http.ServeMux, pathPrefix string)
+    })
+    if ok {
+        zpe.logger.Info("Register Host's zPages")
+        hostZPages.RegisterZPages(zPagesMux, "/debug")
+    } else {
+        zpe.logger.Info("Host's zPages not available")
+    }
+
+    // Start the listener here so we can fail early if the port is already in use.
+    ln, err := net.Listen("tcp", zpe.config.Endpoint)
+    if err != nil {
+        return err
+    }
+
+    zpe.logger.Info("Starting zPages extension", zap.Any("config", zpe.config))
+    zpe.server = http.Server{Handler: zPagesMux}
+    go func() {
+        if err := zpe.server.Serve(ln); err != nil && err != http.ErrServerClosed {
+            host.ReportFatalError(err)
+        }
+    }()
+
+    return nil
+}
+
+func (zpe *zpagesExtension) Shutdown(context.Context) error {
+    return zpe.server.Close()
+}
+
+func newServer(config Config, logger *zap.Logger) *zpagesExtension {
+    return &zpagesExtension{
+        config: config,
+        logger: logger,
+    }
+}
diff --git a/internal/otel_collector/extension/zpagesextension/zpagesextension_test.go b/internal/otel_collector/extension/zpagesextension/zpagesextension_test.go
new file mode 100644
index 00000000000..8ebb575357c
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/zpagesextension_test.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+    "context"
+    "net"
+    "net/http"
+    "runtime"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/testutil"
+)
+
+func TestZPagesExtensionUsage(t *testing.T) {
+    config := Config{
+        Endpoint: testutil.GetAvailableLocalAddress(t),
+    }
+
+    zpagesExt := newServer(config, zap.NewNop())
+    require.NotNil(t, zpagesExt)
+
+    require.NoError(t, zpagesExt.Start(context.Background(), componenttest.NewNopHost()))
+    defer zpagesExt.Shutdown(context.Background())
+
+    // Give a chance for the server goroutine to run.
+    runtime.Gosched()
+
+    _, zpagesPort, err := net.SplitHostPort(config.Endpoint)
+    require.NoError(t, err)
+
+    client := &http.Client{}
+    resp, err := client.Get("http://localhost:" + zpagesPort + "/debug/tracez")
+    require.NoError(t, err)
+    defer resp.Body.Close()
+
+    require.Equal(t, http.StatusOK, resp.StatusCode)
+}
+
+func TestZPagesExtensionPortAlreadyInUse(t *testing.T) {
+    endpoint := testutil.GetAvailableLocalAddress(t)
+    ln, err := net.Listen("tcp", endpoint)
+    require.NoError(t, err)
+    defer ln.Close()
+
+    config := Config{
+        Endpoint: endpoint,
+    }
+    zpagesExt := newServer(config, zap.NewNop())
+    require.NotNil(t, zpagesExt)
+
+    require.Error(t, zpagesExt.Start(context.Background(), componenttest.NewNopHost()))
+}
+
+func TestZPagesMultipleStarts(t *testing.T) {
+    config := Config{
+        Endpoint: testutil.GetAvailableLocalAddress(t),
+    }
+
+    zpagesExt := newServer(config, zap.NewNop())
+    require.NotNil(t, zpagesExt)
+
+    require.NoError(t, zpagesExt.Start(context.Background(), componenttest.NewNopHost()))
+    defer zpagesExt.Shutdown(context.Background())
+
+    // Try to start it again; it will fail since it is bound to the same endpoint.
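+    // (The second net.Listen on the already-bound address fails, so Start
+    // returns that error.)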
+ require.Error(t, zpagesExt.Start(context.Background(), componenttest.NewNopHost())) +} + +func TestZPagesMultipleShutdowns(t *testing.T) { + config := Config{ + Endpoint: testutil.GetAvailableLocalAddress(t), + } + + zpagesExt := newServer(config, zap.NewNop()) + require.NotNil(t, zpagesExt) + + require.NoError(t, zpagesExt.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, zpagesExt.Shutdown(context.Background())) + require.NoError(t, zpagesExt.Shutdown(context.Background())) +} + +func TestZPagesShutdownWithoutStart(t *testing.T) { + config := Config{ + Endpoint: testutil.GetAvailableLocalAddress(t), + } + + zpagesExt := newServer(config, zap.NewNop()) + require.NotNil(t, zpagesExt) + + require.NoError(t, zpagesExt.Shutdown(context.Background())) +} diff --git a/internal/otel_collector/go.mod b/internal/otel_collector/go.mod new file mode 100644 index 00000000000..626c6c2c898 --- /dev/null +++ b/internal/otel_collector/go.mod @@ -0,0 +1,64 @@ +module go.opentelemetry.io/collector + +go 1.14 + +require ( + contrib.go.opencensus.io/exporter/prometheus v0.2.0 + github.com/OneOfOne/xxhash v1.2.5 // indirect + github.com/Shopify/sarama v1.27.2 + github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect + github.com/antonmedv/expr v1.8.9 + github.com/apache/thrift v0.13.0 + github.com/cenkalti/backoff v2.2.1+incompatible + github.com/census-instrumentation/opencensus-proto v0.3.0 + github.com/coreos/go-oidc v2.2.1+incompatible + github.com/davecgh/go-spew v1.1.1 + github.com/go-kit/kit v0.10.0 + github.com/go-ole/go-ole v1.2.4 // indirect + github.com/gogo/googleapis v1.3.0 // indirect + github.com/gogo/protobuf v1.3.1 + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e + github.com/golang/protobuf v1.4.3 + github.com/golang/snappy v0.0.2 + github.com/google/go-cmp v0.5.4 + github.com/google/uuid v1.1.2 + github.com/gorilla/mux v1.8.0 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/jaegertracing/jaeger v1.21.0 + github.com/leoluk/perflib_exporter v0.1.0 + github.com/mattn/go-colorable v0.1.7 // indirect + github.com/mitchellh/mapstructure v1.3.2 // indirect + github.com/onsi/ginkgo v1.14.1 // indirect + github.com/onsi/gomega v1.10.2 // indirect + github.com/openzipkin/zipkin-go v0.2.5 + github.com/orijtech/prometheus-go-metrics-exporter v0.0.6 + github.com/pelletier/go-toml v1.8.0 // indirect + github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f // indirect + github.com/prometheus/client_golang v1.8.0 + github.com/prometheus/common v0.15.0 + github.com/prometheus/prometheus v1.8.2-0.20201105135750-00f16d1ac3a4 + github.com/rs/cors v1.7.0 + github.com/shirou/gopsutil v3.20.11+incompatible + github.com/soheilhy/cmux v0.1.4 + github.com/spf13/cast v1.3.1 + github.com/spf13/cobra v1.1.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.7.1 + github.com/stretchr/testify v1.6.1 + github.com/tinylib/msgp v1.1.5 + github.com/uber/jaeger-lib v2.4.0+incompatible + go.opencensus.io v0.22.5 + go.uber.org/atomic v1.7.0 + go.uber.org/zap v1.16.0 + golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 + golang.org/x/text v0.3.4 + google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d + google.golang.org/grpc v1.34.0 + google.golang.org/grpc/examples v0.0.0-20200728065043-dfc0c05b2da9 // indirect + google.golang.org/protobuf v1.25.0 + gopkg.in/ini.v1 v1.57.0 // indirect + gopkg.in/square/go-jose.v2 v2.5.1 // indirect + gopkg.in/yaml.v2 v2.4.0 + 
honnef.co/go/tools v0.0.1-2020.1.6 // indirect +) diff --git a/internal/otel_collector/go.sum b/internal/otel_collector/go.sum new file mode 100644 index 00000000000..52d75b5b662 --- /dev/null +++ b/internal/otel_collector/go.sum @@ -0,0 +1,1480 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +collectd.org v0.3.0/go.mod 
h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.2.0 h1:9PUk0/8V0LGoPqVCrf8fQZJkFGBxudu8jOjQSMwoD6w= +contrib.go.opencensus.io/exporter/prometheus v0.2.0/go.mod h1:TYmVAyE8Tn1lyPcltF5IYYfWp2KHu7lQGIZnj8iZMys= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-sdk-for-go v46.4.0+incompatible h1:fCN6Pi+tEiEwFa8RSmtVlFHRXEZ+DJm9gfx/MKqYWw4= +github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.10 h1:j5sGbX7uj1ieYYkQ3Mpvewd4DCsEQ+ZeJpqnSM9pjnM= +github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= 
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= +github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= +github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go 
v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.8.9 h1:O9stiHmHHww9b4ozhPx7T6BK7fXfOCHJ8ybxf0833zw= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.35.5 h1:doSEOxC0UkirPcle20Rc+1kAhJ4Ip+GSEeZ3nKl7Qlk= +github.com/aws/aws-sdk-go v1.35.5/go.mod 
h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven 
v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod 
h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.46.0 h1:WRbwjATilgz2NE4NGMeSDpeicy9h4xSKNGuRJ/Nq/fA=
+github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible h1:+mzU0jHyjWpYHiD0StRlsVXkCvecWS2hc55M3OlUJSk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig=
+github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk=
+github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
+github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/googleapis v1.3.0 h1:M695OaDJ5ipWvDPcoAg/YL9c3uORAegkEfBqTQF/fTQ=
+github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gophercloud/gophercloud v0.13.0 h1:1XkslZZRm6Ks0bLup+hBNth+KQf+0JA1UeoB7YKw9E8=
+github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc=
+github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.7.0 h1:tGs8Oep67r8CcA2Ycmb/8BLBcJ70St44mF2X10a/qPg=
+github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw=
+github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.0 h1:1X+ga+2ki9aUJJaliI2isvjNLI8rNCGHFkZ1FaVpvCA=
+github.com/hashicorp/go-hclog v0.14.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM=
+github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hetznercloud/hcloud-go v1.22.0 h1:CC0jwkaBzwP4ObFE0sdJBTvGh5DE9kB/tuDETnRfOik=
+github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
+github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
+github.com/jaegertracing/jaeger v1.21.0 h1:Fgre3vTI5E/cmkXKBXK7ksnzul5b/3gXjA3mQzt0+58=
+github.com/jaegertracing/jaeger v1.21.0/go.mod h1:PCTGGFohQBPQMR4j333V5lt6If7tj8aWJ+pQNgvZ+wU=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leoluk/perflib_exporter v0.1.0 h1:fXe/mDaf9jR+Zk8FjFlcCSksACuIj2VNN4GyKHmQqtA=
+github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
+github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo=
+github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
+github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olivere/elastic v6.2.27+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
+github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
+github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU=
+github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
+github.com/orijtech/prometheus-go-metrics-exporter v0.0.6 h1:ExkpQsyDDcyp0U3zhoNUQaCQ/o0Ovq7e1jRCL9lQ/4o=
+github.com/orijtech/prometheus-go-metrics-exporter v0.0.6/go.mod h1:BiTx/ugZex8LheBk3j53tktWaRdFjV5FCfT2o0P7msE=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
+github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
+github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
+github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f h1:JDEmUDtyiLMyMlFwiaDOv2hxUp35497fkwePcLeV7j4=
+github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f/go.mod h1:hoLfEwdY11HjRfKFH6KqnPsfxlo3BP6bJehpDv8t6sQ=
+github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
+github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
+github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/prometheus v1.8.2-0.20201105135750-00f16d1ac3a4 h1:54z99l8Q3TuyyeoNZkyY4Lq7eFht9J2Mynq4T1Hxbzc=
+github.com/prometheus/prometheus v1.8.2-0.20201105135750-00f16d1ac3a4/go.mod h1:XYjkJiog7fyQu3puQNivZPI2pNq1C/775EIoHfDvuvY=
+github.com/prometheus/statsd_exporter v0.15.0 h1:UiwC1L5HkxEPeapXdm2Ye0u1vUJfTj7uwT5yydYpa1E=
+github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7WL1zHKK7WMqFQqu72rw=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco=
+github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE=
+github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto=
+github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0=
+github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
+github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ=
+github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ=
+github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.3.0/go.mod h1:9CWT6lKIep8U41DDaPiH6eFscnTyjfTANNQNx6LrIcA= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time 
v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200603131246-cc40288be839/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201008025239-9df69603baec/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc/examples v0.0.0-20200728065043-dfc0c05b2da9 h1:f+/+gfZ/tfaHBXXiv1gWRmCej6wlX3mLY4bnLpI99wk= +google.golang.org/grpc/examples v0.0.0-20200728065043-dfc0c05b2da9/go.mod h1:5j1uub0jRGhRiSghIlrThmBUgcgLXOVJQ/l1getT4uo= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.6 h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc=
+honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY=
+k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
+k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
+k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
+k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
+k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
+k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco=
+k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
+k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/internal/otel_collector/internal/buildscripts/gen-certs.sh b/internal/otel_collector/internal/buildscripts/gen-certs.sh
new file mode 100644
index 00000000000..e5266367bf2
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/gen-certs.sh
@@ -0,0 +1,125 @@
+#!/usr/bin/env bash
+
+# This script creates the CA, server, and client certificates and keys required by unit tests.
+# These certificates use the Subject Alternative Name extension rather than the Common Name, which is no longer supported as of Go 1.15.
+
+usage() {
+    echo "Usage: $0 [-d] [-m <domain>] [-o <output_dir>]"
+    echo
+    echo "-d Dry-run mode. No project files will be modified. Default: 'false'"
Default: 'false'" + echo "-m Domain name to use in the certificate. Default: 'localhost'" + echo "-o Output directory where certificates will be written to. Default: '.'; the current directory" + exit 1 +} + +dry_run=false +domain="localhost" +output_dir="." + +while getopts "dm:o:" o; do + case "${o}" in + d) + dry_run=true + ;; + m) + domain=$OPTARG + ;; + o) + output_dir=$OPTARG + ;; + *) + usage + ;; + esac +done +shift $((OPTIND-1)) + +set -ex + +# Create temp dir for generated files. +tmp_dir=$(mktemp -d -t certificates) +clean_up() { + ARG=$? + if [ $dry_run = true ]; then + echo "Dry-run complete. Generated files can be found in $tmp_dir" + else + rm -rf "$tmp_dir" + fi + exit $ARG +} +trap clean_up EXIT + +gen_ssl_conf() { + domain_name=$1 + output_file=$2 + + cat << EOF > "$output_file" +[ req ] +prompt = no +default_bits = 2048 +distinguished_name = req_distinguished_name +req_extensions = req_ext + +[ req_distinguished_name ] +countryName = AU +stateOrProvinceName = Australia +localityName = Sydney +organizationName = MyOrgName +commonName = MyCommonName + +[ req_ext ] +subjectAltName = @alt_names + +[alt_names] +DNS.1 = $domain_name +EOF +} + +# Generate config files. +gen_ssl_conf "$domain" "$tmp_dir/ssl.conf" + +# Create CA (accept defaults from prompts). +openssl genrsa -out "$tmp_dir/ca.key" 2048 +openssl req -new -key "$tmp_dir/ca.key" -x509 -days 3650 -out "$tmp_dir/ca.crt" -config "$tmp_dir/ssl.conf" + +# Create client and server keys. +openssl genrsa -out "$tmp_dir/server.key" 2048 +openssl genrsa -out "$tmp_dir/client.key" 2048 + +# Create certificate sign request using the above created keys. +openssl req -new -nodes -key "$tmp_dir/server.key" -out "$tmp_dir/server.csr" -config "$tmp_dir/ssl.conf" +openssl req -new -nodes -key "$tmp_dir/client.key" -out "$tmp_dir/client.csr" -config "$tmp_dir/ssl.conf" + +# Creating the client and server certificates. +openssl x509 -req \ + -sha256 \ + -days 3650 \ + -in "$tmp_dir/server.csr" \ + -signkey "$tmp_dir/server.key" \ + -out "$tmp_dir/server.crt" \ + -extensions req_ext \ + -CA "$tmp_dir/ca.crt" \ + -CAkey "$tmp_dir/ca.key" \ + -CAcreateserial \ + -extfile "$tmp_dir/ssl.conf" +openssl x509 -req \ + -sha256 \ + -days 3650 \ + -in "$tmp_dir/client.csr" \ + -signkey "$tmp_dir/client.key" \ + -out "$tmp_dir/client.crt" \ + -extensions req_ext \ + -CA "$tmp_dir/ca.crt" \ + -CAkey "$tmp_dir/ca.key" \ + -CAcreateserial \ + -extfile "$tmp_dir/ssl.conf" + +# Copy files if not in dry-run mode. 
+if [ $dry_run = false ]; then
+    cp "$tmp_dir/ca.crt" \
+        "$tmp_dir/client.crt" \
+        "$tmp_dir/client.key" \
+        "$tmp_dir/server.crt" \
+        "$tmp_dir/server.key" \
+        "$output_dir"
+fi
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/Dockerfile b/internal/otel_collector/internal/buildscripts/packaging/fpm/Dockerfile
new file mode 100644
index 00000000000..ec3ff295ff0
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/Dockerfile
@@ -0,0 +1,16 @@
+FROM debian:9
+
+RUN apt-get update && \
+    apt-get install -y ruby ruby-dev rubygems build-essential git rpm
+
+RUN gem install --no-document fpm -v 1.11.0
+
+VOLUME /repo
+WORKDIR /repo
+
+ENV PACKAGE="deb"
+ENV VERSION=""
+ENV ARCH="amd64"
+ENV OUTPUT_DIR="/repo/dist/"
+
+CMD ./internal/buildscripts/packaging/fpm/$PACKAGE/build.sh "$VERSION" "$ARCH" "$OUTPUT_DIR"
\ No newline at end of file
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/common.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/common.sh
new file mode 100644
index 00000000000..c87e6f454f5
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/common.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FPM_DIR="$( cd "$( dirname ${BASH_SOURCE[0]} )" && pwd )"
+
+PKG_NAME="otel-collector"
+PKG_VENDOR="OpenTelemetry Community"
+PKG_MAINTAINER="OpenTelemetry Community "
+PKG_DESCRIPTION="OpenTelemetry Collector"
+PKG_LICENSE="Apache 2.0"
+PKG_URL="https://github.com/open-telemetry/opentelemetry-collector"
+PKG_USER="otel"
+PKG_GROUP="otel"
+
+SERVICE_NAME="otel-collector"
+PROCESS_NAME="otelcol"
+
+SERVICE_PATH="$FPM_DIR/$SERVICE_NAME.service"
+PREINSTALL_PATH="$FPM_DIR/preinstall.sh"
+POSTINSTALL_PATH="$FPM_DIR/postinstall.sh"
+PREUNINSTALL_PATH="$FPM_DIR/preuninstall.sh"
+
+install_pkg() {
+    local pkg_path="$1"
+    local pkg_base=$( basename "$pkg_path" )
+
+    echo "Installing $pkg_base ..."
+    docker cp "$pkg_path" $image_name:/tmp/$pkg_base
+    if [[ "${pkg_base##*.}" = "deb" ]]; then
+        $docker_exec dpkg -i /tmp/$pkg_base
+    else
+        $docker_exec rpm -ivh /tmp/$pkg_base
+    fi
+}
+
+uninstall_pkg() {
+    local pkg_type="$1"
+    local pkg_name="${2:-"$PKG_NAME"}"
+
+    echo "Uninstalling $pkg_name ..."
+    if [[ "$pkg_type" = "deb" ]]; then
+        $docker_exec dpkg -r $pkg_name
+    else
+        $docker_exec rpm -e $pkg_name
+    fi
+}
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/deb/README.md b/internal/otel_collector/internal/buildscripts/packaging/fpm/deb/README.md
new file mode 100644
index 00000000000..a23aa433e55
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/deb/README.md
@@ -0,0 +1,13 @@
+# Build otel-collector deb package
+
+Build the otel-collector deb package with [fpm](https://github.com/jordansissel/fpm).
+
+To build the deb package, run `make deb-package` from the repo root directory. The deb package will be written to
+`dist/otel-collector_<version>_<arch>.deb`.
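+
+The `make` target drives the fpm Dockerfile above; a hypothetical manual
+invocation (a sketch, assuming the collector binary was already built to
+`bin/otelcol_linux_amd64`) would be:
+
+    docker build -t otelcol-fpm internal/buildscripts/packaging/fpm
+    docker run --rm -v "$(pwd)":/repo -e PACKAGE=deb -e VERSION=v1.2.3 otelcol-fpm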
+
+By default, `<arch>` is `amd64` and `<version>` is the latest git tag with `-post` appended, e.g. `1.2.3-post`.
+To override these defaults, set the `ARCH` and `VERSION` environment variables, e.g.
+`ARCH=arm64 VERSION=4.5.6 make deb-package`.
+
+Run `./internal/buildscripts/packaging/fpm/test.sh PATH_TO_DEB_FILE` to run a basic installation test with the built
+package.
\ No newline at end of file
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/deb/build.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/deb/build.sh
new file mode 100644
index 00000000000..db62ffb363b
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/deb/build.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname ${BASH_SOURCE[0]} )" && pwd )"
+REPO_DIR="$( cd "$SCRIPT_DIR/../../../../../" && pwd )"
+VERSION="${1:-}"
+ARCH="${2:-"amd64"}"
+OUTPUT_DIR="${3:-"$REPO_DIR/dist/"}"
+OTELCOL_PATH="$REPO_DIR/bin/otelcol_linux_$ARCH"
+CONFIG_PATH="$REPO_DIR/examples/local/otel-config.yaml"
+
+mkdir -p $OUTPUT_DIR
+
+. $SCRIPT_DIR/../common.sh
+
+if [[ -z "$VERSION" ]]; then
+    latest_tag="$( git describe --abbrev=0 --match v[0-9]* )"
+    VERSION="${latest_tag}-post"
+fi
+
+fpm -s dir -t deb -n $PKG_NAME -v ${VERSION#v} -f -p "$OUTPUT_DIR" \
+    --vendor "$PKG_VENDOR" \
+    --maintainer "$PKG_MAINTAINER" \
+    --description "$PKG_DESCRIPTION" \
+    --license "$PKG_LICENSE" \
+    --url "$PKG_URL" \
+    --architecture "$ARCH" \
+    --config-files /etc/otel-collector/config.yaml \
+    --deb-dist "stable" \
+    --deb-user "$PKG_USER" \
+    --deb-group "$PKG_GROUP" \
+    --before-install "$PREINSTALL_PATH" \
+    --after-install "$POSTINSTALL_PATH" \
+    --pre-uninstall "$PREUNINSTALL_PATH" \
+    $OTELCOL_PATH=/usr/bin/$PROCESS_NAME \
+    $SERVICE_PATH=/lib/systemd/system/$SERVICE_NAME.service \
+    $CONFIG_PATH=/etc/otel-collector/config.yaml
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/otel-collector.service b/internal/otel_collector/internal/buildscripts/packaging/fpm/otel-collector.service
new file mode 100644
index 00000000000..faca2be7ce1
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/otel-collector.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=OpenTelemetry Collector
+After=network.target
+
+[Service]
+ExecStart=/usr/bin/otelcol --config /etc/otel-collector/config.yaml
+KillMode=mixed
+Restart=on-failure
+Type=simple
+User=otel
+Group=otel
+
+[Install]
+WantedBy=multi-user.target
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/postinstall.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/postinstall.sh
new file mode 100644
index 00000000000..a7b39f0c5fd
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/postinstall.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if command -v systemctl >/dev/null 2>&1; then
+    systemctl enable otel-collector.service
+    if [ -f /etc/otel-collector/config.yaml ]; then
+        systemctl start otel-collector.service
+    fi
+fi
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/preinstall.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/preinstall.sh
new file mode 100644
index 00000000000..e90d69fa961
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/preinstall.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+getent passwd otel >/dev/null || useradd --system --user-group --no-create-home --shell /sbin/nologin otel
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/preuninstall.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/preuninstall.sh
new file mode 100644
index 00000000000..b0a1bdded02
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/preuninstall.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if command -v systemctl >/dev/null 2>&1; then
+    systemctl stop otel-collector.service
+    systemctl disable otel-collector.service
+fi
diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/rpm/README.md b/internal/otel_collector/internal/buildscripts/packaging/fpm/rpm/README.md
new file mode 100644
index 00000000000..a7cb37530e9
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/rpm/README.md
@@ -0,0 +1,13 @@
+# Build otel-collector rpm package
+
+Build the otel-collector rpm package with [fpm](https://github.com/jordansissel/fpm).
+
+To build the rpm package, run `make rpm-package` from the repo root directory. The rpm package will be written to
+`dist/otel-collector-<version>.<arch>.rpm`.
+
+By default, `<arch>` is `amd64` and `<version>` is the latest git tag with `~post` appended, e.g. `1.2.3~post`.
+To override these defaults, set the `ARCH` and `VERSION` environment variables, e.g. +`ARCH=arm64 VERSION=4.5.6 make rpm-package`. + +Run `./internal/buildscripts/packaging/fpm/test.sh PATH_TO_RPM_FILE` to run a basic installation test with the built +package. \ No newline at end of file diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/rpm/build.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/rpm/build.sh new file mode 100644 index 00000000000..b07339df02d --- /dev/null +++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/rpm/build.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +SCRIPT_DIR="$( cd "$( dirname ${BASH_SOURCE[0]} )" && pwd )" +REPO_DIR="$( cd "$SCRIPT_DIR/../../../../../" && pwd )" +VERSION="${1:-}" +ARCH="${2:-"amd64"}" +OUTPUT_DIR="${3:-"$REPO_DIR/dist/"}" +OTELCOL_PATH="$REPO_DIR/bin/otelcol_linux_$ARCH" +CONFIG_PATH="$REPO_DIR/examples/local/otel-config.yaml" + +mkdir -p $OUTPUT_DIR + +. $SCRIPT_DIR/../common.sh + +if [[ -z "$VERSION" ]]; then + latest_tag="$( git describe --abbrev=0 --match v[0-9]* )" + VERSION="${latest_tag}~post" +fi + +fpm -s dir -t rpm -n $PKG_NAME -v ${VERSION#v} -f -p "$OUTPUT_DIR" \ + --vendor "$PKG_VENDOR" \ + --maintainer "$PKG_MAINTAINER" \ + --description "$PKG_DESCRIPTION" \ + --license "$PKG_LICENSE" \ + --url "$PKG_URL" \ + --architecture "$ARCH" \ + --config-files /etc/otel-collector/config.yaml \ + --rpm-summary "$PKG_DESCRIPTION" \ + --rpm-user "$PKG_USER" \ + --rpm-group "$PKG_GROUP" \ + --before-install "$PREINSTALL_PATH" \ + --after-install "$POSTINSTALL_PATH" \ + --pre-uninstall "$PREUNINSTALL_PATH" \ + $OTELCOL_PATH=/usr/bin/$PROCESS_NAME \ + $SERVICE_PATH=/lib/systemd/system/$SERVICE_NAME.service \ + $CONFIG_PATH=/etc/otel-collector/config.yaml diff --git a/internal/otel_collector/internal/buildscripts/packaging/fpm/test.sh b/internal/otel_collector/internal/buildscripts/packaging/fpm/test.sh new file mode 100644 index 00000000000..9dcc9622655 --- /dev/null +++ b/internal/otel_collector/internal/buildscripts/packaging/fpm/test.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +SCRIPT_DIR="$( cd "$( dirname ${BASH_SOURCE[0]} )" && pwd )" +REPO_DIR="$( cd "$SCRIPT_DIR/../../../../" && pwd )" +PKG_PATH=${1:-} + +. 
$SCRIPT_DIR/common.sh
+
+if [[ -z "$PKG_PATH" ]]; then
+    echo "usage: ${BASH_SOURCE[0]} DEB_OR_RPM_PATH" >&2
+    exit 1
+fi
+
+if [[ ! -f "$PKG_PATH" ]]; then
+    echo "$PKG_PATH not found!" >&2
+    exit 1
+fi
+
+pkg_name="$( basename "$PKG_PATH" )"
+pkg_type="${pkg_name##*.}"
+if [[ ! "$pkg_type" =~ ^(deb|rpm)$ ]]; then
+    echo "$PKG_PATH not supported!" >&2
+    exit 1
+fi
+image_name="otelcol-$pkg_type-test"
+container_name="$image_name"
+docker_run="docker run --name $container_name -d -v /sys/fs/cgroup:/sys/fs/cgroup:ro --privileged $image_name"
+docker_exec="docker exec $container_name"
+
+trap "docker rm -fv $container_name >/dev/null 2>&1 || true" EXIT
+
+docker build -t $image_name -f "$SCRIPT_DIR/$pkg_type/Dockerfile.test" "$SCRIPT_DIR"
+docker rm -fv $container_name >/dev/null 2>&1 || true
+
+# test install
+echo
+$docker_run
+install_pkg "$PKG_PATH"
+
+# ensure service has started and is still running after 5 seconds
+sleep 5
+echo "Checking $SERVICE_NAME service status ..."
+$docker_exec systemctl --no-pager status $SERVICE_NAME
+
+echo "Checking $PROCESS_NAME process ..."
+$docker_exec pgrep -a -u otel $PROCESS_NAME
+
+# test uninstall
+echo
+uninstall_pkg $pkg_type
+
+echo "Checking $SERVICE_NAME service status after uninstall ..."
+if $docker_exec systemctl --no-pager status $SERVICE_NAME; then
+    echo "$SERVICE_NAME service still running after uninstall!" >&2
+    exit 1
+fi
+echo "$SERVICE_NAME service successfully stopped after uninstall"
+
+echo "Checking $SERVICE_NAME service existence after uninstall ..."
+if $docker_exec systemctl list-unit-files --all | grep $SERVICE_NAME; then
+    echo "$SERVICE_NAME service still exists after uninstall!" >&2
+    exit 1
+fi
+echo "$SERVICE_NAME service successfully removed after uninstall"
+
+echo "Checking $PROCESS_NAME process after uninstall ..."
+if $docker_exec pgrep $PROCESS_NAME; then
+    echo "$PROCESS_NAME process still running after uninstall!" >&2
+    exit 1
+fi
+echo "$PROCESS_NAME process successfully killed after uninstall"
diff --git a/internal/otel_collector/internal/buildscripts/packaging/msi/make.ps1 b/internal/otel_collector/internal/buildscripts/packaging/msi/make.ps1
new file mode 100644
index 00000000000..6eff43433f9
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/msi/make.ps1
@@ -0,0 +1,75 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+<#
+.SYNOPSIS
+    Makefile-like build commands for the Collector on Windows.
+
+    Usage: .\make.ps1 <Target> [-<Param> <Value> ...]
+ Example: .\make.ps1 New-MSI -Config "./my-config.yaml" -Version "v0.0.2"
+.PARAMETER Target
+ Build target to run (Install-Tools, New-MSI)
+#>
+Param(
+    [Parameter(Mandatory=$true, ValueFromRemainingArguments=$true)][string]$Target
+)
+
+$ErrorActionPreference = "Stop"
+
+function Install-Tools {
+    # disable progress bar support as this causes CircleCI to crash
+    $OriginalPref = $ProgressPreference
+    $ProgressPreference = "SilentlyContinue"
+    Install-WindowsFeature Net-Framework-Core
+    $ProgressPreference = $OriginalPref
+
+    choco install wixtoolset -y
+    setx /m PATH "%PATH%;C:\Program Files (x86)\WiX Toolset v3.11\bin"
+    refreshenv
+}
+
+function New-MSI(
+    [string]$Version="0.0.1",
+    [string]$Config="./examples/local/otel-config.yaml"
+) {
+    candle -arch x64 -dVersion="$Version" -dConfig="$Config" internal/buildscripts/packaging/msi/opentelemetry-collector.wxs
+    light opentelemetry-collector.wixobj
+    mkdir dist -ErrorAction Ignore
+    Move-Item -Force opentelemetry-collector.msi dist/otel-collector-$Version-amd64.msi
+}
+
+function Confirm-MSI {
+    # ensure system32 is in Path so we can use executables like msiexec & sc
+    $env:Path += ";C:\Windows\System32"
+    $msipath = Resolve-Path "$pwd\dist\otel-collector-*-amd64.msi"
+
+    # install msi, validate service is installed & running
+    Start-Process -Wait msiexec "/i `"$msipath`" /qn"
+    sc.exe query state=all | findstr "otelcol" | Out-Null
+    if ($LASTEXITCODE -ne 0) { Throw "otelcol service failed to install" }
+
+    # stop service
+    Stop-Service otelcol
+
+    # start service
+    Start-Service otelcol
+
+    # uninstall msi, validate service is uninstalled
+    Start-Process -Wait msiexec "/x `"$msipath`" /qn"
+    sc.exe query state=all | findstr "otelcol" | Out-Null
+    if ($LASTEXITCODE -ne 1) { Throw "otelcol service failed to uninstall" }
+}
+
+$sb = [scriptblock]::create("$Target")
+Invoke-Command -ScriptBlock $sb
diff --git a/internal/otel_collector/internal/buildscripts/packaging/msi/opentelemetry-collector.wxs b/internal/otel_collector/internal/buildscripts/packaging/msi/opentelemetry-collector.wxs
new file mode 100644
index 00000000000..c957b5b3fc2
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/packaging/msi/opentelemetry-collector.wxs
@@ -0,0 +1,59 @@
+[59 lines of WiX XML markup lost to tag stripping; the only recoverable fragment is the install condition "CONFIG AND NOT Installed"]
diff --git a/internal/otel_collector/internal/buildscripts/packaging/msi/opentelemetry.ico b/internal/otel_collector/internal/buildscripts/packaging/msi/opentelemetry.ico
new file mode 100644
index 00000000000..9bdd4cf54e4
Binary files /dev/null and b/internal/otel_collector/internal/buildscripts/packaging/msi/opentelemetry.ico differ
diff --git a/internal/otel_collector/internal/collector/telemetry/telemetry.go b/internal/otel_collector/internal/collector/telemetry/telemetry.go
new file mode 100644
index 00000000000..d56dee568fd
--- /dev/null
+++ b/internal/otel_collector/internal/collector/telemetry/telemetry.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package telemetry controls the telemetry settings to be used in the collector.
+package telemetry
+
+import (
+	"flag"
+
+	"go.opentelemetry.io/collector/internal/version"
+)
+
+const (
+	metricsAddrCfg   = "metrics-addr"
+	metricsPrefixCfg = "metrics-prefix"
+)
+
+var (
+	// Command-line flags that control publication of telemetry data.
+	metricsAddrPtr   *string
+	metricsPrefixPtr *string
+
+	addInstanceIDPtr *bool
+)
+
+func Flags(flags *flag.FlagSet) {
+	// At least until we can use a generic (e.g. OpenCensus) metrics exporter,
+	// we default to Prometheus at port 8888, if not otherwise specified.
+	metricsAddrPtr = flags.String(
+		metricsAddrCfg,
+		GetMetricsAddrDefault(),
+		"[address]:port for exposing collector telemetry.")
+
+	metricsPrefixPtr = flags.String(
+		metricsPrefixCfg,
+		"otelcol",
+		"Prefix to the metrics generated by the collector.")
+
+	addInstanceIDPtr = flags.Bool(
+		"add-instance-id",
+		true,
+		"Flag to control the addition of 'service.instance.id' to the collector metrics.")
+}
+
+// GetMetricsAddrDefault returns the default metrics bind address and port depending on
+// the current build type.
+func GetMetricsAddrDefault() string {
+	if version.IsDevBuild() {
+		// Listen on localhost by default for dev builds to avoid security prompts.
+		return "localhost:8888"
+	}
+	return ":8888"
+}
+
+func GetAddInstanceID() bool {
+	return *addInstanceIDPtr
+}
+
+func GetMetricsAddr() string {
+	return *metricsAddrPtr
+}
+
+func GetMetricsPrefix() string {
+	return *metricsPrefixPtr
+}
diff --git a/internal/otel_collector/internal/data/.gitignore b/internal/otel_collector/internal/data/.gitignore
new file mode 100644
index 00000000000..980a4a35c71
--- /dev/null
+++ b/internal/otel_collector/internal/data/.gitignore
@@ -0,0 +1 @@
+.patched-otlp-proto
\ No newline at end of file
diff --git a/internal/otel_collector/internal/data/bytesid.go b/internal/otel_collector/internal/data/bytesid.go
new file mode 100644
index 00000000000..5b02eaa6a45
--- /dev/null
+++ b/internal/otel_collector/internal/data/bytesid.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package data
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+// marshalJSON converts trace id into a hex string enclosed in quotes.
+// Called by Protobuf JSON serialization.
+func marshalJSON(id []byte) ([]byte, error) {
+	if len(id) == 0 {
+		return []byte(`""`), nil
+	}
+
+	// 2 chars per byte plus 2 quote chars at the start and end.
+	hexLen := 2*len(id) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+// Called by Protobuf JSON deserialization.
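+// For example (an illustrative note, not upstream documentation): decoding the
+// JSON string "0102030405060708" into an 8-byte dst yields
+// []byte{1, 2, 3, 4, 5, 6, 7, 8}; an empty string leaves dst untouched, and a
+// dst whose length does not match the hex payload is an error.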
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} + +func marshalBytes(dst []byte, src []byte) (n int, err error) { + if len(dst) < len(src) { + return 0, errors.New("buffer is too short") + } + return copy(dst, src), nil +} diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go new file mode 100644 index 00000000000..722bec013b6 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go @@ -0,0 +1,558 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportLogsServiceRequest struct { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` +} + +func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} } +func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportLogsServiceRequest) ProtoMessage() {} +func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8e3bf87aaa43acd4, []int{0} +} +func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src) +} +func (m *ExportLogsServiceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo + +func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { + if m != nil { + return m.ResourceLogs + } + return nil +} + +type ExportLogsServiceResponse struct { +} + +func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} } +func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportLogsServiceResponse) ProtoMessage() {} +func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8e3bf87aaa43acd4, []int{1} +} +func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src) +} +func (m *ExportLogsServiceResponse) XXX_Size() int { + return m.Size() +} +func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest") + proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4) +} + +var fileDescriptor_8e3bf87aaa43acd4 = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xc8, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0xcf, 0xc9, 0x4f, 0x2f, 0xd6, 0x2f, + 0x33, 0x04, 0xd3, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0x45, 0x42, 0xaa, + 
0x28, 0x3a, 0x21, 0x82, 0x7a, 0x70, 0x9d, 0x7a, 0x20, 0x1d, 0x7a, 0x65, 0x86, 0x52, 0x22, 0xe9, + 0xf9, 0xe9, 0xf9, 0x10, 0x63, 0x41, 0x2c, 0x88, 0x3a, 0x29, 0x35, 0x6c, 0xd6, 0x22, 0x5b, 0x06, + 0x51, 0xa7, 0x94, 0xc5, 0x25, 0xe1, 0x5a, 0x51, 0x90, 0x5f, 0x54, 0xe2, 0x93, 0x9f, 0x5e, 0x1c, + 0x0c, 0xb1, 0x3f, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0xc8, 0x8f, 0x8b, 0xb7, 0x28, 0xb5, + 0x38, 0xbf, 0xb4, 0x28, 0x39, 0x35, 0x1e, 0xa4, 0x45, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, + 0x53, 0x0f, 0x9b, 0xc3, 0xa0, 0xce, 0xd1, 0x0b, 0x82, 0xea, 0x00, 0x99, 0x17, 0xc4, 0x53, 0x84, + 0xc4, 0x53, 0x92, 0xe6, 0x92, 0xc4, 0x62, 0x57, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0xd1, 0x5c, + 0x46, 0x2e, 0x6e, 0x24, 0x71, 0xa1, 0x5e, 0x46, 0x2e, 0x36, 0x88, 0x6a, 0x21, 0x7b, 0x3d, 0xa2, + 0x42, 0x42, 0x0f, 0x97, 0x47, 0xa4, 0x1c, 0xc8, 0x37, 0x00, 0xe2, 0x3a, 0x25, 0x06, 0xa7, 0xb5, + 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, + 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0xc0, 0xa5, 0x91, 0x99, 0x4f, 0x9c, + 0x05, 0x4e, 0x02, 0x48, 0x66, 0x07, 0x80, 0xd4, 0x04, 0x30, 0x46, 0x85, 0xa6, 0xa3, 0xeb, 0xce, + 0x44, 0x4e, 0x20, 0x99, 0x79, 0x25, 0xa9, 0x45, 0x79, 0x89, 0x39, 0xfa, 0x29, 0x89, 0x25, 0x89, + 0xfa, 0x28, 0x0a, 0x75, 0xc1, 0xd6, 0xe8, 0xa6, 0xa7, 0xe6, 0x61, 0x26, 0xa8, 0x24, 0x36, 0xb0, + 0xa4, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x73, 0x09, 0x94, 0xaf, 0x80, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LogsServiceClient is the client API for LogsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LogsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) +} + +type logsServiceClient struct { + cc *grpc.ClientConn +} + +func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { + return &logsServiceClient{cc} +} + +func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { + out := new(ExportLogsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LogsServiceServer is the server API for LogsService service. +type LogsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) +} + +// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations. 
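+// A hypothetical embedding that overrides only Export might look like:
+//
+//	type logsServer struct {
+//		UnimplementedLogsServiceServer
+//	}
+//
+//	func (logsServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) {
+//		// consume req.GetResourceLogs() here
+//		return &ExportLogsServiceResponse{}, nil
+//	}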
+type UnimplementedLogsServiceServer struct { +} + +func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) { + s.RegisterService(&_LogsService_serviceDesc, srv) +} + +func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportLogsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LogsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LogsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", + HandlerType: (*LogsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _LogsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", +} + +func (m *ExportLogsServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportLogsServiceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportLogsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceLogs) > 0 { + for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogsService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExportLogsServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportLogsServiceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportLogsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintLogsService(dAtA []byte, offset int, v uint64) int { + offset -= sovLogsService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExportLogsServiceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceLogs) > 0 { + for _, e := range m.ResourceLogs { + l = e.Size() + n += 1 + l + sovLogsService(uint64(l)) + } + } + return n +} + +func (m *ExportLogsServiceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovLogsService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozLogsService(x uint64) (n int) { + return sovLogsService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExportLogsServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogsService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportLogsServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportLogsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogsService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogsService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogsService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceLogs = append(m.ResourceLogs, &v1.ResourceLogs{}) + if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogsService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogsService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogsService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportLogsServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogsService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportLogsServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportLogsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogsService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogsService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogsService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogsService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogsService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + 
case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogsService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogsService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLogsService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLogsService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLogsService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLogsService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogsService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLogsService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go new file mode 100644 index 00000000000..8003733add0 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
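+
+For example, the Export pattern registered below maps "POST /v1/logs" to the
+LogsService Export RPC, decoding the JSON request body into an
+ExportLogsServiceRequest.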
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux". +// UnaryRPC :call LogsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_LogsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterLogsServiceHandlerFromEndpoint is same as RegisterLogsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
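+// A minimal wiring sketch (hypothetical endpoint and listen address):
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithInsecure()}
+//	if err := RegisterLogsServiceHandlerFromEndpoint(ctx, mux, "localhost:55680", opts); err != nil {
+//		return err
+//	}
+//	http.ListenAndServe(":8080", mux)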
+func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLogsServiceHandler(ctx, mux, conn) +} + +// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn)) +} + +// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LogsServiceClient" to call the correct interceptors. +func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_LogsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_LogsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go new file mode 100644 index 00000000000..b0a984f1139 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go @@ -0,0 +1,558 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_75fb6015e6e64798, []int{0} +} +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { + if m != nil { + return m.ResourceMetrics + } + return nil +} + +type ExportMetricsServiceResponse struct { +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_75fb6015e6e64798, []int{1} +} +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) + } 
else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return m.Size() +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798) +} + +var fileDescriptor_75fb6015e6e64798 = []byte{ + // 304 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x07, 0x89, 0x66, 0x26, 0x17, 0xeb, + 0x97, 0x19, 0xc2, 0x98, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0xa5, 0x42, + 0x1a, 0x28, 0xfa, 0x21, 0x82, 0x7a, 0x70, 0xfd, 0x7a, 0x50, 0x4d, 0x7a, 0x65, 0x86, 0x52, 0x22, + 0xe9, 0xf9, 0xe9, 0xf9, 0x10, 0xf3, 0x41, 0x2c, 0x88, 0x52, 0x29, 0x1d, 0x6c, 0xf6, 0x63, 0xda, + 0x0a, 0x51, 0xad, 0x54, 0xc9, 0x25, 0xed, 0x5a, 0x51, 0x90, 0x5f, 0x54, 0xe2, 0x0b, 0x11, 0x0e, + 0x86, 0xb8, 0x25, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x28, 0x8a, 0x4b, 0xa0, 0x28, 0xb5, + 0x38, 0xbf, 0xb4, 0x28, 0x39, 0x35, 0x1e, 0xaa, 0x51, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, + 0x5f, 0x0f, 0x9b, 0x3b, 0x11, 0xae, 0xd3, 0x0b, 0x82, 0xea, 0x83, 0x1a, 0x1c, 0xc4, 0x5f, 0x84, + 0x2a, 0xa0, 0x24, 0xc7, 0x25, 0x83, 0xdd, 0xea, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x35, + 0x8c, 0x5c, 0x7c, 0xa8, 0x52, 0x42, 0x33, 0x19, 0xb9, 0xd8, 0x20, 0x7a, 0x84, 0x5c, 0xf5, 0x88, + 0x0d, 0x27, 0x3d, 0x3c, 0x1e, 0x94, 0x72, 0xa3, 0xd4, 0x18, 0x88, 0x63, 0x95, 0x18, 0x9c, 0xb6, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, + 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x03, 0x97, 0x76, 0x66, 0x3e, 0xd1, + 0xd6, 0x38, 0x09, 0xa3, 0xda, 0x10, 0x00, 0x52, 0x19, 0xc0, 0x18, 0x15, 0x91, 0x8e, 0x6e, 0x46, + 0x26, 0x72, 0xb2, 0xca, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, 0x2c, 0x49, + 0xd4, 0x47, 0x51, 0xa8, 0x0b, 0xb6, 0x4c, 0x37, 0x3d, 0x35, 0x0f, 0x6b, 0x32, 0x4c, 0x62, 0x03, + 0xcb, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x08, 0x8a, 0xe8, 0x11, 0xb9, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
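+//
+// It mirrors LogsServiceClient above, with a single unary Export call carrying
+// ExportMetricsServiceRequest instead of logs.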
+type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { + out := new(ExportMetricsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportMetricsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _MetricsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", +} + +func (m *ExportMetricsServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportMetricsServiceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportMetricsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceMetrics) > 0 { + for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintMetricsService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExportMetricsServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportMetricsServiceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportMetricsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMetricsService(dAtA []byte, offset int, v uint64) int { + offset -= sovMetricsService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExportMetricsServiceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceMetrics) > 0 { + for _, e := range m.ResourceMetrics { + l = e.Size() + n += 1 + l + sovMetricsService(uint64(l)) + } + } + return n +} + +func (m *ExportMetricsServiceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMetricsService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetricsService(x uint64) (n int) { + return sovMetricsService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExportMetricsServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricsService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricsService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetricsService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetricsService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceMetrics = append(m.ResourceMetrics, &v1.ResourceMetrics{}) + if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetricsService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetricsService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetricsService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportMetricsServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricsService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportMetricsServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportMetricsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMetricsService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetricsService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetricsService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetricsService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricsService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricsService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricsService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetricsService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetricsService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetricsService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetricsService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetricsService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetricsService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go new file mode 100644 index 00000000000..8158c98a624 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". +// UnaryRPC :call MetricsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MetricsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMetricsServiceHandler(ctx, mux, conn) +} + +// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) +} + +// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MetricsServiceClient" to call the correct interceptors. +func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_MetricsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go new file mode 100644 index 00000000000..deebad2314c --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go @@ -0,0 +1,1262 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace_config.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{1, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ConstantSampler + // *TraceConfig_TraceIdRatioBased + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` + // The global default max number of attributes per timed event. + MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` + // The global default max number of link entries per span. + MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + // The global default max number of attributes per span. 
+ MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{0} +} +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return m.Size() +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() + MarshalTo([]byte) (int, error) + Size() int +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof" json:"constant_sampler,omitempty"` +} +type TraceConfig_TraceIdRatioBased struct { + TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof" json:"trace_id_ratio_based,omitempty"` +} +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof" json:"rate_limiting_sampler,omitempty"` +} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} +func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { + if x, ok := m.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { + return x.TraceIdRatioBased + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfTimedEvents() int64 { + if m != nil { + return m.MaxNumberOfTimedEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { + if m != nil { + return m.MaxNumberOfAttributesPerTimedEvent + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { + if m != nil { + return m.MaxNumberOfAttributesPerLink + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_TraceIdRatioBased)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{1} +} +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return m.Size() +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to uniformly sample traces with a given ratio. +// The ratio of sampling a trace is equal to that of the specified ratio. +type TraceIdRatioBased struct { + // The desired ratio of sampling. Must be within [0.0, 1.0]. + SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` +} + +func (m *TraceIdRatioBased) Reset() { *m = TraceIdRatioBased{} } +func (m *TraceIdRatioBased) String() string { return proto.CompactTextString(m) } +func (*TraceIdRatioBased) ProtoMessage() {} +func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{2} +} +func (m *TraceIdRatioBased) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TraceIdRatioBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TraceIdRatioBased.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TraceIdRatioBased) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceIdRatioBased.Merge(m, src) +} +func (m *TraceIdRatioBased) XXX_Size() int { + return m.Size() +} +func (m *TraceIdRatioBased) XXX_DiscardUnknown() { + xxx_messageInfo_TraceIdRatioBased.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceIdRatioBased proto.InternalMessageInfo + +func (m *TraceIdRatioBased) GetSamplingRatio() float64 { + if m != nil { + return m.SamplingRatio + } + return 0 +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{3} +} +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return m.Size() +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opentelemetry.proto.trace.v1.TraceConfig") + proto.RegisterType((*ConstantSampler)(nil), "opentelemetry.proto.trace.v1.ConstantSampler") + proto.RegisterType((*TraceIdRatioBased)(nil), "opentelemetry.proto.trace.v1.TraceIdRatioBased") + proto.RegisterType((*RateLimitingSampler)(nil), "opentelemetry.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace_config.proto", fileDescriptor_5936aa8fa6443e6f) +} + +var fileDescriptor_5936aa8fa6443e6f = []byte{ + // 565 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x14, 0x85, 0xed, 0x86, 0xfe, 0xdd, 0x2a, 0xad, 0x33, 0x69, 0x91, 0x55, 0x55, 0xa6, 0x58, 0x48, + 0x64, 0x93, 0x58, 0x29, 0x0b, 0x04, 0x0b, 0xa4, 0xa4, 0x3f, 0x14, 0x29, 0x4a, 0x23, 0x37, 0x02, + 0x91, 0x8d, 0x35, 0xb1, 0x27, 0xd6, 0x08, 0x7b, 0x26, 0x8c, 0xa7, 0x51, 0xd9, 0xf3, 0x00, 0xbc, + 0x04, 0x4f, 0xc0, 0x4b, 0xb0, 0xec, 0x92, 0x25, 0x4a, 0x5e, 0x04, 0x79, 0x9c, 0xa6, 0x71, 0xda, + 0x46, 0x62, 0x37, 0x73, 0xce, 0x9c, 0xef, 0xde, 0x49, 0xae, 0x07, 0x1c, 0x3e, 0x24, 0x4c, 0x92, + 0x88, 0xc4, 0x44, 0x8a, 0x6f, 0xce, 0x50, 0x70, 0xc9, 0x1d, 0x29, 0xb0, 0x4f, 0x9c, 0x51, 0x3d, + 0x5b, 0x78, 0x3e, 0x67, 0x03, 0x1a, 0xd6, 0x94, 0x87, 0x0e, 0x72, 0x81, 0x4c, 0xac, 0xa9, 0x73, + 0xb5, 0x51, 0x7d, 0x7f, 0x37, 0xe4, 0x21, 0xcf, 0x20, 0xe9, 0x2a, 0xb3, 0xed, 0xef, 0xab, 0xb0, + 0xd5, 0x4d, 0x8f, 0x1c, 0x2b, 0x12, 0xea, 0x81, 0xe1, 0x73, 0x96, 0x48, 0xcc, 0xa4, 0x97, 0xe0, + 0x78, 0x18, 0x11, 0x61, 0xea, 0x87, 0x7a, 0x65, 0xeb, 0xa8, 0x5a, 0x5b, 0x86, 0xaf, 0x1d, 0x4f, + 0x53, 0x97, 0x59, 0xe8, 0x5c, 0x73, 0x77, 0xfc, 0xbc, 0x84, 0xfa, 0xb0, 0x9b, 0x75, 0x4d, 0x03, + 0x4f, 0x60, 0x49, 0xb9, 0xd7, 0xc7, 0x09, 0x09, 0xcc, 0x15, 0xc5, 0x77, 0x96, 0xf3, 0x55, 0x93, + 0x1f, 0x02, 0x37, 0xcd, 0x35, 0xd3, 0xd8, 0xb9, 0xe6, 0x96, 0xe4, 0xa2, 0x88, 0x42, 0xd8, 0x13, + 0x58, 0x12, 0x2f, 0xa2, 0x31, 0x95, 0x94, 0x85, 0xb3, 0x4b, 0x14, 0x54, 0x91, 0xfa, 0xf2, 0x22, + 0x2e, 0x96, 0xa4, 0x35, 
0x4d, 0xde, 0x5d, 0xa4, 0x2c, 0xee, 0xcb, 0xe8, 0x35, 0x98, 0x31, 0xbe, + 0xf6, 0xd8, 0x55, 0xdc, 0x27, 0xc2, 0xe3, 0x03, 0x0f, 0x4b, 0x29, 0x68, 0xff, 0x4a, 0x92, 0xc4, + 0x7c, 0x72, 0xa8, 0x57, 0x0a, 0xee, 0x5e, 0x8c, 0xaf, 0xdb, 0xca, 0xbe, 0x18, 0x34, 0x66, 0x26, + 0x7a, 0x0b, 0xfb, 0xf9, 0xa0, 0xa4, 0x31, 0x09, 0x3c, 0x32, 0x22, 0x4c, 0x26, 0xe6, 0xaa, 0x8a, + 0x3e, 0x9d, 0x8b, 0x76, 0x53, 0xfb, 0x54, 0xb9, 0xa8, 0x0b, 0x95, 0xc7, 0x8a, 0x7a, 0x43, 0x22, + 0xe6, 0x51, 0xe6, 0x9a, 0x22, 0xd9, 0x0f, 0x36, 0xd1, 0x21, 0xe2, 0x0e, 0x8b, 0xaa, 0x50, 0xce, + 0x53, 0x23, 0xca, 0xbe, 0x24, 0xe6, 0xba, 0x02, 0x18, 0x73, 0x80, 0x56, 0xaa, 0xa3, 0xf7, 0xf0, + 0x7c, 0x69, 0x13, 0x69, 0xda, 0xdc, 0x50, 0xe1, 0x83, 0xc7, 0xaa, 0xa7, 0xa4, 0xe6, 0x26, 0xac, + 0x4f, 0xff, 0x1d, 0xfb, 0x97, 0x0e, 0x3b, 0x0b, 0x13, 0x84, 0x7a, 0xb0, 0x11, 0x10, 0x9f, 0x26, + 0x94, 0x33, 0x35, 0x82, 0xdb, 0x47, 0xef, 0xfe, 0x6b, 0x04, 0x67, 0xfb, 0x93, 0x29, 0xc5, 0x9d, + 0xf1, 0xec, 0x13, 0x30, 0x16, 0x5d, 0xb4, 0x0d, 0xd0, 0x68, 0x7d, 0x6a, 0x7c, 0xbe, 0xf4, 0x2e, + 0xce, 0xce, 0x0c, 0x0d, 0x15, 0x61, 0xf3, 0x76, 0xdf, 0x36, 0x74, 0x54, 0x82, 0xe2, 0x74, 0xdb, + 0x69, 0xb8, 0xa7, 0xed, 0xae, 0xb1, 0x62, 0xbf, 0x81, 0xd2, 0xbd, 0xb1, 0x44, 0x2f, 0xa0, 0xa8, + 0x6e, 0x45, 0x59, 0xa8, 0x54, 0xd5, 0xbb, 0xee, 0xe6, 0x45, 0xfb, 0x25, 0x94, 0x1f, 0x18, 0x36, + 0x64, 0x40, 0xe1, 0xeb, 0x30, 0x51, 0x91, 0x82, 0x9b, 0x2e, 0x9b, 0x3f, 0xf5, 0xdf, 0x63, 0x4b, + 0xbf, 0x19, 0x5b, 0xfa, 0xdf, 0xb1, 0xa5, 0xff, 0x98, 0x58, 0xda, 0xcd, 0xc4, 0xd2, 0xfe, 0x4c, + 0x2c, 0x0d, 0x9e, 0x51, 0xbe, 0xf4, 0x07, 0x69, 0x1a, 0x73, 0x5f, 0x76, 0x27, 0xb5, 0x3a, 0x7a, + 0xef, 0x63, 0xb8, 0x18, 0xa2, 0xdc, 0xf1, 0x79, 0x14, 0x11, 0x5f, 0x72, 0xe1, 0x50, 0x26, 0x89, + 0x60, 0x38, 0x72, 0x02, 0x2c, 0x71, 0xfe, 0x05, 0xaa, 0x2a, 0x7a, 0x35, 0x24, 0x6c, 0xee, 0xfc, + 0xed, 0x7b, 0xd4, 0x5f, 0x53, 0xee, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xc7, 0xe9, + 0xec, 0xb6, 0x04, 0x00, 0x00, +} + +func (m *TraceConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxNumberOfAttributesPerLink != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfAttributesPerLink)) + i-- + dAtA[i] = 0x40 + } + if m.MaxNumberOfLinks != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfLinks)) + i-- + dAtA[i] = 0x38 + } + if m.MaxNumberOfAttributesPerTimedEvent != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfAttributesPerTimedEvent)) + i-- + dAtA[i] = 0x30 + } + if m.MaxNumberOfTimedEvents != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfTimedEvents)) + i-- + dAtA[i] = 0x28 + } + if m.MaxNumberOfAttributes != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfAttributes)) + i-- + dAtA[i] = 0x20 + } + if m.Sampler != nil { + { + size := m.Sampler.Size() + i -= size + if _, err := m.Sampler.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *TraceConfig_ConstantSampler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceConfig_ConstantSampler) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + if m.ConstantSampler != nil { + { + size, err := m.ConstantSampler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTraceConfig(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *TraceConfig_TraceIdRatioBased) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceConfig_TraceIdRatioBased) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TraceIdRatioBased != nil { + { + size, err := m.TraceIdRatioBased.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTraceConfig(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *TraceConfig_RateLimitingSampler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceConfig_RateLimitingSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RateLimitingSampler != nil { + { + size, err := m.RateLimitingSampler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTraceConfig(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ConstantSampler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConstantSampler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConstantSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Decision != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.Decision)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TraceIdRatioBased) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceIdRatioBased) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceIdRatioBased) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SamplingRatio != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SamplingRatio)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitingSampler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitingSampler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimitingSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Qps != 0 { + i = encodeVarintTraceConfig(dAtA, i, uint64(m.Qps)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTraceConfig(dAtA []byte, offset int, v uint64) int { + offset -= sovTraceConfig(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TraceConfig) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sampler != nil { + n += m.Sampler.Size() + } + if m.MaxNumberOfAttributes != 0 { + n += 1 + sovTraceConfig(uint64(m.MaxNumberOfAttributes)) + } + if m.MaxNumberOfTimedEvents != 0 { + n += 1 + sovTraceConfig(uint64(m.MaxNumberOfTimedEvents)) + } + if m.MaxNumberOfAttributesPerTimedEvent != 0 { + n += 1 + sovTraceConfig(uint64(m.MaxNumberOfAttributesPerTimedEvent)) + } + if m.MaxNumberOfLinks != 0 { + n += 1 + sovTraceConfig(uint64(m.MaxNumberOfLinks)) + } + if m.MaxNumberOfAttributesPerLink != 0 { + n += 1 + sovTraceConfig(uint64(m.MaxNumberOfAttributesPerLink)) + } + return n +} + +func (m *TraceConfig_ConstantSampler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConstantSampler != nil { + l = m.ConstantSampler.Size() + n += 1 + l + sovTraceConfig(uint64(l)) + } + return n +} +func (m *TraceConfig_TraceIdRatioBased) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceIdRatioBased != nil { + l = m.TraceIdRatioBased.Size() + n += 1 + l + sovTraceConfig(uint64(l)) + } + return n +} +func (m *TraceConfig_RateLimitingSampler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RateLimitingSampler != nil { + l = m.RateLimitingSampler.Size() + n += 1 + l + sovTraceConfig(uint64(l)) + } + return n +} +func (m *ConstantSampler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Decision != 0 { + n += 1 + sovTraceConfig(uint64(m.Decision)) + } + return n +} + +func (m *TraceIdRatioBased) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SamplingRatio != 0 { + n += 9 + } + return n +} + +func (m *RateLimitingSampler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Qps != 0 { + n += 1 + sovTraceConfig(uint64(m.Qps)) + } + return n +} + +func sovTraceConfig(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTraceConfig(x uint64) (n int) { + return sovTraceConfig(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TraceConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConstantSampler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTraceConfig + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTraceConfig + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ConstantSampler{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sampler = &TraceConfig_ConstantSampler{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field TraceIdRatioBased", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTraceConfig + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTraceConfig + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TraceIdRatioBased{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sampler = &TraceConfig_TraceIdRatioBased{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimitingSampler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTraceConfig + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTraceConfig + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RateLimitingSampler{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sampler = &TraceConfig_RateLimitingSampler{v} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfAttributes", wireType) + } + m.MaxNumberOfAttributes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxNumberOfAttributes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfTimedEvents", wireType) + } + m.MaxNumberOfTimedEvents = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxNumberOfTimedEvents |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfAttributesPerTimedEvent", wireType) + } + m.MaxNumberOfAttributesPerTimedEvent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxNumberOfAttributesPerTimedEvent |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfLinks", wireType) + } + m.MaxNumberOfLinks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxNumberOfLinks |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfAttributesPerLink", wireType) + } + m.MaxNumberOfAttributesPerLink = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.MaxNumberOfAttributesPerLink |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTraceConfig(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConstantSampler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConstantSampler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConstantSampler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Decision", wireType) + } + m.Decision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Decision |= ConstantSampler_ConstantDecision(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTraceConfig(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TraceIdRatioBased) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceIdRatioBased: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceIdRatioBased: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplingRatio", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SamplingRatio = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipTraceConfig(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimitingSampler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimitingSampler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimitingSampler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Qps", wireType) + } + m.Qps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Qps |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTraceConfig(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTraceConfig + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTraceConfig(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTraceConfig + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTraceConfig + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTraceConfig + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTraceConfig + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTraceConfig = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTraceConfig = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTraceConfig = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go new file mode 100644 index 00000000000..a96e40ea8a3 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,559 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportTraceServiceRequest struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_192a962890318cf4, []int{0} +} +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { + if m != nil { + return m.ResourceSpans + } + return nil +} + +type ExportTraceServiceResponse struct { +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_192a962890318cf4, []int{1} +} +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil 
{ + return nil, err + } + return b[:n], nil + } +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return m.Size() +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4) +} + +var fileDescriptor_192a962890318cf4 = []byte{ + // 306 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xca, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x2f, 0x29, 0x4a, 0x4c, 0x4e, 0xd5, + 0x2f, 0x33, 0x84, 0x30, 0xe2, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0xf5, 0xc0, 0xca, 0x84, + 0xd4, 0x50, 0xf4, 0x42, 0x04, 0xf5, 0xe0, 0x7a, 0xf5, 0xc0, 0x5a, 0xf4, 0xca, 0x0c, 0xa5, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0x21, 0x26, 0x83, 0x58, 0x10, 0x85, 0x52, 0x1a, 0xd8, 0x6c, 0x46, 0xb5, + 0x0f, 0xa2, 0x52, 0x29, 0x9f, 0x4b, 0xd2, 0xb5, 0xa2, 0x20, 0xbf, 0xa8, 0x24, 0x04, 0x24, 0x18, + 0x0c, 0x71, 0x43, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50, 0x10, 0x17, 0x5f, 0x51, 0x6a, + 0x71, 0x7e, 0x69, 0x11, 0xc8, 0x79, 0x05, 0x89, 0x79, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, + 0x46, 0xda, 0x7a, 0xd8, 0x5c, 0x07, 0x73, 0x93, 0x5e, 0x10, 0x54, 0x4f, 0x30, 0x48, 0x4b, 0x10, + 0x6f, 0x11, 0x32, 0x57, 0x49, 0x86, 0x4b, 0x0a, 0x9b, 0x85, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, + 0x46, 0x8b, 0x18, 0xb9, 0x78, 0x90, 0x25, 0x84, 0x26, 0x32, 0x72, 0xb1, 0x41, 0xd4, 0x0b, 0x39, + 0xea, 0x11, 0x17, 0x26, 0x7a, 0x38, 0x3d, 0x24, 0xe5, 0x44, 0x89, 0x11, 0x10, 0x27, 0x2a, 0x31, + 0x38, 0x6d, 0x60, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, + 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x06, 0x2e, 0xcd, 0xcc, + 0x7c, 0x22, 0xad, 0x70, 0x12, 0x44, 0x36, 0x3d, 0x00, 0xa4, 0x2a, 0x80, 0x31, 0x2a, 0x2c, 0x1d, + 0x5d, 0x7f, 0x26, 0x72, 0x92, 0xc9, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, + 0x2c, 0x49, 0xd4, 0x47, 0x51, 0xa8, 0x0b, 0xb6, 0x48, 0x37, 0x3d, 0x35, 0x0f, 0x4b, 0x12, 0x4b, + 0x62, 0x03, 0xcb, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x4c, 0x62, 0xbb, 0x93, 0x02, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. +type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} + +func (m *ExportTraceServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportTraceServiceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportTraceServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceSpans) > 0 { + for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTraceService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExportTraceServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportTraceServiceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportTraceServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTraceService(dAtA []byte, offset int, v uint64) int { + offset -= sovTraceService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExportTraceServiceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceSpans) > 0 { + for _, e := range m.ResourceSpans { + l = e.Size() + n += 1 + l + sovTraceService(uint64(l)) + } + } + return n +} + +func (m *ExportTraceServiceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTraceService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTraceService(x uint64) (n int) { + return sovTraceService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExportTraceServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportTraceServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportTraceServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTraceService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTraceService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceSpans = append(m.ResourceSpans, &v1.ResourceSpans{}) + if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTraceService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTraceService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTraceService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportTraceServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTraceService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportTraceServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportTraceServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTraceService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTraceService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTraceService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTraceService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTraceService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTraceService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTraceService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTraceService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTraceService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTraceService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTraceService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTraceService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTraceService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 00000000000..1da38f1cd28 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
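An editorial illustration (a sketch, not generated content): the plain gRPC client generated in trace_service.pb.go above can be used directly, without this HTTP/JSON proxy. It assumes a collector gRPC server on localhost:55680 (the address is an assumption for the example):

	conn, err := grpc.Dial("localhost:55680", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := NewTraceServiceClient(conn)
	// An empty request is valid; real callers populate ResourceSpans.
	if _, err := client.Export(context.Background(), &ExportTraceServiceRequest{}); err != nil {
		log.Fatal(err)
	}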
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
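// An editorial illustration (a sketch, not part of the generated code): the
// endpoint-dialing variant declared just below is the simplest way to stand
// up this proxy. Both addresses here are assumptions for the example:
//
//	ctx := context.Background()
//	mux := runtime.NewServeMux()
//	opts := []grpc.DialOption{grpc.WithInsecure()}
//	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:55680", opts); err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(http.ListenAndServe(":55681", mux))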
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. +func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service_gateway_aliases.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service_gateway_aliases.go new file mode 100644 index 00000000000..21dfb731e9e --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/collector/trace/v1/trace_service_gateway_aliases.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	context "context"
+	"net/http"
+
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+)
+
+// The aliases in this file are necessary to fix the bug:
+// https://github.com/open-telemetry/opentelemetry-collector/issues/1968
+
+// patternTraceServiceExport0Alias is an alias for the incorrect pattern
+// pattern_TraceService_Export_0 defined in trace_service.pb.gw.go.
+//
+// The path in the pattern_TraceService_Export_0 pattern is incorrect because it is
+// composed from the historical name of the package v1.trace used in the Protobuf
+// declarations in the trace_service.proto file, and results in the path /v1/trace.
+//
+// This is incorrect since the OTLP spec requires the default path to be /v1/traces,
+// see https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md#request.
+//
+// We set the correct path in this alias.
+var patternTraceServiceExport0Alias = runtime.MustPattern(
+	runtime.NewPattern(
+		1,
+		[]int{2, 0, 2, 1},
+		[]string{"v1", "traces"}, // Patch the path to be /v1/traces.
+		"",
+		runtime.AssumeColonVerbOpt(true)),
+)
+
+// RegisterTraceServiceHandlerServerAlias registers the http handlers for service TraceService to "mux".
+// UnaryRPC :call TraceServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+//
+// RegisterTraceServiceHandlerServerAlias is the alias version of
+// RegisterTraceServiceHandlerServer, and uses patternTraceServiceExport0Alias
+// instead of pattern_TraceService_Export_0.
+func RegisterTraceServiceHandlerServerAlias(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
+
+	// pattern_TraceService_Export_0 is replaced by patternTraceServiceExport0Alias
+	// in the following line. This is the only change in this func compared to
+	// RegisterTraceServiceHandlerServer.
+	mux.Handle("POST", patternTraceServiceExport0Alias, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/common/v1/common.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/common/v1/common.pb.go
new file mode 100644
index 00000000000..3b362407449
--- /dev/null
+++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/common/v1/common.pb.go
@@ -0,0 +1,1781 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
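// An editorial aside (a sketch, not part of the diff): the alias in
// trace_service_gateway_aliases.go above is meant to be registered alongside
// the generated handler, so that a single mux serves both the legacy
// /v1/trace path and the OTLP-specified /v1/traces path. A hypothetical
// helper (registerBothTracePaths is an assumed name, not upstream API):
//
//	func registerBothTracePaths(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
//		if err := RegisterTraceServiceHandlerServer(ctx, mux, server); err != nil {
//			return err
//		}
//		return RegisterTraceServiceHandlerServerAlias(ctx, mux, server)
//	}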
+// source: opentelemetry/proto/common/v1/common.proto
+
+package v1
+
+import (
+	encoding_binary "encoding/binary"
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+type AnyValue struct {
+	// The value is one of the listed fields. It is valid for all values to be unspecified
+	// in which case this AnyValue is considered to be "null".
+	//
+	// Types that are valid to be assigned to Value:
+	//	*AnyValue_StringValue
+	//	*AnyValue_BoolValue
+	//	*AnyValue_IntValue
+	//	*AnyValue_DoubleValue
+	//	*AnyValue_ArrayValue
+	//	*AnyValue_KvlistValue
+	Value isAnyValue_Value `protobuf_oneof:"value"`
+}
+
+func (m *AnyValue) Reset()         { *m = AnyValue{} }
+func (m *AnyValue) String() string { return proto.CompactTextString(m) }
+func (*AnyValue) ProtoMessage()    {}
+func (*AnyValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62ba46dcb97aa817, []int{0}
+}
+func (m *AnyValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AnyValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AnyValue.Merge(m, src)
+}
+func (m *AnyValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *AnyValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_AnyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AnyValue proto.InternalMessageInfo
+
+type isAnyValue_Value interface {
+	isAnyValue_Value()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type AnyValue_StringValue struct {
+	StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
+}
+type AnyValue_BoolValue struct {
+	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
+}
+type AnyValue_IntValue struct {
+	IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"`
+}
+type AnyValue_DoubleValue struct {
+	DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"`
+}
+type AnyValue_ArrayValue struct {
+	ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"`
+}
+type AnyValue_KvlistValue struct {
+	KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"`
+}
+
+func (*AnyValue_StringValue) isAnyValue_Value() {}
+func (*AnyValue_BoolValue) isAnyValue_Value()   {}
+func (*AnyValue_IntValue) isAnyValue_Value()    {}
+func (*AnyValue_DoubleValue) isAnyValue_Value() {}
+func (*AnyValue_ArrayValue) isAnyValue_Value()  {}
+func (*AnyValue_KvlistValue) isAnyValue_Value() {}
+
+func (m *AnyValue) GetValue() isAnyValue_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *AnyValue) GetStringValue() string {
+	if x, ok := m.GetValue().(*AnyValue_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (m *AnyValue) GetBoolValue() bool {
+	if x, ok := m.GetValue().(*AnyValue_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (m *AnyValue) GetIntValue() int64 {
+	if x, ok := m.GetValue().(*AnyValue_IntValue); ok {
+		return x.IntValue
+	}
+	return 0
+}
+
+func (m *AnyValue) GetDoubleValue() float64 {
+	if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
+		return x.DoubleValue
+	}
+	return 0
+}
+
+func (m *AnyValue) GetArrayValue() *ArrayValue {
+	if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
+		return x.ArrayValue
+	}
+	return nil
+}
+
+func (m *AnyValue) GetKvlistValue() *KeyValueList {
+	if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
+		return x.KvlistValue
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*AnyValue) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*AnyValue_StringValue)(nil),
+		(*AnyValue_BoolValue)(nil),
+		(*AnyValue_IntValue)(nil),
+		(*AnyValue_DoubleValue)(nil),
+		(*AnyValue_ArrayValue)(nil),
+		(*AnyValue_KvlistValue)(nil),
+	}
+}
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+	// Array of values. The array may be empty (contain 0 elements).
+	Values []AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
+}
+
+func (m *ArrayValue) Reset()         { *m = ArrayValue{} }
+func (m *ArrayValue) String() string { return proto.CompactTextString(m) }
+func (*ArrayValue) ProtoMessage()    {}
+func (*ArrayValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62ba46dcb97aa817, []int{1}
+}
+func (m *ArrayValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ArrayValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ArrayValue.Merge(m, src)
+}
+func (m *ArrayValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *ArrayValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_ArrayValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ArrayValue proto.InternalMessageInfo
+
+func (m *ArrayValue) GetValues() []AnyValue {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+type KeyValueList struct {
+	// A collection of key/value pairs. The list may be empty (may
+	// contain 0 elements).
+	Values []KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
+}
+
+func (m *KeyValueList) Reset()         { *m = KeyValueList{} }
+func (m *KeyValueList) String() string { return proto.CompactTextString(m) }
+func (*KeyValueList) ProtoMessage()    {}
+func (*KeyValueList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62ba46dcb97aa817, []int{2}
+}
+func (m *KeyValueList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KeyValueList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KeyValueList.Merge(m, src)
+}
+func (m *KeyValueList) XXX_Size() int {
+	return m.Size()
+}
+func (m *KeyValueList) XXX_DiscardUnknown() {
+	xxx_messageInfo_KeyValueList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyValueList proto.InternalMessageInfo
+
+func (m *KeyValueList) GetValues() []KeyValue {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+type KeyValue struct {
+	Key   string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
+}
+
+func (m *KeyValue) Reset()         { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage()    {}
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62ba46dcb97aa817, []int{3}
+}
+func (m *KeyValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KeyValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KeyValue.Merge(m, src)
+}
+func (m *KeyValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *KeyValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_KeyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyValue proto.InternalMessageInfo
+
+func (m *KeyValue) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *KeyValue) GetValue() AnyValue {
+	if m != nil {
+		return m.Value
+	}
+	return AnyValue{}
+}
+
+// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version
+// of KeyValue that only supports string values.
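// An editorial illustration (a sketch, not generated content): the oneof
// defined above is populated by assigning a concrete wrapper such as
// AnyValue_StringValue to the Value field; the typed getters return zero
// values when a different wrapper (or none) is set. For example:
//
//	kv := KeyValue{
//		Key:   "service.name",
//		Value: AnyValue{Value: &AnyValue_StringValue{StringValue: "apm-server"}},
//	}
//	v := kv.GetValue()
//	fmt.Println(v.GetStringValue()) // "apm-server"
//	fmt.Println(v.GetIntValue())    // 0, because the int wrapper is not set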
+type StringKeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *StringKeyValue) Reset() { *m = StringKeyValue{} } +func (m *StringKeyValue) String() string { return proto.CompactTextString(m) } +func (*StringKeyValue) ProtoMessage() {} +func (*StringKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{4} +} +func (m *StringKeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringKeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringKeyValue.Merge(m, src) +} +func (m *StringKeyValue) XXX_Size() int { + return m.Size() +} +func (m *StringKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringKeyValue proto.InternalMessageInfo + +func (m *StringKeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *StringKeyValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// InstrumentationLibrary is a message representing the instrumentation library information +// such as the fully qualified name and version. +type InstrumentationLibrary struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *InstrumentationLibrary) Reset() { *m = InstrumentationLibrary{} } +func (m *InstrumentationLibrary) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibrary) ProtoMessage() {} +func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{5} +} +func (m *InstrumentationLibrary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InstrumentationLibrary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InstrumentationLibrary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InstrumentationLibrary) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibrary.Merge(m, src) +} +func (m *InstrumentationLibrary) XXX_Size() int { + return m.Size() +} +func (m *InstrumentationLibrary) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibrary.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibrary proto.InternalMessageInfo + +func (m *InstrumentationLibrary) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstrumentationLibrary) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func init() { + proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue") + proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue") + proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList") + proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue") + proto.RegisterType((*StringKeyValue)(nil), 
"opentelemetry.proto.common.v1.StringKeyValue") + proto.RegisterType((*InstrumentationLibrary)(nil), "opentelemetry.proto.common.v1.InstrumentationLibrary") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817) +} + +var fileDescriptor_62ba46dcb97aa817 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0x6b, 0xdb, 0x30, + 0x18, 0xb6, 0x9a, 0x34, 0x4d, 0x5e, 0x87, 0x31, 0x44, 0x19, 0x61, 0x50, 0xd7, 0x64, 0x87, 0x79, + 0x1b, 0x8d, 0x69, 0x77, 0xd9, 0xb5, 0x29, 0x1b, 0x19, 0xcb, 0x20, 0xb8, 0x6c, 0x87, 0x5d, 0x86, + 0xd2, 0x0a, 0x23, 0x2a, 0x4b, 0x45, 0x56, 0x0c, 0xfe, 0x17, 0x3b, 0xee, 0xcf, 0xec, 0xde, 0x63, + 0x8f, 0x3b, 0x8d, 0x91, 0xfc, 0x91, 0xa2, 0x0f, 0xf7, 0xeb, 0x90, 0x92, 0xdb, 0xab, 0x47, 0xcf, + 0xc7, 0xfb, 0xea, 0x03, 0xde, 0xca, 0x4b, 0x2a, 0x34, 0xe5, 0xb4, 0xa0, 0x5a, 0xd5, 0xe9, 0xa5, + 0x92, 0x5a, 0xa6, 0x67, 0xb2, 0x28, 0xa4, 0x48, 0xab, 0x43, 0x5f, 0x8d, 0x2c, 0x8c, 0xf7, 0x1e, + 0x70, 0x1d, 0x38, 0xf2, 0x8c, 0xea, 0xf0, 0xe5, 0x6e, 0x2e, 0x73, 0xe9, 0x0c, 0x4c, 0xe5, 0xf6, + 0x87, 0x7f, 0xb6, 0xa0, 0x7b, 0x2c, 0xea, 0xef, 0x84, 0x2f, 0x28, 0x7e, 0x05, 0xfd, 0x52, 0x2b, + 0x26, 0xf2, 0x9f, 0x95, 0x59, 0x0f, 0x50, 0x8c, 0x92, 0xde, 0x24, 0xc8, 0x42, 0x87, 0x3a, 0xd2, + 0x3e, 0xc0, 0x5c, 0x4a, 0xee, 0x29, 0x5b, 0x31, 0x4a, 0xba, 0x93, 0x20, 0xeb, 0x19, 0xcc, 0x11, + 0xf6, 0xa0, 0xc7, 0x84, 0xf6, 0xfb, 0xad, 0x18, 0x25, 0xad, 0x49, 0x90, 0x75, 0x99, 0xd0, 0xb7, + 0x21, 0xe7, 0x72, 0x31, 0xe7, 0xd4, 0x33, 0xda, 0x31, 0x4a, 0x90, 0x09, 0x71, 0xa8, 0x23, 0x4d, + 0x21, 0x24, 0x4a, 0x91, 0xda, 0x73, 0xb6, 0x63, 0x94, 0x84, 0x47, 0x6f, 0x46, 0x6b, 0x27, 0x1c, + 0x1d, 0x1b, 0x85, 0xd5, 0x4f, 0x82, 0x0c, 0xc8, 0xed, 0x0a, 0xcf, 0xa0, 0x7f, 0x51, 0x71, 0x56, + 0x36, 0x4d, 0x75, 0xac, 0xdd, 0xbb, 0x27, 0xec, 0xbe, 0x50, 0x27, 0x9f, 0xb2, 0x52, 0x9b, 0xfe, + 0x9c, 0x85, 0x85, 0xc6, 0x3b, 0xb0, 0x6d, 0xad, 0x86, 0xa7, 0x00, 0x77, 0xb1, 0xf8, 0x23, 0x74, + 0x2c, 0x5c, 0x0e, 0x50, 0xdc, 0x4a, 0xc2, 0xa3, 0xd7, 0x4f, 0x75, 0xec, 0x4f, 0x7e, 0xdc, 0xbe, + 0xfa, 0xb7, 0x1f, 0x64, 0x5e, 0x3c, 0xfc, 0x06, 0xfd, 0xfb, 0xe1, 0x1b, 0xdb, 0x36, 0xe2, 0x47, + 0xb6, 0x04, 0xba, 0xcd, 0x0e, 0x7e, 0x0e, 0xad, 0x0b, 0x5a, 0xbb, 0x1b, 0xce, 0x4c, 0x89, 0x4f, + 0xfc, 0x48, 0xf6, 0x4a, 0x37, 0x6e, 0xdd, 0x1f, 0xc7, 0x07, 0x78, 0x76, 0x6a, 0xdf, 0xca, 0x9a, + 0xa0, 0xdd, 0xfb, 0x41, 0xbd, 0x46, 0xf9, 0x09, 0x5e, 0x7c, 0x16, 0xa5, 0x56, 0x8b, 0x82, 0x0a, + 0x4d, 0x34, 0x93, 0x62, 0xca, 0xe6, 0x8a, 0xa8, 0x1a, 0x63, 0x68, 0x0b, 0x52, 0xf8, 0xd7, 0x98, + 0xd9, 0x1a, 0x0f, 0x60, 0xa7, 0xa2, 0xaa, 0x64, 0x52, 0x78, 0x97, 0x66, 0x39, 0xfe, 0x8d, 0xae, + 0x96, 0x11, 0xba, 0x5e, 0x46, 0xe8, 0xff, 0x32, 0x42, 0xbf, 0x56, 0x51, 0x70, 0xbd, 0x8a, 0x82, + 0xbf, 0xab, 0x28, 0x80, 0x98, 0xc9, 0xf5, 0x43, 0x8d, 0xc3, 0x13, 0x5b, 0xce, 0x0c, 0x3c, 0x43, + 0x3f, 0xbe, 0xe6, 0x8f, 0x05, 0xcc, 0xfc, 0x3e, 0xce, 0xe9, 0x99, 0x96, 0x2a, 0x65, 0x42, 0x53, + 0x25, 0x08, 0x4f, 0xcf, 0x89, 0x26, 0xe9, 0x03, 0xe2, 0x81, 0x75, 0x3e, 0xc8, 0xa9, 0xb8, 0xfb, + 0xad, 0xf3, 0x8e, 0x05, 0xdf, 0xdf, 0x04, 0x00, 0x00, 0xff, 0xff, 0x70, 0x0b, 0x4b, 0xbe, 0xd5, + 0x03, 0x00, 0x00, +} + +func (m *AnyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AnyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *AnyValue_StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintCommon(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *AnyValue_BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *AnyValue_IntValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintCommon(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *AnyValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) + i-- + dAtA[i] = 0x21 + return len(dAtA) - i, nil +} +func (m *AnyValue_ArrayValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue_ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ArrayValue != nil { + { + size, err := m.ArrayValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *AnyValue_KvlistValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnyValue_KvlistValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.KvlistValue != nil { + { + size, err := m.KvlistValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ArrayValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArrayValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
} + } + return len(dAtA) - i, nil +} + +func (m *KeyValueList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyValueList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StringKeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringKeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InstrumentationLibrary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InstrumentationLibrary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InstrumentationLibrary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCommon(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { + offset -= sovCommon(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AnyValue) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *AnyValue_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovCommon(uint64(l)) + return n +} +func (m *AnyValue_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *AnyValue_IntValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovCommon(uint64(m.IntValue)) + return n +} +func (m *AnyValue_DoubleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *AnyValue_ArrayValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ArrayValue != nil { + l = m.ArrayValue.Size() + n += 1 + l + sovCommon(uint64(l)) + } + return n +} +func (m *AnyValue_KvlistValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KvlistValue != nil { + l = m.KvlistValue.Size() + n += 1 + l + sovCommon(uint64(l)) + } + return n +} +func (m *ArrayValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovCommon(uint64(l)) + } + } + return n +} + +func (m *KeyValueList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovCommon(uint64(l)) + } + } + return n +} + +func (m *KeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + l = m.Value.Size() + n += 1 + l + sovCommon(uint64(l)) + return n +} + +func (m *StringKeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + +func (m *InstrumentationLibrary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + +func sovCommon(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCommon(x uint64) (n int) { + return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AnyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AnyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &AnyValue_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Value = &AnyValue_BoolValue{b} + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &AnyValue_IntValue{v} + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &AnyValue_DoubleValue{float64(math.Float64frombits(v))} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ArrayValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &AnyValue_ArrayValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &KeyValueList{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &AnyValue_KvlistValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArrayValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, AnyValue{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValueList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, KeyValue{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringKeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringKeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InstrumentationLibrary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InstrumentationLibrary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InstrumentationLibrary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCommon(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCommon + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCommon + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCommon + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/logs/v1/logs.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/logs/v1/logs.pb.go new file mode 100644 index 00000000000..3092a71e09b --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/logs/v1/logs.pb.go @@ -0,0 +1,1387 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/logs/v1/logs.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + go_opentelemetry_io_collector_internal_data "go.opentelemetry.io/collector/internal/data" + v11 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Possible values for LogRecord.SeverityNumber. +type SeverityNumber int32 + +const ( + // UNSPECIFIED is the default SeverityNumber, it MUST not be used. 
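The skipCommon helper above (and its sibling skipLogs later in this diff) walks protobuf's base-128 varint encoding by hand: each byte contributes its low seven bits, and a clear high bit terminates the value. A minimal standalone sketch of that decode loop, added here for illustration and not part of the vendored file:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop in skipCommon/skipLogs: accumulate the low
// 7 bits of each byte and stop at the first byte whose high bit is clear.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded in two bytes
	fmt.Println(v, n, err)                        // 300 2 <nil>
}

The standard library's binary.Uvarint decodes the same format; the generated code simply inlines the loop.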
+ SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 + SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 + SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 + SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 + SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 + SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 + SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 + SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 + SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 + SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 + SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 + SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 + SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 + SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 + SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 + SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 + SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 + SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 + SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 + SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 + SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 + SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 + SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 + SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 + SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 +) + +var SeverityNumber_name = map[int32]string{ + 0: "SEVERITY_NUMBER_UNSPECIFIED", + 1: "SEVERITY_NUMBER_TRACE", + 2: "SEVERITY_NUMBER_TRACE2", + 3: "SEVERITY_NUMBER_TRACE3", + 4: "SEVERITY_NUMBER_TRACE4", + 5: "SEVERITY_NUMBER_DEBUG", + 6: "SEVERITY_NUMBER_DEBUG2", + 7: "SEVERITY_NUMBER_DEBUG3", + 8: "SEVERITY_NUMBER_DEBUG4", + 9: "SEVERITY_NUMBER_INFO", + 10: "SEVERITY_NUMBER_INFO2", + 11: "SEVERITY_NUMBER_INFO3", + 12: "SEVERITY_NUMBER_INFO4", + 13: "SEVERITY_NUMBER_WARN", + 14: "SEVERITY_NUMBER_WARN2", + 15: "SEVERITY_NUMBER_WARN3", + 16: "SEVERITY_NUMBER_WARN4", + 17: "SEVERITY_NUMBER_ERROR", + 18: "SEVERITY_NUMBER_ERROR2", + 19: "SEVERITY_NUMBER_ERROR3", + 20: "SEVERITY_NUMBER_ERROR4", + 21: "SEVERITY_NUMBER_FATAL", + 22: "SEVERITY_NUMBER_FATAL2", + 23: "SEVERITY_NUMBER_FATAL3", + 24: "SEVERITY_NUMBER_FATAL4", +} + +var SeverityNumber_value = map[string]int32{ + "SEVERITY_NUMBER_UNSPECIFIED": 0, + "SEVERITY_NUMBER_TRACE": 1, + "SEVERITY_NUMBER_TRACE2": 2, + "SEVERITY_NUMBER_TRACE3": 3, + "SEVERITY_NUMBER_TRACE4": 4, + "SEVERITY_NUMBER_DEBUG": 5, + "SEVERITY_NUMBER_DEBUG2": 6, + "SEVERITY_NUMBER_DEBUG3": 7, + "SEVERITY_NUMBER_DEBUG4": 8, + "SEVERITY_NUMBER_INFO": 9, + "SEVERITY_NUMBER_INFO2": 10, + "SEVERITY_NUMBER_INFO3": 11, + "SEVERITY_NUMBER_INFO4": 12, + "SEVERITY_NUMBER_WARN": 13, + "SEVERITY_NUMBER_WARN2": 14, + "SEVERITY_NUMBER_WARN3": 15, + "SEVERITY_NUMBER_WARN4": 16, + "SEVERITY_NUMBER_ERROR": 17, + "SEVERITY_NUMBER_ERROR2": 18, + "SEVERITY_NUMBER_ERROR3": 19, + "SEVERITY_NUMBER_ERROR4": 20, + "SEVERITY_NUMBER_FATAL": 21, + "SEVERITY_NUMBER_FATAL2": 22, + "SEVERITY_NUMBER_FATAL3": 23, + "SEVERITY_NUMBER_FATAL4": 24, +} + +func (x SeverityNumber) String() string { + return proto.EnumName(SeverityNumber_name, int32(x)) +} + +func (SeverityNumber) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{0} +} + +// Masks for LogRecord.flags field. 
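The SeverityNumber values above reserve four fine-grained slots per conventional level (TRACE through TRACE4, DEBUG through DEBUG4, and so on), with smaller numbers meaning less severe; the flags masks announced by the comment above are defined next. A short usage sketch, assuming the generated package is imported as logspb:

// Editorial sketch: the enum round-trips between its numeric value and the
// name registered in the SeverityNumber_name map above.
level := logspb.SeverityNumber_SEVERITY_NUMBER_WARN
fmt.Println(int32(level))   // 13
fmt.Println(level.String()) // SEVERITY_NUMBER_WARN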
+type LogRecordFlags int32 + +const ( + LogRecordFlags_LOG_RECORD_FLAG_UNSPECIFIED LogRecordFlags = 0 + LogRecordFlags_LOG_RECORD_FLAG_TRACE_FLAGS_MASK LogRecordFlags = 255 +) + +var LogRecordFlags_name = map[int32]string{ + 0: "LOG_RECORD_FLAG_UNSPECIFIED", + 255: "LOG_RECORD_FLAG_TRACE_FLAGS_MASK", +} + +var LogRecordFlags_value = map[string]int32{ + "LOG_RECORD_FLAG_UNSPECIFIED": 0, + "LOG_RECORD_FLAG_TRACE_FLAGS_MASK": 255, +} + +func (x LogRecordFlags) String() string { + return proto.EnumName(LogRecordFlags_name, int32(x)) +} + +func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{1} +} + +// A collection of InstrumentationLibraryLogs from a Resource. +type ResourceLogs struct { + // The resource for the logs in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of InstrumentationLibraryLogs that originate from a resource. + InstrumentationLibraryLogs []*InstrumentationLibraryLogs `protobuf:"bytes,2,rep,name=instrumentation_library_logs,json=instrumentationLibraryLogs,proto3" json:"instrumentation_library_logs,omitempty"` +} + +func (m *ResourceLogs) Reset() { *m = ResourceLogs{} } +func (m *ResourceLogs) String() string { return proto.CompactTextString(m) } +func (*ResourceLogs) ProtoMessage() {} +func (*ResourceLogs) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{0} +} +func (m *ResourceLogs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceLogs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceLogs.Merge(m, src) +} +func (m *ResourceLogs) XXX_Size() int { + return m.Size() +} +func (m *ResourceLogs) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceLogs.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo + +func (m *ResourceLogs) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceLogs) GetInstrumentationLibraryLogs() []*InstrumentationLibraryLogs { + if m != nil { + return m.InstrumentationLibraryLogs + } + return nil +} + +// A collection of Logs produced by an InstrumentationLibrary. +type InstrumentationLibraryLogs struct { + // The instrumentation library information for the logs in this message. + // If this field is not set then no library info is known. + InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` + // A list of log records. 
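LogRecordFlags above reserves only the low eight bits of LogRecord.Flags for W3C trace flags; the upper 24 bits are reserved and, as the Flags field comment later in this file stresses, readers must mask rather than assume they are zero. A hedged sketch, where record is a hypothetical *logspb.LogRecord and the sampled-bit position comes from the W3C Trace Context specification:

// Editorial sketch: extract the 8 trace-flag bits before interpreting them.
traceFlags := record.Flags & uint32(logspb.LogRecordFlags_LOG_RECORD_FLAG_TRACE_FLAGS_MASK)
sampled := traceFlags&0x01 != 0 // W3C "sampled" flag is the least significant bit
_ = sampled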
+ Logs []*LogRecord `protobuf:"bytes,2,rep,name=logs,proto3" json:"logs,omitempty"` +} + +func (m *InstrumentationLibraryLogs) Reset() { *m = InstrumentationLibraryLogs{} } +func (m *InstrumentationLibraryLogs) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibraryLogs) ProtoMessage() {} +func (*InstrumentationLibraryLogs) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{1} +} +func (m *InstrumentationLibraryLogs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InstrumentationLibraryLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InstrumentationLibraryLogs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InstrumentationLibraryLogs) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibraryLogs.Merge(m, src) +} +func (m *InstrumentationLibraryLogs) XXX_Size() int { + return m.Size() +} +func (m *InstrumentationLibraryLogs) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibraryLogs.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibraryLogs proto.InternalMessageInfo + +func (m *InstrumentationLibraryLogs) GetInstrumentationLibrary() v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return v11.InstrumentationLibrary{} +} + +func (m *InstrumentationLibraryLogs) GetLogs() []*LogRecord { + if m != nil { + return m.Logs + } + return nil +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/master/text/logs/0097-log-data-model.md +type LogRecord struct { + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. + SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` + // Short event identifier that does not contain varying parts. Name describes + // what happened (e.g. "ProcessStarted"). Recommended to be no longer than 50 + // characters. Not guaranteed to be unique in any way. [Optional]. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. + Body v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body"` + // Additional attributes that describe the specific event occurrence. [Optional]. 
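Taken together, the fields documented above (timestamp, severity, name, body) and the attribute and trace/span correlation fields declared just below compose as follows. This is an editorial construction sketch: logspb and commonpb are assumed import aliases, the values are invented, and the AnyValue_StringValue wrapper name follows the usual generated oneof convention rather than code shown in this diff:

// Editorial sketch: assembling a LogRecord by hand.
rec := logspb.LogRecord{
	TimeUnixNano:   uint64(time.Now().UnixNano()), // UNIX epoch nanoseconds
	SeverityNumber: logspb.SeverityNumber_SEVERITY_NUMBER_INFO,
	SeverityText:   "INFO",
	Name:           "ProcessStarted", // short, non-varying identifier
	Body: commonpb.AnyValue{
		Value: &commonpb.AnyValue_StringValue{StringValue: "process started"},
	},
}
_ = rec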
+ Attributes []v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes"` + DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & TRACE_FLAGS_MASK). [Optional]. + Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. Can be set for logs that are part of request processing + // and have an assigned trace id. [Optional]. + TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. Can be set for logs that are part of a particular processing span. + // If span_id is present trace_id SHOULD be also present. [Optional]. + SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{2} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(m, src) +} +func (m *LogRecord) XXX_Size() int { + return m.Size() +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *LogRecord) GetSeverityNumber() SeverityNumber { + if m != nil { + return m.SeverityNumber + } + return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED +} + +func (m *LogRecord) GetSeverityText() string { + if m != nil { + return m.SeverityText + } + return "" +} + +func (m *LogRecord) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogRecord) GetBody() v11.AnyValue { + if m != nil { + return m.Body + } + return v11.AnyValue{} +} + +func (m *LogRecord) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *LogRecord) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *LogRecord) GetFlags() uint32 { + if m != nil { + return m.Flags 
+ } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) + proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) + proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs") + proto.RegisterType((*InstrumentationLibraryLogs)(nil), "opentelemetry.proto.logs.v1.InstrumentationLibraryLogs") + proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e) +} + +var fileDescriptor_d1c030a3ec7e961e = []byte{ + // 849 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xdf, 0x6e, 0x22, 0x37, + 0x14, 0xc6, 0x71, 0x42, 0x20, 0x71, 0x08, 0xeb, 0xba, 0xd9, 0xec, 0x94, 0x54, 0x04, 0xa5, 0xed, + 0x96, 0xa6, 0x5a, 0x50, 0x06, 0xaa, 0x56, 0xdb, 0xab, 0x21, 0x0c, 0xd1, 0x28, 0x84, 0x44, 0x86, + 0xa4, 0x7f, 0x6e, 0x46, 0x03, 0xb8, 0x68, 0x24, 0xb0, 0xd1, 0x8c, 0x89, 0xc2, 0x5b, 0x54, 0x7d, + 0xa5, 0xde, 0xec, 0x55, 0xbb, 0x57, 0x55, 0xd5, 0x8b, 0x55, 0x95, 0x3c, 0x48, 0x2b, 0x9b, 0x81, + 0x5d, 0xd0, 0x98, 0xd5, 0x5e, 0xc5, 0x73, 0x7e, 0xe7, 0xfb, 0xce, 0xf1, 0x89, 0x6d, 0x01, 0x9f, + 0xf3, 0x31, 0x65, 0x82, 0x0e, 0xe9, 0x88, 0x8a, 0x60, 0x5a, 0x1e, 0x07, 0x5c, 0xf0, 0xf2, 0x90, + 0x0f, 0xc2, 0xf2, 0xdd, 0xa9, 0xfa, 0x5b, 0x52, 0x21, 0x7c, 0xb8, 0x94, 0x37, 0x0b, 0x96, 0x14, + 0xbf, 0x3b, 0xcd, 0xed, 0x0f, 0xf8, 0x80, 0xcf, 0xa4, 0x72, 0x35, 0xa3, 0xb9, 0x93, 0x38, 0xeb, + 0x1e, 0x1f, 0x8d, 0x38, 0x93, 0xe6, 0xb3, 0x55, 0x94, 0x5b, 0x8a, 0xcb, 0x0d, 0x68, 0xc8, 0x27, + 0x41, 0x8f, 0xca, 0xec, 0xf9, 0x7a, 0x96, 0x7f, 0xfc, 0x17, 0x80, 0x19, 0x12, 0x85, 0x9a, 0x7c, + 0x10, 0xe2, 0x0b, 0xb8, 0x3d, 0x4f, 0x31, 0x40, 0x01, 0x14, 0x77, 0xcd, 0xaf, 0x4a, 0x71, 0x2d, + 0x2f, 0x7c, 0xee, 0x4e, 0x4b, 0x73, 0x83, 0x5a, 0xf2, 0xd5, 0x9b, 0xa3, 0x04, 0x59, 0x18, 0xe0, + 0x29, 0xfc, 0xd4, 0x67, 0xa1, 0x08, 0x26, 0x23, 0xca, 0x84, 0x27, 0x7c, 0xce, 0xdc, 0xa1, 0xdf, + 0x0d, 0xbc, 0x60, 0xea, 0xca, 0x2d, 0x1b, 0x1b, 0x85, 0xcd, 0xe2, 0xae, 0xf9, 0x6d, 0x69, 0xcd, + 0x4c, 0x4a, 0xce, 0xb2, 0x41, 0x73, 0xa6, 0x97, 0xbd, 0x92, 0x9c, 0xaf, 0x65, 0xc7, 0x7f, 0x00, + 0x98, 0xd3, 0x4b, 0xb1, 0x80, 0xcf, 0x34, 0x9d, 0x45, 0xbb, 0xfe, 0x26, 0xb6, 0xa9, 0x68, 0xd6, + 0xda, 0xb6, 0xa2, 0x09, 0x1c, 0xc4, 0x37, 0x86, 0x5f, 0xc2, 0xe4, 0x3b, 0xfb, 0x7e, 0xbe, 0x76, + 0xdf, 0x4d, 0x3e, 0x20, 0xb4, 0xc7, 0x83, 0x3e, 0x51, 0x9a, 0xe3, 0x3f, 0x93, 0x70, 0x67, 0x11, + 0xc3, 0x9f, 0xc3, 0xac, 0xf0, 0x47, 0xd4, 0x9d, 0x30, 0xff, 0xde, 0x65, 0x1e, 0xe3, 0xaa, 0xed, + 0x14, 0xc9, 0xc8, 0xe8, 0x0d, 0xf3, 0xef, 0x5b, 0x1e, 0xe3, 0xb8, 0x03, 0x9f, 0x84, 0xf4, 0x8e, + 0x06, 0xbe, 0x98, 0xba, 0x6c, 0x32, 0xea, 0xd2, 0xc0, 0xd8, 0x28, 0x80, 0x62, 0xd6, 0xfc, 0x7a, + 0x6d, 0xe9, 0x76, 0xa4, 0x69, 0x29, 0x09, 0xc9, 0x86, 0x4b, 0xdf, 0xf8, 0x33, 0xb8, 0xb7, 0x70, + 0x15, 0xf4, 0x5e, 0x18, 0x9b, 0x05, 0x50, 0xdc, 0x21, 0x99, 0x79, 0xb0, 0x43, 0xef, 0x05, 0xc6, + 0x30, 0xc9, 0xbc, 0x11, 0x35, 0x92, 0x8a, 0xa9, 0x35, 0xb6, 0x60, 0xb2, 0xcb, 0xfb, 0x53, 0x63, + 0x4b, 0x4d, 0xf8, 0xcb, 0xf7, 0x4c, 0xd8, 0x62, 0xd3, 0x5b, 0x6f, 0x38, 0x99, 0x9f, 0x2a, 0x25, + 0xc5, 0x97, 0x10, 0x7a, 0x42, 0x04, 0x7e, 0x77, 0x22, 0x68, 0x68, 0xa4, 0xd4, 0x1c, 0xdf, 0x67, + 0x74, 0x41, 0x97, 0x8c, 0xde, 0x31, 0xc0, 0xdf, 0x41, 0xa3, 0x1f, 0xf0, 0xf1, 0x98, 0xf6, 0xdd, + 0xb7, 0x51, 0xb7, 0xc7, 0x27, 0x4c, 0x18, 0xe9, 0x02, 
0x28, 0xee, 0x91, 0x83, 0x88, 0x5b, 0x0b, + 0x7c, 0x26, 0x29, 0xde, 0x87, 0x5b, 0xbf, 0x0c, 0xbd, 0x41, 0x68, 0x6c, 0x17, 0x40, 0x31, 0x4d, + 0x66, 0x1f, 0xf8, 0x16, 0x6e, 0x8b, 0xc0, 0xeb, 0x51, 0xd7, 0xef, 0x1b, 0x3b, 0x05, 0x50, 0xcc, + 0xd4, 0xbe, 0x97, 0x35, 0xff, 0x79, 0x73, 0x54, 0x19, 0xf0, 0x95, 0x36, 0x7d, 0x79, 0x89, 0x87, + 0x43, 0xda, 0x13, 0x3c, 0x28, 0xfb, 0x4c, 0xd0, 0x80, 0x79, 0xc3, 0x72, 0xdf, 0x13, 0x5e, 0xa9, + 0x23, 0x3d, 0x9c, 0x3a, 0x49, 0x2b, 0x33, 0xa7, 0x8f, 0xdb, 0x30, 0x1d, 0x8e, 0x3d, 0x26, 0x6d, + 0xa1, 0xb2, 0x7d, 0x19, 0xd9, 0x9a, 0x1f, 0x62, 0xdb, 0x1e, 0x7b, 0xcc, 0xa9, 0x93, 0x94, 0xb4, + 0x72, 0xfa, 0x27, 0xbf, 0x6f, 0xc1, 0xec, 0xf2, 0xbf, 0x1a, 0x1f, 0xc1, 0xc3, 0xb6, 0x7d, 0x6b, + 0x13, 0xa7, 0xf3, 0x93, 0xdb, 0xba, 0xb9, 0xac, 0xd9, 0xc4, 0xbd, 0x69, 0xb5, 0xaf, 0xed, 0x33, + 0xa7, 0xe1, 0xd8, 0x75, 0x94, 0xc0, 0x9f, 0xc0, 0xa7, 0xab, 0x09, 0x1d, 0x62, 0x9d, 0xd9, 0x08, + 0xe0, 0x1c, 0x3c, 0x88, 0x45, 0x26, 0xda, 0xd0, 0xb2, 0x0a, 0xda, 0xd4, 0xb2, 0x2a, 0x4a, 0xc6, + 0x95, 0xab, 0xdb, 0xb5, 0x9b, 0x73, 0xb4, 0x15, 0x27, 0x53, 0xc8, 0x44, 0x29, 0x2d, 0xab, 0xa0, + 0xb4, 0x96, 0x55, 0xd1, 0x36, 0x36, 0xe0, 0xfe, 0x2a, 0x73, 0x5a, 0x8d, 0x2b, 0xb4, 0x13, 0xd7, + 0x88, 0x24, 0x26, 0x82, 0x3a, 0x54, 0x41, 0xbb, 0x3a, 0x54, 0x45, 0x99, 0xb8, 0x52, 0x3f, 0x58, + 0xa4, 0x85, 0xf6, 0xe2, 0x44, 0x92, 0x98, 0x28, 0xab, 0x43, 0x15, 0xf4, 0x44, 0x87, 0xaa, 0x08, + 0xc5, 0x21, 0x9b, 0x90, 0x2b, 0x82, 0x3e, 0x8a, 0x1b, 0x86, 0x42, 0x26, 0xc2, 0x5a, 0x56, 0x41, + 0x1f, 0x6b, 0x59, 0x15, 0xed, 0xc7, 0x95, 0x6b, 0x58, 0x1d, 0xab, 0x89, 0x9e, 0xc6, 0xc9, 0x14, + 0x32, 0xd1, 0x81, 0x96, 0x55, 0xd0, 0x33, 0x2d, 0xab, 0x22, 0xe3, 0xe4, 0x47, 0x98, 0x5d, 0x3c, + 0x8b, 0x0d, 0x75, 0x09, 0x8f, 0xe0, 0x61, 0xf3, 0xea, 0xdc, 0x25, 0xf6, 0xd9, 0x15, 0xa9, 0xbb, + 0x8d, 0xa6, 0x75, 0xbe, 0x72, 0x88, 0xbf, 0x80, 0x85, 0xd5, 0x04, 0x75, 0xe2, 0xd4, 0xb2, 0xed, + 0x5e, 0x5a, 0xed, 0x0b, 0xf4, 0x1f, 0xa8, 0xfd, 0x06, 0x5e, 0x3d, 0xe4, 0xc1, 0xeb, 0x87, 0x3c, + 0xf8, 0xf7, 0x21, 0x0f, 0x7e, 0x7d, 0xcc, 0x27, 0x5e, 0x3f, 0xe6, 0x13, 0x7f, 0x3f, 0xe6, 0x13, + 0x30, 0xef, 0xf3, 0x75, 0x2f, 0x68, 0x4d, 0xbe, 0xd4, 0xe1, 0xb5, 0x0c, 0x5d, 0x83, 0x9f, 0x2f, + 0x3e, 0xe0, 0x8e, 0x96, 0x97, 0x12, 0x5f, 0x28, 0xd7, 0x17, 0x03, 0xca, 0xe6, 0x3f, 0x25, 0xba, + 0x29, 0x15, 0xaa, 0xfc, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x28, 0xc9, 0xea, 0x70, 0x08, 0x00, + 0x00, +} + +func (m *ResourceLogs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceLogs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.InstrumentationLibraryLogs) > 0 { + for iNdEx := len(m.InstrumentationLibraryLogs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InstrumentationLibraryLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *InstrumentationLibraryLogs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InstrumentationLibraryLogs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InstrumentationLibraryLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Logs) > 0 { + for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LogRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if m.Flags != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags)) + i-- + dAtA[i] = 0x45 + } + if m.DroppedAttributesCount != 0 { + i = encodeVarintLogs(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x38 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + { + size, err := m.Body.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogs(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintLogs(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if len(m.SeverityText) > 0 { + i -= len(m.SeverityText) + copy(dAtA[i:], m.SeverityText) + i = encodeVarintLogs(dAtA, i, uint64(len(m.SeverityText))) + i-- + dAtA[i] = 0x1a + } + if m.SeverityNumber != 0 { + i = encodeVarintLogs(dAtA, i, uint64(m.SeverityNumber)) + i-- + dAtA[i] = 0x10 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func encodeVarintLogs(dAtA []byte, offset int, v uint64) int { + offset -= sovLogs(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceLogs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovLogs(uint64(l)) + if len(m.InstrumentationLibraryLogs) > 0 { + for _, e := range 
m.InstrumentationLibraryLogs { + l = e.Size() + n += 1 + l + sovLogs(uint64(l)) + } + } + return n +} + +func (m *InstrumentationLibraryLogs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.InstrumentationLibrary.Size() + n += 1 + l + sovLogs(uint64(l)) + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.Size() + n += 1 + l + sovLogs(uint64(l)) + } + } + return n +} + +func (m *LogRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 9 + } + if m.SeverityNumber != 0 { + n += 1 + sovLogs(uint64(m.SeverityNumber)) + } + l = len(m.SeverityText) + if l > 0 { + n += 1 + l + sovLogs(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovLogs(uint64(l)) + } + l = m.Body.Size() + n += 1 + l + sovLogs(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovLogs(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovLogs(uint64(m.DroppedAttributesCount)) + } + if m.Flags != 0 { + n += 5 + } + l = m.TraceId.Size() + n += 1 + l + sovLogs(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovLogs(uint64(l)) + return n +} + +func sovLogs(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLogs(x uint64) (n int) { + return sovLogs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceLogs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceLogs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceLogs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibraryLogs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstrumentationLibraryLogs = append(m.InstrumentationLibraryLogs, &InstrumentationLibraryLogs{}) + if err := m.InstrumentationLibraryLogs[len(m.InstrumentationLibraryLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: 
+ iNdEx = preIndex + skippy, err := skipLogs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogs + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InstrumentationLibraryLogs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InstrumentationLibraryLogs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InstrumentationLibraryLogs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &LogRecord{}) + if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogs + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType) + } + m.SeverityNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SeverityNumber |= SeverityNumber(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeverityText = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + 
m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogs + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLogs + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLogs + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLogs + } + if depth == 0 { + return iNdEx, nil + 
} + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLogs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogs = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLogs = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/metrics/v1/metrics.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/metrics/v1/metrics.pb.go new file mode 100644 index 00000000000..963162699df --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/metrics/v1/metrics.pb.go @@ -0,0 +1,6374 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/metrics/v1/metrics.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + go_opentelemetry_io_collector_internal_data "go.opentelemetry.io/collector/internal/data" + v11 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. 
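The eight-step walkthrough above yields per-interval values of 3 and then 2 for a DELTA metric; the CUMULATIVE variant described next reports 3 and then 5 instead, because each point covers the whole window since the fixed start time. The relationship in a few lines of illustrative Go:

// Editorial sketch: rebuilding the cumulative series from delta reports.
deltas := []int64{3, 2} // intervals [t0,t0+1) and [t0+1,t0+2) from the example
cumulative := make([]int64, len(deltas))
var sum int64
for i, d := range deltas {
	sum += d
	cumulative[i] = sum
}
fmt.Println(cumulative) // [3 5], matching the CUMULATIVE walkthrough below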
+ AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +var AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", +} + +var AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, +} + +func (x AggregationTemporality) String() string { + return proto.EnumName(AggregationTemporality_name, int32(x)) +} + +func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{0} +} + +// A collection of InstrumentationLibraryMetrics from a Resource. +type ResourceMetrics struct { + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of metrics that originate from a resource. 
+ InstrumentationLibraryMetrics []*InstrumentationLibraryMetrics `protobuf:"bytes,2,rep,name=instrumentation_library_metrics,json=instrumentationLibraryMetrics,proto3" json:"instrumentation_library_metrics,omitempty"` +} + +func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} } +func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) } +func (*ResourceMetrics) ProtoMessage() {} +func (*ResourceMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{0} +} +func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetrics.Merge(m, src) +} +func (m *ResourceMetrics) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo + +func (m *ResourceMetrics) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceMetrics) GetInstrumentationLibraryMetrics() []*InstrumentationLibraryMetrics { + if m != nil { + return m.InstrumentationLibraryMetrics + } + return nil +} + +// A collection of Metrics produced by an InstrumentationLibrary. +type InstrumentationLibraryMetrics struct { + // The instrumentation library information for the metrics in this message. + // If this field is not set then no library info is known. + InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` + // A list of metrics that originate from an instrumentation library. 
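ResourceMetrics and InstrumentationLibraryMetrics nest exactly like their logs counterparts earlier in this diff: resource, then instrumentation library, then the individual metrics (the Metrics field follows below). A traversal sketch, where rms is a hypothetical []*metricspb.ResourceMetrics:

// Editorial sketch: walking the ResourceMetrics nesting with the
// generated getters, which are nil-safe at each level.
for _, rm := range rms {
	for _, ilm := range rm.GetInstrumentationLibraryMetrics() {
		for _, metric := range ilm.GetMetrics() {
			fmt.Println(metric.GetName())
		}
	}
}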
+ Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (m *InstrumentationLibraryMetrics) Reset() { *m = InstrumentationLibraryMetrics{} } +func (m *InstrumentationLibraryMetrics) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibraryMetrics) ProtoMessage() {} +func (*InstrumentationLibraryMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{1} +} +func (m *InstrumentationLibraryMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InstrumentationLibraryMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InstrumentationLibraryMetrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InstrumentationLibraryMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibraryMetrics.Merge(m, src) +} +func (m *InstrumentationLibraryMetrics) XXX_Size() int { + return m.Size() +} +func (m *InstrumentationLibraryMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibraryMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibraryMetrics proto.InternalMessageInfo + +func (m *InstrumentationLibraryMetrics) GetInstrumentationLibrary() v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return v11.InstrumentationLibrary{} +} + +func (m *InstrumentationLibraryMetrics) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +// Defines a Metric which has one or more timeseries. +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. +// - Data is one of the possible types (Gauge, Sum, Histogram, etc.). +// - DataPoint contains timestamps, labels, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// All DataPoint types have three common fields: +// - Labels zero or more key-value pairs associated with the data point. +// - StartTimeUnixNano MUST be set to the start of the interval when the data's +// type includes an AggregationTemporality. 
This field is not set otherwise.
+//   - TimeUnixNano MUST be set to:
+//     - the moment when an aggregation is reported (independent of the
+//       aggregation temporality).
+//     - the instantaneous time of the event.
+type Metric struct {
+	// name of the metric, including its DNS name prefix. It must be unique.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// description of the metric, which can be used in documentation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// unit in which the metric value is reported. Follows the format
+	// described by http://unitsofmeasure.org/ucum.html.
+	Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+	// Data determines the aggregation type (if any) of the metric, the
+	// reported value type for the data points, and the relationship to the
+	// time interval over which they are reported.
+	//
+	// TODO: Update table after the decision on:
+	// https://github.com/open-telemetry/opentelemetry-specification/issues/731.
+	// By default, metrics recorded using the OpenTelemetry API are exported as
+	// (the table does not include MeasurementValueType to avoid extra rows):
+	//
+	//   Instrument         Type
+	//   ----------------------------------------------
+	//   Counter            Sum(aggregation_temporality=delta;is_monotonic=true)
+	//   UpDownCounter      Sum(aggregation_temporality=delta;is_monotonic=false)
+	//   ValueRecorder      TBD
+	//   SumObserver        Sum(aggregation_temporality=cumulative;is_monotonic=true)
+	//   UpDownSumObserver  Sum(aggregation_temporality=cumulative;is_monotonic=false)
+	//   ValueObserver      Gauge()
+	//
+	// Types that are valid to be assigned to Data:
+	//	*Metric_IntGauge
+	//	*Metric_DoubleGauge
+	//	*Metric_IntSum
+	//	*Metric_DoubleSum
+	//	*Metric_IntHistogram
+	//	*Metric_DoubleHistogram
+	//	*Metric_DoubleSummary
+	Data isMetric_Data `protobuf_oneof:"data"`
+}
+
+func (m *Metric) Reset()         { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage()    {}
+func (*Metric) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3c3112f9fa006917, []int{2}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Metric) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metric.Merge(m, src)
+}
+func (m *Metric) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metric) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
+
+type isMetric_Data interface {
+	isMetric_Data()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Metric_IntGauge struct {
+	IntGauge *IntGauge `protobuf:"bytes,4,opt,name=int_gauge,json=intGauge,proto3,oneof" json:"int_gauge,omitempty"`
+}
+type Metric_DoubleGauge struct {
+	DoubleGauge *DoubleGauge `protobuf:"bytes,5,opt,name=double_gauge,json=doubleGauge,proto3,oneof" json:"double_gauge,omitempty"`
+}
+type Metric_IntSum struct {
+	IntSum *IntSum `protobuf:"bytes,6,opt,name=int_sum,json=intSum,proto3,oneof" json:"int_sum,omitempty"`
+}
+type Metric_DoubleSum struct {
+	DoubleSum *DoubleSum `protobuf:"bytes,7,opt,name=double_sum,json=doubleSum,proto3,oneof" json:"double_sum,omitempty"`
+}
+type Metric_IntHistogram struct {
+	IntHistogram *IntHistogram `protobuf:"bytes,8,opt,name=int_histogram,json=intHistogram,proto3,oneof" json:"int_histogram,omitempty"`
+}
+type Metric_DoubleHistogram struct {
+	DoubleHistogram *DoubleHistogram `protobuf:"bytes,9,opt,name=double_histogram,json=doubleHistogram,proto3,oneof" json:"double_histogram,omitempty"`
+}
+type Metric_DoubleSummary struct {
+	DoubleSummary *DoubleSummary `protobuf:"bytes,11,opt,name=double_summary,json=doubleSummary,proto3,oneof" json:"double_summary,omitempty"`
+}
+
+func (*Metric_IntGauge) isMetric_Data()        {}
+func (*Metric_DoubleGauge) isMetric_Data()     {}
+func (*Metric_IntSum) isMetric_Data()          {}
+func (*Metric_DoubleSum) isMetric_Data()       {}
+func (*Metric_IntHistogram) isMetric_Data()    {}
+func (*Metric_DoubleHistogram) isMetric_Data() {}
+func (*Metric_DoubleSummary) isMetric_Data()   {}
+
+func (m *Metric) GetData() isMetric_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *Metric) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Metric) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Metric) GetUnit() string {
+	if m != nil {
+		return m.Unit
+	}
+	return ""
+}
+
+func (m *Metric) GetIntGauge() *IntGauge {
+	if x, ok := m.GetData().(*Metric_IntGauge); ok {
+		return x.IntGauge
+	}
+	return nil
+}
+
+func (m *Metric) GetDoubleGauge() *DoubleGauge {
+	if x, ok := m.GetData().(*Metric_DoubleGauge); ok {
+		return x.DoubleGauge
+	}
+	return nil
+}
+
+func (m *Metric) GetIntSum() *IntSum {
+	if x, ok := m.GetData().(*Metric_IntSum); ok {
+		return x.IntSum
+	}
+	return nil
+}
+
+func (m *Metric) GetDoubleSum() *DoubleSum {
+	if x, ok := m.GetData().(*Metric_DoubleSum); ok {
+		return x.DoubleSum
+	}
+	return nil
+}
+
+func (m *Metric) GetIntHistogram() *IntHistogram {
+	if x, ok := m.GetData().(*Metric_IntHistogram); ok {
+		return x.IntHistogram
+	}
+	return nil
+}
+
+func (m *Metric) GetDoubleHistogram() *DoubleHistogram {
+	if x, ok := m.GetData().(*Metric_DoubleHistogram); ok {
+		return x.DoubleHistogram
+	}
+	return nil
+}
+
+func (m *Metric) GetDoubleSummary() *DoubleSummary {
+	if x, ok := m.GetData().(*Metric_DoubleSummary); ok {
+		return x.DoubleSummary
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Metric) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Metric_IntGauge)(nil),
+		(*Metric_DoubleGauge)(nil),
+		(*Metric_IntSum)(nil),
+		(*Metric_DoubleSum)(nil),
+		(*Metric_IntHistogram)(nil),
+		(*Metric_DoubleHistogram)(nil),
+		(*Metric_DoubleSummary)(nil),
+	}
+}
+
+// Gauge represents the type of an int scalar metric that always exports the
+// "current value" for every data point. It should be used for an "unknown"
+// aggregation.
+//
+// A Gauge does not support different aggregation temporalities. Given the
+// aggregation is unknown, points cannot be combined using the same
+// aggregation, regardless of aggregation temporalities. Therefore,
+// AggregationTemporality is not included. Consequently, this also means
+// "StartTimeUnixNano" is ignored for all data points.
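+//
+// As an illustrative sketch only (the metric name, timestamp variable, and
+// value below are invented, not part of the generated code): a gauge reaches
+// the wire wrapped in the Metric oneof via the Metric_IntGauge wrapper, e.g.
+//
+//	m := &Metric{
+//		Name: "process.open_fds",
+//		Unit: "1",
+//		Data: &Metric_IntGauge{IntGauge: &IntGauge{
+//			DataPoints: []*IntDataPoint{{TimeUnixNano: nowNanos, Value: 42}},
+//		}},
+//	}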
+type IntGauge struct { + DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +func (m *IntGauge) Reset() { *m = IntGauge{} } +func (m *IntGauge) String() string { return proto.CompactTextString(m) } +func (*IntGauge) ProtoMessage() {} +func (*IntGauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{3} +} +func (m *IntGauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IntGauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IntGauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntGauge.Merge(m, src) +} +func (m *IntGauge) XXX_Size() int { + return m.Size() +} +func (m *IntGauge) XXX_DiscardUnknown() { + xxx_messageInfo_IntGauge.DiscardUnknown(m) +} + +var xxx_messageInfo_IntGauge proto.InternalMessageInfo + +func (m *IntGauge) GetDataPoints() []*IntDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Gauge represents the type of a double scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +type DoubleGauge struct { + DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +func (m *DoubleGauge) Reset() { *m = DoubleGauge{} } +func (m *DoubleGauge) String() string { return proto.CompactTextString(m) } +func (*DoubleGauge) ProtoMessage() {} +func (*DoubleGauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{4} +} +func (m *DoubleGauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleGauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleGauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleGauge.Merge(m, src) +} +func (m *DoubleGauge) XXX_Size() int { + return m.Size() +} +func (m *DoubleGauge) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleGauge.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleGauge proto.InternalMessageInfo + +func (m *DoubleGauge) GetDataPoints() []*DoubleDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Sum represents the type of a numeric int scalar metric that is calculated as +// a sum of all reported measurements over a time interval. +type IntSum struct { + DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
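+	//
+	// Worked sketch (numbers invented): with DELTA temporality, two successive
+	// points carrying Value 5 and then 3 describe 8 events in total; with
+	// CUMULATIVE temporality the same traffic is reported as 5 and then 8.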
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+	// If "true", the sum is monotonic.
+	IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
+}
+
+func (m *IntSum) Reset()         { *m = IntSum{} }
+func (m *IntSum) String() string { return proto.CompactTextString(m) }
+func (*IntSum) ProtoMessage()    {}
+func (*IntSum) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3c3112f9fa006917, []int{5}
+}
+func (m *IntSum) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *IntSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_IntSum.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *IntSum) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IntSum.Merge(m, src)
+}
+func (m *IntSum) XXX_Size() int {
+	return m.Size()
+}
+func (m *IntSum) XXX_DiscardUnknown() {
+	xxx_messageInfo_IntSum.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntSum proto.InternalMessageInfo
+
+func (m *IntSum) GetDataPoints() []*IntDataPoint {
+	if m != nil {
+		return m.DataPoints
+	}
+	return nil
+}
+
+func (m *IntSum) GetAggregationTemporality() AggregationTemporality {
+	if m != nil {
+		return m.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+func (m *IntSum) GetIsMonotonic() bool {
+	if m != nil {
+		return m.IsMonotonic
+	}
+	return false
+}
+
+// Sum represents the type of a numeric double scalar metric that is calculated
+// as a sum of all reported measurements over a time interval.
+type DoubleSum struct {
+	DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+	// If "true", the sum is monotonic.
+ IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` +} + +func (m *DoubleSum) Reset() { *m = DoubleSum{} } +func (m *DoubleSum) String() string { return proto.CompactTextString(m) } +func (*DoubleSum) ProtoMessage() {} +func (*DoubleSum) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{6} +} +func (m *DoubleSum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleSum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleSum) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleSum.Merge(m, src) +} +func (m *DoubleSum) XXX_Size() int { + return m.Size() +} +func (m *DoubleSum) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleSum.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleSum proto.InternalMessageInfo + +func (m *DoubleSum) GetDataPoints() []*DoubleDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *DoubleSum) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +func (m *DoubleSum) GetIsMonotonic() bool { + if m != nil { + return m.IsMonotonic + } + return false +} + +// Represents the type of a metric that is calculated by aggregating as a +// Histogram of all reported int measurements over a time interval. +type IntHistogram struct { + DataPoints []*IntHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +func (m *IntHistogram) Reset() { *m = IntHistogram{} } +func (m *IntHistogram) String() string { return proto.CompactTextString(m) } +func (*IntHistogram) ProtoMessage() {} +func (*IntHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{7} +} +func (m *IntHistogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IntHistogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IntHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntHistogram.Merge(m, src) +} +func (m *IntHistogram) XXX_Size() int { + return m.Size() +} +func (m *IntHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_IntHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_IntHistogram proto.InternalMessageInfo + +func (m *IntHistogram) GetDataPoints() []*IntHistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *IntHistogram) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Represents the type of a metric that is calculated by aggregating as a +// Histogram of all reported double measurements over a time interval. +type DoubleHistogram struct { + DataPoints []*DoubleHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+func (m *DoubleHistogram) Reset()         { *m = DoubleHistogram{} }
+func (m *DoubleHistogram) String() string { return proto.CompactTextString(m) }
+func (*DoubleHistogram) ProtoMessage()    {}
+func (*DoubleHistogram) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3c3112f9fa006917, []int{8}
+}
+func (m *DoubleHistogram) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DoubleHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DoubleHistogram.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DoubleHistogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DoubleHistogram.Merge(m, src)
+}
+func (m *DoubleHistogram) XXX_Size() int {
+	return m.Size()
+}
+func (m *DoubleHistogram) XXX_DiscardUnknown() {
+	xxx_messageInfo_DoubleHistogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleHistogram proto.InternalMessageInfo
+
+func (m *DoubleHistogram) GetDataPoints() []*DoubleHistogramDataPoint {
+	if m != nil {
+		return m.DataPoints
+	}
+	return nil
+}
+
+func (m *DoubleHistogram) GetAggregationTemporality() AggregationTemporality {
+	if m != nil {
+		return m.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+// DoubleSummary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+// data type. These data points cannot always be merged in a meaningful way.
+// While they can be useful in some applications, histogram data points are
+// recommended for new applications.
+type DoubleSummary struct {
+	DataPoints []*DoubleSummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+func (m *DoubleSummary) Reset()         { *m = DoubleSummary{} }
+func (m *DoubleSummary) String() string { return proto.CompactTextString(m) }
+func (*DoubleSummary) ProtoMessage()    {}
+func (*DoubleSummary) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3c3112f9fa006917, []int{9}
+}
+func (m *DoubleSummary) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DoubleSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DoubleSummary.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DoubleSummary) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DoubleSummary.Merge(m, src)
+}
+func (m *DoubleSummary) XXX_Size() int {
+	return m.Size()
+}
+func (m *DoubleSummary) XXX_DiscardUnknown() {
+	xxx_messageInfo_DoubleSummary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleSummary proto.InternalMessageInfo
+
+func (m *DoubleSummary) GetDataPoints() []*DoubleSummaryDataPoint {
+	if m != nil {
+		return m.DataPoints
+	}
+	return nil
+}
+
+// IntDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an int64 metric.
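+//
+// A minimal sketch (label key and values invented; v11 is this file's import
+// of the common proto package, where StringKeyValue is defined):
+//
+//	p := &IntDataPoint{
+//		Labels:       []v11.StringKeyValue{{Key: "host", Value: "web-1"}},
+//		TimeUnixNano: uint64(time.Now().UnixNano()),
+//		Value:        7,
+//	}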
+type IntDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // value itself. + Value int64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*IntExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (m *IntDataPoint) Reset() { *m = IntDataPoint{} } +func (m *IntDataPoint) String() string { return proto.CompactTextString(m) } +func (*IntDataPoint) ProtoMessage() {} +func (*IntDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{10} +} +func (m *IntDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IntDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IntDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntDataPoint.Merge(m, src) +} +func (m *IntDataPoint) XXX_Size() int { + return m.Size() +} +func (m *IntDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_IntDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_IntDataPoint proto.InternalMessageInfo + +func (m *IntDataPoint) GetLabels() []v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *IntDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *IntDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntDataPoint) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *IntDataPoint) GetExemplars() []*IntExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// DoubleDataPoint is a single data point in a timeseries that describes the +// time-varying value of a double metric. +type DoubleDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. 
+ // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // value itself. + Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*DoubleExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (m *DoubleDataPoint) Reset() { *m = DoubleDataPoint{} } +func (m *DoubleDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleDataPoint) ProtoMessage() {} +func (*DoubleDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{11} +} +func (m *DoubleDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleDataPoint.Merge(m, src) +} +func (m *DoubleDataPoint) XXX_Size() int { + return m.Size() +} +func (m *DoubleDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleDataPoint proto.InternalMessageInfo + +func (m *DoubleDataPoint) GetLabels() []v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleDataPoint) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DoubleDataPoint) GetExemplars() []*DoubleExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// IntHistogramDataPoint is a single data point in a timeseries that describes +// the time-varying values of a Histogram of int values. A Histogram contains +// summary statistics for a population of values, it may optionally contain +// the distribution of those values across a set of buckets. +type IntHistogramDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. 
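+	//
+	// For instance (hypothetical timestamps): a cumulative point covering
+	// 10:00:00-10:00:30 UTC sets start_time_unix_nano to 10:00:00 and
+	// time_unix_nano to 10:00:30, both expressed in nanoseconds since the
+	// UNIX epoch.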
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	//
+	// Value of 0 indicates that the timestamp is unspecified. In that case the
+	// timestamp may be decided by the backend.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// time_unix_nano is the moment when this aggregation value was reported.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative. This
+	// value must be equal to the sum of the "count" fields in buckets if a
+	// histogram is provided.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero. This value must be equal to the sum of the "sum" fields in
+	// buckets if a histogram is provided.
+	Sum int64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
+	// bucket_counts is an optional field that contains the count values of the
+	// histogram for each bucket.
+	//
+	// The sum of the bucket_counts must equal the value in the count field.
+	//
+	// The number of elements in the bucket_counts array must be one greater
+	// than the number of elements in the explicit_bounds array.
+	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+	// explicit_bounds specifies buckets with explicitly defined bounds for values.
+	// The bucket boundaries are described by the "bounds" field.
+	//
+	// This defines size(bounds) + 1 (= N) buckets. The boundaries for the bucket
+	// at index i are:
+	//
+	// (-infinity, bounds[i]) for i == 0
+	// [bounds[i-1], bounds[i]) for 0 < i < N-1
+	// [bounds[i], +infinity) for i == N-1
+	// The values in the bounds array must be strictly increasing.
+	//
+	// Note: only [a, b) intervals are currently supported for each bucket except the first one.
+	// If we decide to also support (a, b] intervals we should add support for these by defining
+	// a boolean value which decides what type of intervals to use.
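+	//
+	// Hedged sketch (counts invented): ExplicitBounds of [0.1, 1, 10] defines
+	// four buckets, so BucketCounts needs four entries whose total equals Count:
+	//
+	//	hp := &IntHistogramDataPoint{
+	//		Count:          10,
+	//		Sum:            42,
+	//		BucketCounts:   []uint64{2, 7, 1, 0}, // 2+7+1+0 == Count
+	//		ExplicitBounds: []float64{0.1, 1, 10},
+	//	}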
+ ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*IntExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (m *IntHistogramDataPoint) Reset() { *m = IntHistogramDataPoint{} } +func (m *IntHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*IntHistogramDataPoint) ProtoMessage() {} +func (*IntHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{12} +} +func (m *IntHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IntHistogramDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IntHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntHistogramDataPoint.Merge(m, src) +} +func (m *IntHistogramDataPoint) XXX_Size() int { + return m.Size() +} +func (m *IntHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_IntHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_IntHistogramDataPoint proto.InternalMessageInfo + +func (m *IntHistogramDataPoint) GetLabels() []v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *IntHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *IntHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntHistogramDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *IntHistogramDataPoint) GetSum() int64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *IntHistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *IntHistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *IntHistogramDataPoint) GetExemplars() []*IntExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram of double values. A Histogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +type DoubleHistogramDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. 
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// time_unix_nano is the moment when this aggregation value was reported.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative. This
+	// value must be equal to the sum of the "count" fields in buckets if a
+	// histogram is provided.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero. This value must be equal to the sum of the "sum" fields in
+	// buckets if a histogram is provided.
+	Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
+	// bucket_counts is an optional field that contains the count values of the
+	// histogram for each bucket.
+	//
+	// The sum of the bucket_counts must equal the value in the count field.
+	//
+	// The number of elements in the bucket_counts array must be one greater
+	// than the number of elements in the explicit_bounds array.
+	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+	// explicit_bounds specifies buckets with explicitly defined bounds for values.
+	// The bucket boundaries are described by the "bounds" field.
+	//
+	// This defines size(bounds) + 1 (= N) buckets. The boundaries for the bucket
+	// at index i are:
+	//
+	// (-infinity, bounds[i]) for i == 0
+	// [bounds[i-1], bounds[i]) for 0 < i < N-1
+	// [bounds[i], +infinity) for i == N-1
+	// The values in the bounds array must be strictly increasing.
+	//
+	// Note: only [a, b) intervals are currently supported for each bucket except the first one.
+	// If we decide to also support (a, b] intervals we should add support for these by defining
+	// a boolean value which decides what type of intervals to use.
+ ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*DoubleExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (m *DoubleHistogramDataPoint) Reset() { *m = DoubleHistogramDataPoint{} } +func (m *DoubleHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleHistogramDataPoint) ProtoMessage() {} +func (*DoubleHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{13} +} +func (m *DoubleHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleHistogramDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleHistogramDataPoint.Merge(m, src) +} +func (m *DoubleHistogramDataPoint) XXX_Size() int { + return m.Size() +} +func (m *DoubleHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleHistogramDataPoint proto.InternalMessageInfo + +func (m *DoubleHistogramDataPoint) GetLabels() []v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetExemplars() []*DoubleExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// DoubleSummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type DoubleSummaryDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. 
+ // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + QuantileValues []*DoubleSummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"` +} + +func (m *DoubleSummaryDataPoint) Reset() { *m = DoubleSummaryDataPoint{} } +func (m *DoubleSummaryDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleSummaryDataPoint) ProtoMessage() {} +func (*DoubleSummaryDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{14} +} +func (m *DoubleSummaryDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleSummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleSummaryDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleSummaryDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleSummaryDataPoint.Merge(m, src) +} +func (m *DoubleSummaryDataPoint) XXX_Size() int { + return m.Size() +} +func (m *DoubleSummaryDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleSummaryDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleSummaryDataPoint proto.InternalMessageInfo + +func (m *DoubleSummaryDataPoint) GetLabels() []v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleSummaryDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleSummaryDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleSummaryDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DoubleSummaryDataPoint) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DoubleSummaryDataPoint) GetQuantileValues() []*DoubleSummaryDataPoint_ValueAtQuantile { + if m != nil { + return m.QuantileValues + } + return nil +} + +// Represents the value at a given quantile of a distribution. +// +// To record Min and Max values following conventions are used: +// - The 1.0 quantile is equivalent to the maximum value observed. +// - The 0.0 quantile is equivalent to the minimum value observed. +// +// See the following issue for more context: +// https://github.com/open-telemetry/opentelemetry-proto/issues/125 +type DoubleSummaryDataPoint_ValueAtQuantile struct { + // The quantile of a distribution. Must be in the interval + // [0.0, 1.0]. + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + // The value at the given quantile of a distribution. 
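+	//
+	// Sketch (numbers invented) of the min/max convention described above:
+	//
+	//	qs := []*DoubleSummaryDataPoint_ValueAtQuantile{
+	//		{Quantile: 0.0, Value: 0.8},  // minimum observed
+	//		{Quantile: 0.5, Value: 3.1},  // median
+	//		{Quantile: 1.0, Value: 12.0}, // maximum observed
+	//	}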
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) Reset() { + *m = DoubleSummaryDataPoint_ValueAtQuantile{} +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) } +func (*DoubleSummaryDataPoint_ValueAtQuantile) ProtoMessage() {} +func (*DoubleSummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{14, 0} +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile.Merge(m, src) +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Size() int { + return m.Size() +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) GetQuantile() float64 { + if m != nil { + return m.Quantile + } + return 0 +} + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// A representation of an exemplar, which is a sample input int measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type IntExemplar struct { + // The set of labels that were filtered out by the aggregator, but recorded + // alongside the original measurement. Only labels that were filtered out + // by the aggregator should be included + FilteredLabels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical int value of the measurement that was recorded. + Value int64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. 
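+	//
+	// Minimal sketch (timestamp variable and value invented; span/trace IDs
+	// left unset, as they may be when the measurement is recorded outside a
+	// sampled trace):
+	//
+	//	ex := &IntExemplar{TimeUnixNano: nowNanos, Value: 42}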
+ TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` +} + +func (m *IntExemplar) Reset() { *m = IntExemplar{} } +func (m *IntExemplar) String() string { return proto.CompactTextString(m) } +func (*IntExemplar) ProtoMessage() {} +func (*IntExemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{15} +} +func (m *IntExemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IntExemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IntExemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntExemplar.Merge(m, src) +} +func (m *IntExemplar) XXX_Size() int { + return m.Size() +} +func (m *IntExemplar) XXX_DiscardUnknown() { + xxx_messageInfo_IntExemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_IntExemplar proto.InternalMessageInfo + +func (m *IntExemplar) GetFilteredLabels() []v11.StringKeyValue { + if m != nil { + return m.FilteredLabels + } + return nil +} + +func (m *IntExemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntExemplar) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// A representation of an exemplar, which is a sample input double measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type DoubleExemplar struct { + // The set of labels that were filtered out by the aggregator, but recorded + // alongside the original measurement. Only labels that were filtered out + // by the aggregator should be included + FilteredLabels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical double value of the measurement that was recorded. + Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. 
+ TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` +} + +func (m *DoubleExemplar) Reset() { *m = DoubleExemplar{} } +func (m *DoubleExemplar) String() string { return proto.CompactTextString(m) } +func (*DoubleExemplar) ProtoMessage() {} +func (*DoubleExemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{16} +} +func (m *DoubleExemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleExemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleExemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleExemplar.Merge(m, src) +} +func (m *DoubleExemplar) XXX_Size() int { + return m.Size() +} +func (m *DoubleExemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleExemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleExemplar proto.InternalMessageInfo + +func (m *DoubleExemplar) GetFilteredLabels() []v11.StringKeyValue { + if m != nil { + return m.FilteredLabels + } + return nil +} + +func (m *DoubleExemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleExemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") + proto.RegisterType((*InstrumentationLibraryMetrics)(nil), "opentelemetry.proto.metrics.v1.InstrumentationLibraryMetrics") + proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") + proto.RegisterType((*IntGauge)(nil), "opentelemetry.proto.metrics.v1.IntGauge") + proto.RegisterType((*DoubleGauge)(nil), "opentelemetry.proto.metrics.v1.DoubleGauge") + proto.RegisterType((*IntSum)(nil), "opentelemetry.proto.metrics.v1.IntSum") + proto.RegisterType((*DoubleSum)(nil), "opentelemetry.proto.metrics.v1.DoubleSum") + proto.RegisterType((*IntHistogram)(nil), "opentelemetry.proto.metrics.v1.IntHistogram") + proto.RegisterType((*DoubleHistogram)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogram") + proto.RegisterType((*DoubleSummary)(nil), "opentelemetry.proto.metrics.v1.DoubleSummary") + proto.RegisterType((*IntDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntDataPoint") + proto.RegisterType((*DoubleDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleDataPoint") + proto.RegisterType((*IntHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntHistogramDataPoint") + proto.RegisterType((*DoubleHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogramDataPoint") + proto.RegisterType((*DoubleSummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleSummaryDataPoint") + proto.RegisterType((*DoubleSummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.DoubleSummaryDataPoint.ValueAtQuantile") + proto.RegisterType((*IntExemplar)(nil), "opentelemetry.proto.metrics.v1.IntExemplar") + proto.RegisterType((*DoubleExemplar)(nil), "opentelemetry.proto.metrics.v1.DoubleExemplar") +} + +func init() { + 
proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) +} + +var fileDescriptor_3c3112f9fa006917 = []byte{ + // 1259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xc1, 0x6b, 0x1b, 0x47, + 0x17, 0xd7, 0x4a, 0xb6, 0x2c, 0x3f, 0xc9, 0x92, 0xbf, 0x21, 0x9f, 0xb3, 0x18, 0xa2, 0x38, 0x4a, + 0x49, 0xdc, 0x24, 0x96, 0x88, 0x43, 0x42, 0x69, 0x29, 0x54, 0xb2, 0x14, 0x5b, 0x8d, 0xec, 0xa8, + 0x2b, 0xd9, 0x25, 0x25, 0xb0, 0xac, 0xb4, 0x53, 0x65, 0xe8, 0xee, 0x8c, 0xba, 0x3b, 0x6b, 0xec, + 0x6b, 0xa1, 0xb7, 0x42, 0x0b, 0x3d, 0xb5, 0xfd, 0x87, 0x72, 0xcc, 0xa1, 0x90, 0x52, 0x68, 0x28, + 0x09, 0xf4, 0xd2, 0x53, 0xef, 0x3d, 0x94, 0x99, 0xdd, 0xb5, 0x24, 0x7b, 0x6d, 0xc9, 0x4d, 0x0a, + 0x76, 0x6e, 0x6f, 0xde, 0xbc, 0xf7, 0x9b, 0xf7, 0x7e, 0xef, 0xcd, 0xdb, 0x91, 0xe0, 0x16, 0xeb, + 0x63, 0xca, 0xb1, 0x85, 0x6d, 0xcc, 0x9d, 0xfd, 0x52, 0xdf, 0x61, 0x9c, 0x95, 0x84, 0x4c, 0xba, + 0x6e, 0x69, 0xf7, 0x76, 0x28, 0x16, 0xe5, 0x06, 0xca, 0x8f, 0x58, 0xfb, 0xca, 0x62, 0x68, 0xb2, + 0x7b, 0x7b, 0xf1, 0x42, 0x8f, 0xf5, 0x98, 0x8f, 0x21, 0x24, 0xdf, 0x60, 0xf1, 0x46, 0xd4, 0x19, + 0x5d, 0x66, 0xdb, 0x8c, 0x8a, 0x23, 0x7c, 0x29, 0xb0, 0x2d, 0x46, 0xd9, 0x3a, 0xd8, 0x65, 0x9e, + 0xd3, 0xc5, 0xc2, 0x3a, 0x94, 0x7d, 0xfb, 0xc2, 0x1f, 0x0a, 0xe4, 0xb4, 0x40, 0xb5, 0xe9, 0x07, + 0x82, 0x1e, 0x40, 0x2a, 0xb4, 0x52, 0x95, 0x25, 0x65, 0x39, 0xbd, 0xfa, 0x6e, 0x31, 0x2a, 0xf0, + 0x03, 0xa8, 0xdd, 0xdb, 0xc5, 0x10, 0xa3, 0x32, 0xf5, 0xf4, 0xc5, 0xe5, 0x98, 0x76, 0x00, 0x80, + 0xbe, 0x56, 0xe0, 0x32, 0xa1, 0x2e, 0x77, 0x3c, 0x1b, 0x53, 0x6e, 0x70, 0xc2, 0xa8, 0x6e, 0x91, + 0x8e, 0x63, 0x38, 0xfb, 0x7a, 0x90, 0xb9, 0x1a, 0x5f, 0x4a, 0x2c, 0xa7, 0x57, 0x3f, 0x2c, 0x9e, + 0xcc, 0x4e, 0xb1, 0x3e, 0x0a, 0xd3, 0xf0, 0x51, 0x82, 0xa8, 0xb5, 0x4b, 0xe4, 0xa4, 0xed, 0xc2, + 0x73, 0x05, 0x2e, 0x9d, 0x08, 0x80, 0x38, 0x5c, 0x3c, 0x26, 0xd0, 0x80, 0x85, 0xbb, 0x91, 0x01, + 0x06, 0xf4, 0x1f, 0x1b, 0x5f, 0xc0, 0xc8, 0x42, 0x74, 0x78, 0xe8, 0x23, 0x98, 0x19, 0xa5, 0xe1, + 0xda, 0x38, 0x1a, 0xfc, 0x78, 0xb5, 0xd0, 0xad, 0xf0, 0xed, 0x34, 0x24, 0x7d, 0x1d, 0x42, 0x30, + 0x45, 0x0d, 0xdb, 0xaf, 0xda, 0xac, 0x26, 0x65, 0xb4, 0x04, 0x69, 0x13, 0xbb, 0x5d, 0x87, 0xf4, + 0xc5, 0xb1, 0x6a, 0x5c, 0x6e, 0x0d, 0xab, 0x84, 0x97, 0x47, 0x09, 0x57, 0x13, 0xbe, 0x97, 0x90, + 0xd1, 0x3a, 0xcc, 0x12, 0xca, 0xf5, 0x9e, 0xe1, 0xf5, 0xb0, 0x3a, 0x25, 0xd3, 0x5f, 0x1e, 0x5f, + 0x1f, 0xbe, 0x2e, 0xec, 0x37, 0x62, 0x5a, 0x8a, 0x04, 0x32, 0x6a, 0x42, 0xc6, 0x64, 0x5e, 0xc7, + 0xc2, 0x01, 0xd6, 0xb4, 0xc4, 0xba, 0x39, 0x0e, 0xab, 0x2a, 0x7d, 0x42, 0xb8, 0xb4, 0x39, 0x58, + 0xa2, 0x32, 0xcc, 0x88, 0xd0, 0x5c, 0xcf, 0x56, 0x93, 0x12, 0xec, 0xda, 0x04, 0x81, 0xb5, 0x3c, + 0x7b, 0x23, 0xa6, 0x25, 0x89, 0x94, 0xd0, 0xc7, 0x00, 0x41, 0x50, 0x02, 0x65, 0xe6, 0x84, 0x1e, + 0x3f, 0x12, 0x92, 0x0f, 0x34, 0x6b, 0x86, 0x0b, 0xd4, 0x82, 0x39, 0x11, 0xce, 0x13, 0xe2, 0x72, + 0xd6, 0x73, 0x0c, 0x5b, 0x4d, 0x49, 0xb8, 0x5b, 0x13, 0x04, 0xb5, 0x11, 0xfa, 0x6c, 0xc4, 0xb4, + 0x0c, 0x19, 0x5a, 0xa3, 0xc7, 0x30, 0x1f, 0x04, 0x38, 0xc0, 0x9d, 0x95, 0xb8, 0xa5, 0xc9, 0xc2, + 0x1c, 0x86, 0xce, 0x99, 0xa3, 0x2a, 0xb4, 0x03, 0xd9, 0x41, 0xfa, 0xb6, 0x68, 0xf0, 0xb4, 0xc4, + 0x5e, 0x99, 0x98, 0x02, 0xe1, 0xb4, 0x11, 0xd3, 0xe6, 0xcc, 0x61, 0x45, 0x25, 0x09, 0x53, 0xa6, + 0xc1, 0x8d, 0xc2, 0x23, 0x48, 0x85, 0xbd, 0x80, 0x36, 0x21, 0x2d, 0x74, 0x7a, 0x9f, 0x11, 0xca, + 0x5d, 0x55, 0x91, 0x3d, 0x3e, 0x09, 0x39, 0x55, 0x83, 0x1b, 0x4d, 0xe1, 0xa4, 0x81, 0x19, 0x8a, + 0x6e, 0x41, 0x87, 
0xf4, 0x50, 0x6b, 0xa0, 0x66, 0x14, 0xfa, 0x84, 0x14, 0x45, 0x1f, 0xf0, 0xa7, + 0x02, 0x49, 0xbf, 0x5f, 0xde, 0x70, 0xe8, 0x88, 0xc1, 0x45, 0xa3, 0xd7, 0x73, 0x70, 0xcf, 0x9f, + 0x2d, 0x1c, 0xdb, 0x7d, 0xe6, 0x18, 0x16, 0xe1, 0xfb, 0xf2, 0x52, 0x66, 0x57, 0xef, 0x8d, 0x83, + 0x2e, 0x0f, 0xdc, 0xdb, 0x03, 0x6f, 0x6d, 0xc1, 0x88, 0xd4, 0xa3, 0x2b, 0x90, 0x21, 0xae, 0x6e, + 0x33, 0xca, 0x38, 0xa3, 0xa4, 0x2b, 0xef, 0x77, 0x4a, 0x4b, 0x13, 0x77, 0x33, 0x54, 0x15, 0xfe, + 0x52, 0x60, 0xf6, 0xa0, 0xa8, 0x6f, 0x9e, 0xcd, 0x33, 0x99, 0xf3, 0x73, 0x05, 0x32, 0xc3, 0x97, + 0x0f, 0xed, 0x44, 0xa5, 0x7d, 0xf7, 0x34, 0xf7, 0xf7, 0x6c, 0x24, 0x5f, 0xf8, 0x4d, 0x81, 0xdc, + 0xa1, 0xeb, 0x8f, 0x1e, 0x45, 0x25, 0xf7, 0xde, 0x29, 0x87, 0xc8, 0x19, 0xc9, 0xef, 0x09, 0xcc, + 0x8d, 0x4c, 0x20, 0xf4, 0x69, 0x54, 0x72, 0xf7, 0x4e, 0x35, 0xc5, 0xa2, 0xa7, 0xc0, 0xf7, 0x71, + 0xd9, 0x23, 0x07, 0x9b, 0xe8, 0x01, 0x24, 0x2d, 0xa3, 0x83, 0xad, 0xf0, 0x90, 0x95, 0x31, 0x6f, + 0x81, 0x16, 0x77, 0x08, 0xed, 0x3d, 0xc0, 0xfb, 0x3b, 0x86, 0xe5, 0x85, 0xaf, 0xa2, 0x00, 0x02, + 0x95, 0xe0, 0x82, 0xcb, 0x0d, 0x87, 0xeb, 0x9c, 0xd8, 0x58, 0xf7, 0x28, 0xd9, 0xd3, 0xa9, 0x41, + 0x99, 0x64, 0x2d, 0xa9, 0xfd, 0x4f, 0xee, 0xb5, 0x89, 0x8d, 0xb7, 0x29, 0xd9, 0xdb, 0x32, 0x28, + 0x43, 0xef, 0x40, 0xf6, 0x90, 0x69, 0x42, 0x9a, 0x66, 0xf8, 0xb0, 0xd5, 0x05, 0x98, 0xde, 0x15, + 0xa7, 0xc9, 0xef, 0xf5, 0xbc, 0xe6, 0x2f, 0x50, 0x1d, 0x66, 0xf1, 0x1e, 0xb6, 0xfb, 0x96, 0xe1, + 0xb8, 0xea, 0xb4, 0x0c, 0xfe, 0xe6, 0x04, 0xbd, 0x5d, 0x0b, 0x7c, 0xb4, 0x81, 0x77, 0xe1, 0x87, + 0x78, 0xd8, 0x5f, 0xe7, 0x98, 0x18, 0x25, 0x24, 0xa6, 0x71, 0x94, 0x98, 0xe2, 0x64, 0xad, 0x13, + 0xc5, 0xcd, 0xdf, 0x71, 0xf8, 0x7f, 0xe4, 0x48, 0x38, 0x2f, 0x0c, 0x75, 0x99, 0x47, 0xb9, 0x64, + 0x28, 0xa9, 0xf9, 0x0b, 0x34, 0x0f, 0x09, 0xf1, 0x3e, 0x9a, 0x96, 0xed, 0x24, 0x44, 0x74, 0x15, + 0xe6, 0x3a, 0x5e, 0xf7, 0x0b, 0xcc, 0x75, 0x69, 0xe1, 0xaa, 0xc9, 0xa5, 0x84, 0x00, 0xf3, 0x95, + 0x6b, 0x52, 0x87, 0xae, 0x43, 0x0e, 0xef, 0xf5, 0x2d, 0xd2, 0x25, 0x5c, 0xef, 0x30, 0x8f, 0x9a, + 0xae, 0x3a, 0xb3, 0x94, 0x58, 0x56, 0xb4, 0x6c, 0xa8, 0xae, 0x48, 0xed, 0x68, 0x6b, 0xa6, 0x5e, + 0xab, 0x35, 0xbf, 0x4a, 0x80, 0x7a, 0xdc, 0xd0, 0x7a, 0x3b, 0x2a, 0xa0, 0xfc, 0x17, 0x15, 0x68, + 0x1c, 0xad, 0xc0, 0x6b, 0xdc, 0x81, 0x1f, 0x13, 0xb0, 0x10, 0x3d, 0x5c, 0xdf, 0xaa, 0x12, 0x30, + 0xc8, 0x7d, 0xe9, 0x19, 0x94, 0x13, 0x0b, 0xeb, 0x72, 0x94, 0xf8, 0x45, 0x48, 0xaf, 0xde, 0xff, + 0x77, 0x5f, 0x9e, 0xa2, 0xcc, 0xb1, 0xcc, 0x3f, 0x09, 0x40, 0xb5, 0x6c, 0x08, 0x2f, 0x37, 0xdc, + 0xc5, 0x35, 0xc8, 0x1d, 0x32, 0x41, 0x8b, 0x90, 0x0a, 0x8d, 0xe4, 0xaf, 0x3d, 0x45, 0x3b, 0x58, + 0x0f, 0xc6, 0x5d, 0x7c, 0x68, 0xdc, 0x15, 0x7e, 0x8e, 0x43, 0x7a, 0xe8, 0xf2, 0xa0, 0xc7, 0x90, + 0xfb, 0x9c, 0x58, 0x1c, 0x3b, 0xd8, 0xd4, 0x5f, 0xbf, 0x34, 0xd9, 0x10, 0xab, 0xe1, 0x97, 0xe8, + 0x28, 0xe3, 0xf1, 0x93, 0x06, 0x73, 0x62, 0xf8, 0x8b, 0xd5, 0x82, 0x19, 0xb7, 0x6f, 0x50, 0x9d, + 0x98, 0xb2, 0x12, 0x99, 0xca, 0xfb, 0xe2, 0x88, 0x5f, 0x5f, 0x5c, 0x5e, 0xed, 0xb1, 0x43, 0xb1, + 0x11, 0x56, 0xea, 0x32, 0xcb, 0xc2, 0x5d, 0xce, 0x9c, 0x12, 0xa1, 0x1c, 0x3b, 0xd4, 0xb0, 0x4a, + 0xe2, 0x43, 0x5e, 0x6c, 0xf5, 0x0d, 0x5a, 0xaf, 0x6a, 0x49, 0x01, 0x55, 0x37, 0xd1, 0x0e, 0xa4, + 0xb8, 0x63, 0x74, 0xb1, 0x40, 0x9d, 0x96, 0xa8, 0x1f, 0x04, 0xa8, 0x77, 0x4e, 0x83, 0xda, 0x16, + 0x18, 0xf5, 0xaa, 0x36, 0x23, 0xc1, 0xea, 0x66, 0xe1, 0x79, 0x1c, 0xb2, 0xa3, 0x37, 0xe2, 0xec, + 0x31, 0xab, 0x9c, 0x47, 0x66, 0x6f, 0x7c, 0xa3, 0xc0, 0x42, 0xf4, 0x03, 0x11, 0x5d, 0x87, 0xab, + 0xe5, 0xf5, 0x75, 0xad, 0xb6, 0x5e, 0x6e, 
0xd7, 0x1f, 0x6e, 0xe9, 0xed, 0xda, 0x66, 0xf3, 0xa1, + 0x56, 0x6e, 0xd4, 0xdb, 0x8f, 0xf4, 0xed, 0xad, 0x56, 0xb3, 0xb6, 0x56, 0xbf, 0x5f, 0xaf, 0x55, + 0xe7, 0x63, 0xe8, 0x0a, 0x5c, 0x3a, 0xce, 0xb0, 0x5a, 0x6b, 0xb4, 0xcb, 0xf3, 0x0a, 0xba, 0x06, + 0x85, 0xe3, 0x4c, 0xd6, 0xb6, 0x37, 0xb7, 0x1b, 0xe5, 0x76, 0x7d, 0xa7, 0x36, 0x1f, 0xaf, 0xfc, + 0xa4, 0x3c, 0x7d, 0x99, 0x57, 0x9e, 0xbd, 0xcc, 0x2b, 0xbf, 0xbf, 0xcc, 0x2b, 0xdf, 0xbd, 0xca, + 0xc7, 0x9e, 0xbd, 0xca, 0xc7, 0x7e, 0x79, 0x95, 0x8f, 0xc1, 0x15, 0xc2, 0xc6, 0xdc, 0xfc, 0x4a, + 0x26, 0xf8, 0x97, 0xa9, 0x29, 0x36, 0x9a, 0xca, 0x67, 0x5b, 0xa7, 0xa0, 0xa6, 0x34, 0x62, 0xb8, + 0x22, 0xb1, 0x57, 0x7a, 0x98, 0x0e, 0xfd, 0xcf, 0xd8, 0x49, 0x4a, 0xed, 0x9d, 0x7f, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x00, 0x76, 0x79, 0x73, 0x90, 0x14, 0x00, 0x00, +} + +func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.InstrumentationLibraryMetrics) > 0 { + for iNdEx := len(m.InstrumentationLibraryMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InstrumentationLibraryMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *InstrumentationLibraryMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InstrumentationLibraryMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InstrumentationLibraryMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + { + size := m.Data.Size() + i -= size + if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + 
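+		// Note: the generated MarshalToSizedBuffer implementations fill dAtA
+		// from the end toward the start, so i counts down from len(dAtA) and
+		// the highest-numbered field is encoded first.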
copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Metric_IntGauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_IntGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.IntGauge != nil { + { + size, err := m.IntGauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Metric_DoubleGauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_DoubleGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleGauge != nil { + { + size, err := m.DoubleGauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Metric_IntSum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_IntSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.IntSum != nil { + { + size, err := m.IntSum.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Metric_DoubleSum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_DoubleSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleSum != nil { + { + size, err := m.DoubleSum.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Metric_IntHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_IntHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.IntHistogram != nil { + { + size, err := m.IntHistogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Metric_DoubleHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_DoubleHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleHistogram != nil { + { + size, err := m.DoubleHistogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Metric_DoubleSummary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_DoubleSummary) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleSummary != nil { + { + size, err := m.DoubleSummary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *IntGauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntGauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleGauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleGauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IntSum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntSum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsMonotonic { + i-- + if m.IsMonotonic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AggregationTemporality != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleSum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleSum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsMonotonic { + i-- + if m.IsMonotonic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AggregationTemporality != 0 { 
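Editor's note: the byte constants sprinkled through these methods (0xa, 0x12, 0x22 ... 0x5a) are protobuf field tags, computed as fieldNumber<<3 | wireType, where wire type 2 is length-delimited, 0 is varint, and 1 is fixed64. That is why each oneof wrapper above writes a distinct byte: int_gauge is field 4 (0x22) and double_summary is field 11 (0x5a). A quick check:

    package main

    import "fmt"

    // tag computes a single-byte protobuf field tag; valid for field numbers below 16.
    func tag(fieldNum, wireType int) byte {
    	return byte(fieldNum<<3 | wireType)
    }

    func main() {
    	fmt.Printf("0x%x\n", tag(1, 2))  // 0xa  - Resource / DataPoints (length-delimited)
    	fmt.Printf("0x%x\n", tag(2, 0))  // 0x10 - AggregationTemporality (varint)
    	fmt.Printf("0x%x\n", tag(3, 0))  // 0x18 - IsMonotonic (varint bool)
    	fmt.Printf("0x%x\n", tag(4, 2))  // 0x22 - Metric.data oneof: int_gauge
    	fmt.Printf("0x%x\n", tag(11, 2)) // 0x5a - Metric.data oneof: double_summary
    }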
+ i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IntHistogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationTemporality != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleHistogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationTemporality != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleSummary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleSummary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleSummary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IntDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Value)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IntHistogramDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.ExplicitBounds) > 0 { + for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { + f10 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], 
uint64(f10)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8)) + i-- + dAtA[i] = 0x3a + } + if len(m.BucketCounts) > 0 { + for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8)) + i-- + dAtA[i] = 0x32 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Sum)) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleHistogramDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.ExplicitBounds) > 0 { + for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { + f11 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8)) + i-- + dAtA[i] = 0x3a + } + if len(m.BucketCounts) > 0 { + for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8)) + i-- + dAtA[i] = 0x32 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleSummaryDataPoint) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleSummaryDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleSummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QuantileValues) > 0 { + for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *IntExemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntExemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntExemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Value)) + i-- + dAtA[i] = 0x19 + } + if m.TimeUnixNano != 0 { + i 
-= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.FilteredLabels) > 0 { + for iNdEx := len(m.FilteredLabels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FilteredLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DoubleExemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleExemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleExemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x19 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.FilteredLabels) > 0 { + for iNdEx := len(m.FilteredLabels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FilteredLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovMetrics(uint64(l)) + if len(m.InstrumentationLibraryMetrics) > 0 { + for _, e := range m.InstrumentationLibraryMetrics { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *InstrumentationLibraryMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.InstrumentationLibrary.Size() + n += 1 + l + sovMetrics(uint64(l)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Data != nil { + n += m.Data.Size() + } + return n +} + +func (m *Metric_IntGauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntGauge != nil { + l = m.IntGauge.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_DoubleGauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleGauge != nil { + l = m.DoubleGauge.Size() + 
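Editor's note: encodeVarintMetrics, defined just above, emits base-128 varints backwards to match the buffer-filling direction, while the data-point marshalers encode float64 values as fixed64 via math.Float64bits plus LittleEndian.PutUint64. Both encodings round-trip with the standard library, which is an easy way to sanity-check them:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"math"
    )

    func main() {
    	// Varint: little-endian base-128 groups, MSB set on all but the last byte.
    	tmp := make([]byte, binary.MaxVarintLen64)
    	n := binary.PutUvarint(tmp, 300) // 300 -> 0xac 0x02
    	v, _ := binary.Uvarint(tmp[:n])
    	fmt.Printf("varint % x -> %d\n", tmp[:n], v)

    	// Fixed64 double: IEEE-754 bits, little-endian, always 8 bytes (wire type 1).
    	var d [8]byte
    	binary.LittleEndian.PutUint64(d[:], math.Float64bits(0.5))
    	back := math.Float64frombits(binary.LittleEndian.Uint64(d[:]))
    	fmt.Printf("fixed64 % x -> %g\n", d[:], back)
    }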
n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_IntSum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntSum != nil { + l = m.IntSum.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_DoubleSum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleSum != nil { + l = m.DoubleSum.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_IntHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntHistogram != nil { + l = m.IntHistogram.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_DoubleHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleHistogram != nil { + l = m.DoubleHistogram.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_DoubleSummary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleSummary != nil { + l = m.DoubleSummary.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *IntGauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *DoubleGauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *IntSum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + if m.IsMonotonic { + n += 2 + } + return n +} + +func (m *DoubleSum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + if m.IsMonotonic { + n += 2 + } + return n +} + +func (m *IntHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + return n +} + +func (m *DoubleHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + return n +} + +func (m *DoubleSummary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *IntDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l 
= e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *DoubleDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *IntHistogramDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + if len(m.BucketCounts) > 0 { + n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 + } + if len(m.ExplicitBounds) > 0 { + n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *DoubleHistogramDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + if len(m.BucketCounts) > 0 { + n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 + } + if len(m.ExplicitBounds) > 0 { + n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *DoubleSummaryDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + if len(m.QuantileValues) > 0 { + for _, e := range m.QuantileValues { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *DoubleSummaryDataPoint_ValueAtQuantile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *IntExemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FilteredLabels) > 0 { + for _, e := range m.FilteredLabels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + l = m.SpanId.Size() + n += 1 + l + sovMetrics(uint64(l)) + l = m.TraceId.Size() + n += 1 + l + sovMetrics(uint64(l)) + return n +} + +func (m *DoubleExemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FilteredLabels) > 0 { + for _, e := range m.FilteredLabels { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + l = m.SpanId.Size() + n += 1 + l + sovMetrics(uint64(l)) + l = m.TraceId.Size() + n += 1 + l + 
sovMetrics(uint64(l)) + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibraryMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstrumentationLibraryMetrics = append(m.InstrumentationLibraryMetrics, &InstrumentationLibraryMetrics{}) + if err := m.InstrumentationLibraryMetrics[len(m.InstrumentationLibraryMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InstrumentationLibraryMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InstrumentationLibraryMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InstrumentationLibraryMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &Metric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
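Editor's note: each generated Unmarshal opens every iteration with the same hand-rolled loop: accumulate seven payload bits per byte into wire until a byte with the high bit clear, then split the result into fieldNum = wire >> 3 and wireType = wire & 0x7. Extracted as a standalone helper (the error values here stand in for ErrIntOverflowMetrics):

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    )

    // readTag decodes one varint-encoded field tag from data, mirroring the
    // loop at the top of the generated Unmarshal methods. n is bytes consumed.
    func readTag(data []byte) (fieldNum int32, wireType int, n int, err error) {
    	var wire uint64
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 64 {
    			return 0, 0, 0, errors.New("integer overflow")
    		}
    		if n >= len(data) {
    			return 0, 0, 0, io.ErrUnexpectedEOF
    		}
    		b := data[n]
    		n++
    		wire |= uint64(b&0x7F) << shift
    		if b < 0x80 {
    			break
    		}
    	}
    	return int32(wire >> 3), int(wire & 0x7), n, nil
    }

    func main() {
    	f, wt, n, err := readTag([]byte{0x12}) // field 2, wire type 2
    	fmt.Println(f, wt, n, err)             // 2 2 1 <nil>
    }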
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Unit = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IntGauge", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &IntGauge{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Data = &Metric_IntGauge{v}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DoubleGauge", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DoubleGauge{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Data = &Metric_DoubleGauge{v}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IntSum", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &IntSum{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Data = &Metric_IntSum{v}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DoubleSum", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DoubleSum{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_DoubleSum{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IntHistogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &IntHistogram{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_IntHistogram{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleHistogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DoubleHistogram{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_DoubleHistogram{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleSummary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DoubleSummary{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_DoubleSummary{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntGauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntGauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntGauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &IntDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleGauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleGauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleGauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &DoubleDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntSum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntSum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntSum: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &IntDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsMonotonic = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleSum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleSum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleSum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &DoubleDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + 
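Editor's note: enum fields such as AggregationTemporality are plain varints on the wire; the case 2 branches of these Unmarshal methods OR the 7-bit groups straight into the int32-based enum type, so no separate conversion step is needed. A minimal sketch decoding the single-byte value 2 (cumulative temporality in this proto) after its 0x10 tag:

    package main

    import "fmt"

    type AggregationTemporality int32 // mirrors the generated enum's underlying type

    func main() {
    	// Wire bytes for `aggregation_temporality = 2` (field 2, varint): tag 0x10, value 0x02.
    	data := []byte{0x10, 0x02}
    	var at AggregationTemporality
    	i := 1 // skip the tag byte
    	for shift := uint(0); ; shift += 7 {
    		b := data[i]
    		i++
    		at |= AggregationTemporality(b&0x7F) << shift
    		if b < 0x80 {
    			break
    		}
    	}
    	fmt.Println(at) // 2 (AGGREGATION_TEMPORALITY_CUMULATIVE in this proto)
    }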
} + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsMonotonic = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntHistogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &IntHistogramDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleHistogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &DoubleHistogramDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleSummary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleSummary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleSummary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &DoubleSummaryDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, v11.StringKeyValue{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Value = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &IntExemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, v11.StringKeyValue{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &DoubleExemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy 
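// Editorial note: the Unmarshal methods in this file are hand-rolled over the
// two wire encodings these messages use, base-128 varints (wire type 0 and
// every length prefix) and little-endian fixed 64-bit words (wire type 1,
// backing fixed64, sfixed64 and double fields). Packed repeated fields such
// as BucketCounts and ExplicitBounds are simply runs of 8-byte words behind a
// single length prefix. A minimal standalone sketch of both primitives,
// assuming the caller hands in the raw bytes (illustrative only, not part of
// the generated file):
//
//	func readVarint(data []byte) (v uint64, n int, err error) {
//		for shift := uint(0); ; shift += 7 {
//			if shift >= 64 {
//				return 0, 0, fmt.Errorf("varint overflows uint64")
//			}
//			if n >= len(data) {
//				return 0, 0, io.ErrUnexpectedEOF
//			}
//			b := data[n]
//			n++
//			v |= uint64(b&0x7F) << shift
//			if b < 0x80 { // high bit clear marks the final byte
//				return v, n, nil
//			}
//		}
//	}
//
//	func readFixed64(data []byte) (uint64, error) {
//		if len(data) < 8 {
//			return 0, io.ErrUnexpectedEOF
//		}
//		// double fields additionally pass the result through math.Float64frombits.
//		return binary.LittleEndian.Uint64(data[:8]), nil
//	}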
+ } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntHistogramDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntHistogramDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, v11.StringKeyValue{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + m.Sum = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Sum = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 6: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.BucketCounts) == 0 { + m.BucketCounts = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if 
(iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) + } + case 7: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.ExplicitBounds) == 0 { + m.ExplicitBounds = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType) + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &IntExemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleHistogramDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleHistogramDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, v11.StringKeyValue{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 6: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.BucketCounts) == 0 { + m.BucketCounts = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) + } + case 7: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return 
ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.ExplicitBounds) == 0 { + m.ExplicitBounds = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType) + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &DoubleExemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleSummaryDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleSummaryDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleSummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, v11.StringKeyValue{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QuantileValues = append(m.QuantileValues, &DoubleSummaryDataPoint_ValueAtQuantile{}) + if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleSummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IntExemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntExemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntExemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilteredLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilteredLabels = append(m.FilteredLabels, v11.StringKeyValue{}) + if err := m.FilteredLabels[len(m.FilteredLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Value = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleExemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleExemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleExemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilteredLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilteredLabels = append(m.FilteredLabels, v11.StringKeyValue{}) + if err := m.FilteredLabels[len(m.FilteredLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLengthMetrics + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/resource/v1/resource.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/resource/v1/resource.pb.go new file mode 100644 index 00000000000..519b6e0a7a4 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/resource/v1/resource.pb.go @@ -0,0 +1,381 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. 
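// A round-trip through the generated code looks like the sketch below. The
// Marshal, Unmarshal and getter names all appear in this file; the attribute
// literal is abbreviated (hedged example, not API documentation):
//
//	orig := &Resource{DroppedAttributesCount: 2} // plus Attributes: []v1.KeyValue{...}
//	raw, err := orig.Marshal()
//	if err != nil {
//		// handle the error
//	}
//	var decoded Resource
//	if err := decoded.Unmarshal(raw); err != nil {
//		// handle the error
//	}
//	// decoded.GetDroppedAttributesCount() == 2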
+type Resource struct { + // Set of labels that describe the resource. + Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_446f73eacf88f3f5, []int{0} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return m.Size() +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetAttributes() []v1.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Resource) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func init() { + proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) +} + +var fileDescriptor_446f73eacf88f3f5 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, + 0x42, 0xf2, 0x28, 0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x22, 0xe9, 0xf9, 0xe9, + 0xf9, 0x10, 0x63, 0x40, 0x2c, 0x88, 0x0a, 0x29, 0x2d, 0x6c, 0xd6, 0x24, 0xe7, 0xe7, 0xe6, 0xe6, + 0xe7, 0x81, 0x2c, 0x81, 0xb0, 0x20, 0x6a, 0x95, 0x26, 0x33, 0x72, 0x71, 0x04, 0x41, 0x4d, 0x14, + 0xf2, 0xe5, 0xe2, 0x4a, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x49, 0x2d, 0x96, 0x60, 0x54, + 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd7, 0xc3, 0xe6, 0x08, 0xa8, 0x19, 0x65, 0x86, 0x7a, 0xde, 0xa9, + 0x95, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x21, 0x19, 0x20, + 0x64, 0xc1, 0x25, 0x91, 0x52, 0x94, 0x5f, 0x50, 0x90, 0x9a, 0x12, 0x8f, 0x10, 0x8d, 0x4f, 0xce, + 0x2f, 0xcd, 0x2b, 0x91, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x0d, 0x12, 0x83, 0xca, 0x3b, 0xc2, 0xa5, + 0x9d, 0x41, 0xb2, 0x4e, 0xf3, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, + 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x81, + 0x4b, 0x29, 0x33, 0x5f, 0x8f, 0x40, 0xb0, 0x38, 0xf1, 0xc2, 0x7c, 0x14, 0x00, 0x92, 0x0a, 0x60, + 0x8c, 0xf2, 0x4f, 0x47, 0xd7, 0x94, 0x09, 0x0a, 0x91, 0x9c, 0x9c, 0xd4, 0xe4, 0x92, 0xfc, 0x22, + 0xfd, 0xcc, 0xbc, 0x92, 0xd4, 0xa2, 0xbc, 0xc4, 0x1c, 0xfd, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x14, + 0x85, 0xba, 0x60, 0xd3, 
0x75, 0xd3, 0x53, 0xf3, 0x90, 0x23, 0x2a, 0x89, 0x0d, 0x2c, 0x6c, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x6b, 0xf7, 0x11, 0xd2, 0x01, 0x00, 0x00, +} + +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResource(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintResource(dAtA []byte, offset int, v uint64) int { + offset -= sovResource(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Resource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovResource(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovResource(uint64(m.DroppedAttributesCount)) + } + return n +} + +func sovResource(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResource(x uint64) (n int) { + return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Resource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
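// Editorial note: on the encode side (Marshal and MarshalToSizedBuffer above),
// gogoproto writes the buffer back-to-front. It sizes the message once with
// Size(), then prepends fields in descending field order, so every submessage
// length is already known when its varint prefix is written and no second
// pass is needed. A sketch of the prefix helper, mirroring
// encodeVarintResource (varintLen is an assumed name for the sovResource
// computation, (bits.Len64(v|1)+6)/7):
//
//	func prependVarint(buf []byte, offset int, v uint64) int {
//		offset -= varintLen(v)
//		base := offset
//		for v >= 1<<7 {
//			buf[offset] = uint8(v&0x7F | 0x80)
//			v >>= 7
//			offset++
//		}
//		buf[offset] = uint8(v)
//		return base
//	}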
+ b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResource(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResource + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResource + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResource + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/opentelemetry-proto-gen/trace/v1/trace.pb.go b/internal/otel_collector/internal/data/opentelemetry-proto-gen/trace/v1/trace.pb.go new file mode 100644 index 00000000000..45eb93fda66 --- /dev/null +++ b/internal/otel_collector/internal/data/opentelemetry-proto-gen/trace/v1/trace.pb.go @@ -0,0 +1,2667 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + go_opentelemetry_io_collector_internal_data "go.opentelemetry.io/collector/internal/data" + v11 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
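// Editorial note: the skip helpers (skipResource above, skipMetrics and the
// other per-file twins) advance past one unknown field by dispatching on its
// wire type: varints scan to the terminating byte, fixed64 and fixed32
// advance 8 or 4 bytes, length-delimited fields advance by their decoded
// length, and the legacy start/end-group types move a depth counter so nested
// groups are consumed as a unit. Every Unmarshal default case drives them the
// same way:
//
//	skippy, err := skipResource(dAtA[iNdEx:]) // bytes spanned by the unknown field
//	if err != nil {
//		return err
//	}
//	iNdEx += skippy // resume at the next tag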
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operations happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 0} +} + +type Status_DeprecatedStatusCode int32 + +const ( + Status_DEPRECATED_STATUS_CODE_OK Status_DeprecatedStatusCode = 0 + Status_DEPRECATED_STATUS_CODE_CANCELLED Status_DeprecatedStatusCode = 1 + Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR Status_DeprecatedStatusCode = 2 + Status_DEPRECATED_STATUS_CODE_INVALID_ARGUMENT Status_DeprecatedStatusCode = 3 + Status_DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED Status_DeprecatedStatusCode = 4 + Status_DEPRECATED_STATUS_CODE_NOT_FOUND Status_DeprecatedStatusCode = 5 + Status_DEPRECATED_STATUS_CODE_ALREADY_EXISTS Status_DeprecatedStatusCode = 6 + Status_DEPRECATED_STATUS_CODE_PERMISSION_DENIED Status_DeprecatedStatusCode = 7 + Status_DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED Status_DeprecatedStatusCode = 8 + Status_DEPRECATED_STATUS_CODE_FAILED_PRECONDITION Status_DeprecatedStatusCode = 9 + Status_DEPRECATED_STATUS_CODE_ABORTED Status_DeprecatedStatusCode = 10 + Status_DEPRECATED_STATUS_CODE_OUT_OF_RANGE Status_DeprecatedStatusCode = 11 + Status_DEPRECATED_STATUS_CODE_UNIMPLEMENTED Status_DeprecatedStatusCode = 12 + Status_DEPRECATED_STATUS_CODE_INTERNAL_ERROR Status_DeprecatedStatusCode = 13 + Status_DEPRECATED_STATUS_CODE_UNAVAILABLE Status_DeprecatedStatusCode = 14 + Status_DEPRECATED_STATUS_CODE_DATA_LOSS Status_DeprecatedStatusCode = 15 + Status_DEPRECATED_STATUS_CODE_UNAUTHENTICATED 
Status_DeprecatedStatusCode = 16 +) + +var Status_DeprecatedStatusCode_name = map[int32]string{ + 0: "DEPRECATED_STATUS_CODE_OK", + 1: "DEPRECATED_STATUS_CODE_CANCELLED", + 2: "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", + 3: "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT", + 4: "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED", + 5: "DEPRECATED_STATUS_CODE_NOT_FOUND", + 6: "DEPRECATED_STATUS_CODE_ALREADY_EXISTS", + 7: "DEPRECATED_STATUS_CODE_PERMISSION_DENIED", + 8: "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED", + 9: "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION", + 10: "DEPRECATED_STATUS_CODE_ABORTED", + 11: "DEPRECATED_STATUS_CODE_OUT_OF_RANGE", + 12: "DEPRECATED_STATUS_CODE_UNIMPLEMENTED", + 13: "DEPRECATED_STATUS_CODE_INTERNAL_ERROR", + 14: "DEPRECATED_STATUS_CODE_UNAVAILABLE", + 15: "DEPRECATED_STATUS_CODE_DATA_LOSS", + 16: "DEPRECATED_STATUS_CODE_UNAUTHENTICATED", +} + +var Status_DeprecatedStatusCode_value = map[string]int32{ + "DEPRECATED_STATUS_CODE_OK": 0, + "DEPRECATED_STATUS_CODE_CANCELLED": 1, + "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR": 2, + "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT": 3, + "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED": 4, + "DEPRECATED_STATUS_CODE_NOT_FOUND": 5, + "DEPRECATED_STATUS_CODE_ALREADY_EXISTS": 6, + "DEPRECATED_STATUS_CODE_PERMISSION_DENIED": 7, + "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED": 8, + "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION": 9, + "DEPRECATED_STATUS_CODE_ABORTED": 10, + "DEPRECATED_STATUS_CODE_OUT_OF_RANGE": 11, + "DEPRECATED_STATUS_CODE_UNIMPLEMENTED": 12, + "DEPRECATED_STATUS_CODE_INTERNAL_ERROR": 13, + "DEPRECATED_STATUS_CODE_UNAVAILABLE": 14, + "DEPRECATED_STATUS_CODE_DATA_LOSS": 15, + "DEPRECATED_STATUS_CODE_UNAUTHENTICATED": 16, +} + +func (x Status_DeprecatedStatusCode) String() string { + return proto.EnumName(Status_DeprecatedStatusCode_name, int32(x)) +} + +func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developers or Operator to have + // completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +var Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", +} + +var Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, +} + +func (x Status_StatusCode) String() string { + return proto.EnumName(Status_StatusCode_name, int32(x)) +} + +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 1} +} + +// A collection of InstrumentationLibrarySpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of InstrumentationLibrarySpans that originate from a resource. 
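// (Editorial note on the enums above: the generated *_name and *_value maps
// make conversion between wire values and spec names a single lookup, for
// example:
//
//	Span_SPAN_KIND_SERVER.String()          // "SPAN_KIND_SERVER", via proto.EnumName
//	Span_SpanKind_value["SPAN_KIND_CLIENT"] // 3
//	Status_StatusCode_name[2]               // "STATUS_CODE_ERROR"
//
// All names are taken from the maps in this file.)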
+ InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` +} + +func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } +func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } +func (*ResourceSpans) ProtoMessage() {} +func (*ResourceSpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{0} +} +func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceSpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSpans.Merge(m, src) +} +func (m *ResourceSpans) XXX_Size() int { + return m.Size() +} +func (m *ResourceSpans) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSpans.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo + +func (m *ResourceSpans) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { + if m != nil { + return m.InstrumentationLibrarySpans + } + return nil +} + +// A collection of Spans produced by an InstrumentationLibrary. +type InstrumentationLibrarySpans struct { + // The instrumentation library information for the spans in this message. + // If this field is not set then no library info is known. + InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` + // A list of Spans that originate from an instrumentation library. 
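// (Editorial note: every XXX_Marshal in these files chooses between two
// paths, the deterministic reflection-driven proto marshal and a fast path
// that extends b to cap(b) and fills it back-to-front with
// MarshalToSizedBuffer. Callers normally stay on the plain wrappers; for any
// generated message value m (placeholder name):
//
//	buf, err := m.Marshal() // sizes, allocates and fills in one call
//	// or, reusing a caller-owned buffer:
//	buf2 := make([]byte, m.Size())
//	n, err := m.MarshalTo(buf2) // n bytes written from the front
//
// MarshalTo is shown for Resource above; the equivalent wrapper is generated
// for each message here.)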
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` +} + +func (m *InstrumentationLibrarySpans) Reset() { *m = InstrumentationLibrarySpans{} } +func (m *InstrumentationLibrarySpans) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibrarySpans) ProtoMessage() {} +func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{1} +} +func (m *InstrumentationLibrarySpans) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InstrumentationLibrarySpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InstrumentationLibrarySpans.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InstrumentationLibrarySpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibrarySpans.Merge(m, src) +} +func (m *InstrumentationLibrarySpans) XXX_Size() int { + return m.Size() +} +func (m *InstrumentationLibrarySpans) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibrarySpans.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibrarySpans proto.InternalMessageInfo + +func (m *InstrumentationLibrarySpans) GetInstrumentationLibrary() v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return v11.InstrumentationLibrary{} +} + +func (m *InstrumentationLibrarySpans) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +// Span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace and form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next available field id is 17. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. 
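// (Editorial note: the containers above nest as ResourceSpans holding
// InstrumentationLibrarySpans holding Spans, so a minimal payload is built
// bottom-up. A hedged sketch with the required IDs elided; real spans must
// also carry the 16-byte TraceId and 8-byte SpanId described below:
//
//	span := &Span{Name: "GET /users", Kind: Span_SPAN_KIND_SERVER}
//	ils := &InstrumentationLibrarySpans{Spans: []*Span{span}}
//	rs := &ResourceSpans{InstrumentationLibrarySpans: []*InstrumentationLibrarySpans{ils}}
//
// The span name and kind values here are illustrative.)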
+	TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+	// The `span_id` of this span's parent span. If this is a root span, then this
+	// field must be empty. The ID is an 8-byte array.
+	ParentSpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"parent_span_id"`
+	// A description of the span's operation.
+	//
+	// For example, the name can be a qualified method name or a file name
+	// and a line number where the operation is called. A best practice is to use
+	// the same display name at the same call point in an application.
+	// This makes it easier to correlate spans in different traces.
+	//
+	// This field is semantically required to be set to a non-empty string.
+	// When a null or empty string is received, the receiver may use the
+	// string "name" as a replacement. The receiver might implement smarter
+	// algorithms to fix an empty span name.
+	//
+	// This field is required.
+	Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+	// Distinguishes between spans generated in a particular context. For example,
+	// two spans with the same name may be distinguished using `CLIENT` (caller)
+	// and `SERVER` (callee) to identify queueing latency associated with the span.
+	Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+	// start_time_unix_nano is the start time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution starts. On the server side, this
+	// is the time when the server's application handler starts running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// end_time_unix_nano is the end time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution ends. On the server side, this
+	// is the time when the server application handler stops running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
+	// attributes is a collection of key/value pairs. The value can be a string,
+	// an integer, a double or the Boolean values `true` or `false`. Note, global attributes
+	// like server name can be set using the resource API. Examples of attributes:
+	//
+	//     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+	//     "/http/server_latency": 300
+	//     "abc.com/myattribute": true
+	//     "abc.com/score": 10.239
+	Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
+	// dropped_attributes_count is the number of attributes that were discarded. Attributes
+	// can be discarded because their keys are too long or because there are too many
+	// attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2} +} +func (m *Span) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return m.Size() +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *Span) GetEndTimeUnixNano() uint64 { + if m != nil { + return m.EndTimeUnixNano + } + return 0 +} + +func (m *Span) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *Span) GetEvents() []*Span_Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *Span) GetDroppedEventsCount() uint32 { + if m != nil { + return m.DroppedEventsCount + } + return 0 +} + +func (m *Span) GetLinks() []*Span_Link { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetDroppedLinksCount() uint32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +func (m *Span) GetStatus() Status { + if m != nil { + return m.Status + } + return Status{} +} + +// Event is a time-stamped annotation of the span, consisting of 
user-supplied +// text description and key-value pairs. +type Span_Event struct { + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *Span_Event) Reset() { *m = Span_Event{} } +func (m *Span_Event) String() string { return proto.CompactTextString(m) } +func (*Span_Event) ProtoMessage() {} +func (*Span_Event) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 0} +} +func (m *Span_Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span_Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Event.Merge(m, src) +} +func (m *Span_Event) XXX_Size() int { + return m.Size() +} +func (m *Span_Event) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Event proto.InternalMessageInfo + +func (m *Span_Event) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *Span_Event) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span_Event) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Event) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. 
+ Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 1} +} +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return m.Size() +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span_Link) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // The deprecated status code. This is an optional field. + // + // This field is deprecated and is replaced by the `code` field below. See backward + // compatibility notes below. According to our stability guarantees this field + // will be removed in 12 months, on Oct 22, 2021. All usage of old senders and + // receivers that do not understand the `code` field MUST be phased out by then. + DeprecatedCode Status_DeprecatedStatusCode `protobuf:"varint,1,opt,name=deprecated_code,json=deprecatedCode,proto3,enum=opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode" json:"deprecated_code,omitempty"` // Deprecated: Do not use. + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. 
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *Status) GetDeprecatedCode() Status_DeprecatedStatusCode { + if m != nil { + return m.DeprecatedCode + } + return Status_DEPRECATED_STATUS_CODE_OK +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetCode() Status_StatusCode { + if m != nil { + return m.Code + } + return Status_STATUS_CODE_UNSET +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode", Status_DeprecatedStatusCode_name, Status_DeprecatedStatusCode_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) + proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") + proto.RegisterType((*InstrumentationLibrarySpans)(nil), "opentelemetry.proto.trace.v1.InstrumentationLibrarySpans") + proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") + proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") + proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") + proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) +} + +var fileDescriptor_5c407ac9c675a601 = []byte{ + // 1228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x41, 0x6f, 0xdb, 0x46, + 0x13, 0x15, 0x6d, 0x49, 0x76, 0xc6, 0xb6, 0xcc, 0xec, 0xe7, 0xe4, 0x63, 0x9c, 0x46, 0x16, 0x54, + 0x37, 0x51, 0x92, 0x46, 0x6a, 0x1c, 0x14, 0x48, 0x5b, 0x14, 0x2d, 0x45, 0xae, 0x13, 0xc2, 0x34, + 0x29, 0x2c, 0x29, 0x37, 0xed, 0x85, 0x65, 0xcc, 0xad, 0x41, 0x44, 0x22, 0x05, 0x6a, 0x65, 0x24, + 0x87, 0xfe, 0x87, 0x5e, 0x7a, 0xe8, 0x3f, 0x0a, 0x0a, 0x14, 0xc8, 0xb1, 0x48, 0xd1, 0xa0, 0xb0, + 0xff, 0x46, 0x0f, 0xc5, 0x2e, 0x29, 0xdb, 0x32, 0x44, 0x39, 0x41, 0x91, 0x4b, 0x2f, 0x06, 0x39, + 0xf3, 0xe6, 0xbd, 0xb7, 0x33, 0xb3, 0xb4, 0x0d, 0x8d, 0x78, 0x40, 0x23, 0x46, 0x7b, 0xb4, 0x4f, + 0x59, 0xf2, 0xa2, 0x35, 0x48, 0x62, 0x16, 0xb7, 0x58, 0xe2, 0xef, 0xd3, 0xd6, 0xe1, 0xfd, 0xf4, + 0xa1, 0x29, 0x82, 0xe8, 0x83, 0x09, 0x64, 0x1a, 0x6c, 0xa6, 0x80, 0xc3, 0xfb, 0xeb, 0x6b, 0x07, + 0xf1, 0x41, 0x9c, 0x56, 0xf3, 0xa7, 0x34, 0xbd, 0x7e, 0x67, 0x1a, 
0xfb, 0x7e, 0xdc, 0xef, 0xc7, + 0x11, 0xa7, 0x4f, 0x9f, 0x32, 0x6c, 0x73, 0x1a, 0x36, 0xa1, 0xc3, 0x78, 0x94, 0xa4, 0x66, 0xc6, + 0xcf, 0x29, 0xbe, 0xfe, 0x87, 0x04, 0x2b, 0x24, 0x0b, 0x39, 0x03, 0x3f, 0x1a, 0xa2, 0x1d, 0x58, + 0x1c, 0x63, 0x14, 0xa9, 0x26, 0x35, 0x96, 0xb6, 0x6e, 0x37, 0xa7, 0x99, 0x3e, 0x21, 0x3a, 0xbc, + 0xdf, 0x1c, 0x33, 0xb4, 0x8b, 0x2f, 0xdf, 0x6c, 0x14, 0xc8, 0x09, 0x01, 0xfa, 0x11, 0x6e, 0x84, + 0xd1, 0x90, 0x25, 0xa3, 0x3e, 0x8d, 0x98, 0xcf, 0xc2, 0x38, 0xf2, 0x7a, 0xe1, 0xd3, 0xc4, 0x4f, + 0x5e, 0x78, 0x43, 0xae, 0xa6, 0xcc, 0xd5, 0xe6, 0x1b, 0x4b, 0x5b, 0x9f, 0x35, 0x67, 0xb5, 0xa5, + 0x69, 0x4c, 0x52, 0x98, 0x29, 0x83, 0xb0, 0x4b, 0xae, 0x87, 0xf9, 0xc9, 0xfa, 0x6f, 0x12, 0x5c, + 0x9f, 0x51, 0x8c, 0x18, 0xfc, 0x3f, 0xc7, 0x5e, 0x76, 0xf4, 0x4f, 0xa7, 0x1a, 0xcb, 0x3a, 0x9e, + 0xeb, 0x2c, 0x6b, 0xc3, 0xd5, 0xe9, 0xd6, 0xd0, 0x43, 0x28, 0x9d, 0x3d, 0x7c, 0x7d, 0xf6, 0xe1, + 0xb9, 0x53, 0x92, 0x16, 0xd4, 0x8f, 0x96, 0xa1, 0xc8, 0xdf, 0xd1, 0x1e, 0x2c, 0x0a, 0x80, 0x17, + 0x06, 0xc2, 0xe9, 0x72, 0xfb, 0x0b, 0x2e, 0xf9, 0xfa, 0xcd, 0xc6, 0x83, 0x83, 0xf8, 0x1c, 0x5f, + 0xc8, 0x97, 0xa5, 0xd7, 0xa3, 0xfb, 0x2c, 0x4e, 0x5a, 0x61, 0xc4, 0x68, 0x12, 0xf9, 0xbd, 0x56, + 0xe0, 0x33, 0xbf, 0xe9, 0x72, 0x0e, 0x43, 0x27, 0x0b, 0x82, 0xcc, 0x08, 0x90, 0x03, 0x0b, 0x5c, + 0x89, 0xd3, 0xce, 0x09, 0xda, 0xcf, 0x33, 0xda, 0xad, 0x77, 0xa1, 0xe5, 0x16, 0x0d, 0x9d, 0x94, + 0x39, 0x95, 0x11, 0xa0, 0x0d, 0x58, 0x4a, 0xcd, 0x0e, 0x99, 0xcf, 0xa8, 0x32, 0x5f, 0x93, 0x1a, + 0x97, 0x08, 0x88, 0x90, 0xc3, 0x23, 0xe8, 0x7b, 0xa8, 0x0c, 0xfc, 0x84, 0x46, 0xcc, 0x1b, 0x8b, + 0x17, 0xff, 0xb5, 0xf8, 0x72, 0xca, 0xe8, 0xa4, 0x16, 0x10, 0x14, 0x23, 0xbf, 0x4f, 0x95, 0x92, + 0xd0, 0x16, 0xcf, 0xe8, 0x2b, 0x28, 0x3e, 0x0b, 0xa3, 0x40, 0x29, 0xd7, 0xa4, 0x46, 0x65, 0xeb, + 0xee, 0xc5, 0x53, 0x10, 0x3f, 0x76, 0xc2, 0x28, 0x20, 0xa2, 0x10, 0xb5, 0x60, 0x6d, 0xc8, 0xfc, + 0x84, 0x79, 0x2c, 0xec, 0x53, 0x6f, 0x14, 0x85, 0xcf, 0xbd, 0xc8, 0x8f, 0x62, 0x65, 0xa1, 0x26, + 0x35, 0xca, 0xe4, 0xb2, 0xc8, 0xb9, 0x61, 0x9f, 0x76, 0xa3, 0xf0, 0xb9, 0xe5, 0x47, 0x31, 0xba, + 0x0b, 0x88, 0x46, 0xc1, 0x79, 0xf8, 0xa2, 0x80, 0xaf, 0xd2, 0x28, 0x98, 0x00, 0xef, 0x02, 0xf8, + 0x8c, 0x25, 0xe1, 0xd3, 0x11, 0xa3, 0x43, 0xe5, 0x92, 0x58, 0x95, 0x5b, 0x17, 0xac, 0xe3, 0x0e, + 0x7d, 0xb1, 0xe7, 0xf7, 0x46, 0xe3, 0x7b, 0x78, 0x86, 0x00, 0x3d, 0x04, 0x25, 0x48, 0xe2, 0xc1, + 0x80, 0x06, 0xde, 0x69, 0xd4, 0xdb, 0x8f, 0x47, 0x11, 0x53, 0xa0, 0x26, 0x35, 0x56, 0xc8, 0xd5, + 0x2c, 0xaf, 0x9e, 0xa4, 0x35, 0x9e, 0x45, 0x5f, 0x43, 0x99, 0x1e, 0xd2, 0x88, 0x0d, 0x95, 0x25, + 0x61, 0xa2, 0xf1, 0x16, 0x9d, 0xc2, 0xbc, 0x80, 0x64, 0x75, 0xe8, 0x13, 0x58, 0x1b, 0x6b, 0xa7, + 0x91, 0x4c, 0x77, 0x59, 0xe8, 0xa2, 0x2c, 0x27, 0x6a, 0x32, 0xcd, 0x2f, 0xa1, 0xd4, 0x0b, 0xa3, + 0x67, 0x43, 0x65, 0x65, 0xc6, 0xb9, 0x27, 0x25, 0xcd, 0x30, 0x7a, 0x46, 0xd2, 0x2a, 0xd4, 0x84, + 0xff, 0x8d, 0x05, 0x45, 0x20, 0xd3, 0xab, 0x08, 0xbd, 0xcb, 0x59, 0x8a, 0x17, 0x64, 0x72, 0x6d, + 0x28, 0xf3, 0xdd, 0x1c, 0x0d, 0x95, 0x55, 0x71, 0xed, 0x37, 0x2f, 0xd0, 0x13, 0xd8, 0xac, 0xc9, + 0x59, 0xe5, 0xfa, 0xaf, 0x12, 0x94, 0xc4, 0x11, 0xd0, 0x26, 0x54, 0xce, 0x8d, 0x58, 0x12, 0x23, + 0x5e, 0x66, 0x67, 0xe7, 0x3b, 0x5e, 0xc9, 0xb9, 0x33, 0x2b, 0x39, 0x39, 0xf3, 0xf9, 0xf7, 0x39, + 0xf3, 0xe2, 0xac, 0x99, 0xaf, 0xff, 0x39, 0x07, 0x45, 0xde, 0x9f, 0xff, 0xd8, 0x87, 0x66, 0xb2, + 0xbf, 0xc5, 0xf7, 0xd9, 0xdf, 0xd2, 0xac, 0xfe, 0xd6, 0x7f, 0x91, 0x60, 0x71, 0xfc, 0x35, 0x41, + 0xd7, 0xe0, 0x8a, 0xd3, 0x51, 0x2d, 0x6f, 0xc7, 0xb0, 0x74, 0xaf, 0x6b, 0x39, 0x1d, 0xac, 
0x19, + 0xdb, 0x06, 0xd6, 0xe5, 0x02, 0xba, 0x0a, 0xe8, 0x34, 0x65, 0x58, 0x2e, 0x26, 0x96, 0x6a, 0xca, + 0x12, 0x5a, 0x03, 0xf9, 0x34, 0xee, 0x60, 0xb2, 0x87, 0x89, 0x3c, 0x37, 0x19, 0xd5, 0x4c, 0x03, + 0x5b, 0xae, 0x3c, 0x3f, 0xc9, 0xd1, 0x21, 0xb6, 0xde, 0xd5, 0x30, 0x91, 0x8b, 0x93, 0x71, 0xcd, + 0xb6, 0x9c, 0xee, 0x2e, 0x26, 0x72, 0xa9, 0xfe, 0xf7, 0x02, 0x94, 0xd3, 0x0d, 0x47, 0x3f, 0xc0, + 0x6a, 0x40, 0x07, 0x09, 0xdd, 0xf7, 0x19, 0x0d, 0xbc, 0xfd, 0x38, 0x48, 0xff, 0x24, 0xa8, 0x5c, + 0xf4, 0x0b, 0x3b, 0x2d, 0x6f, 0xea, 0x27, 0xb5, 0x69, 0x40, 0x8b, 0x03, 0xda, 0x9e, 0x53, 0x24, + 0x52, 0x39, 0x65, 0xe5, 0x31, 0xa4, 0xc0, 0x42, 0x9f, 0x0e, 0x87, 0xfe, 0xc1, 0xf8, 0x3a, 0x8c, + 0x5f, 0x91, 0x06, 0x45, 0x21, 0x3b, 0x2f, 0x64, 0x5b, 0x6f, 0x25, 0x7b, 0x2a, 0x46, 0x44, 0x71, + 0xfd, 0x75, 0x09, 0xd6, 0xa6, 0x79, 0x41, 0x37, 0xe0, 0x9a, 0x8e, 0x3b, 0x04, 0x6b, 0xaa, 0x8b, + 0x75, 0xcf, 0x71, 0x55, 0xb7, 0xeb, 0x78, 0x9a, 0xad, 0x63, 0xcf, 0xde, 0x91, 0x0b, 0x68, 0x13, + 0x6a, 0x39, 0x69, 0x4d, 0xb5, 0x34, 0x6c, 0x9a, 0x58, 0x97, 0x25, 0xd4, 0x80, 0xcd, 0x1c, 0x54, + 0xd7, 0xda, 0xb1, 0xec, 0x6f, 0x2c, 0x0f, 0x13, 0x62, 0xf3, 0xf9, 0xdc, 0x85, 0x5b, 0x39, 0x48, + 0xc3, 0xda, 0x53, 0x4d, 0x43, 0xf7, 0x54, 0xf2, 0xa8, 0xbb, 0x9b, 0x8e, 0xed, 0x63, 0x68, 0xe4, + 0x80, 0x75, 0xac, 0xea, 0xa6, 0x61, 0x61, 0x0f, 0x3f, 0xd1, 0x30, 0xd6, 0xb1, 0x2e, 0x17, 0x67, + 0x58, 0xb5, 0x6c, 0xd7, 0xdb, 0xb6, 0xbb, 0x96, 0x2e, 0x97, 0xd0, 0x6d, 0xf8, 0x28, 0x07, 0xa5, + 0x9a, 0x04, 0xab, 0xfa, 0xb7, 0x1e, 0x7e, 0x62, 0x38, 0xae, 0x23, 0x97, 0x67, 0xc8, 0x77, 0x30, + 0xd9, 0x35, 0x1c, 0xc7, 0xb0, 0x2d, 0x4f, 0xc7, 0x16, 0xdf, 0xd3, 0x05, 0x74, 0x0f, 0x6e, 0xe7, + 0xa0, 0x09, 0x76, 0xec, 0x2e, 0xd1, 0xb8, 0xd9, 0xc7, 0x6a, 0xd7, 0x71, 0xb1, 0x2e, 0x2f, 0xa2, + 0x26, 0xdc, 0xc9, 0x81, 0x6f, 0xab, 0x86, 0x89, 0xf9, 0x9a, 0x62, 0xcd, 0xb6, 0x74, 0xc3, 0x35, + 0x6c, 0x4b, 0xbe, 0x84, 0xea, 0x50, 0xcd, 0xf3, 0xdd, 0xb6, 0x09, 0xe7, 0x04, 0x74, 0x0b, 0x3e, + 0xcc, 0x9b, 0x65, 0xd7, 0xf5, 0xec, 0x6d, 0x8f, 0xa8, 0xd6, 0x23, 0x2c, 0x2f, 0xcd, 0x9c, 0x97, + 0xb1, 0xdb, 0x31, 0x31, 0x1f, 0x00, 0xd6, 0xe5, 0xe5, 0x19, 0xed, 0x1a, 0x5f, 0xc5, 0x6c, 0xb4, + 0x2b, 0xe8, 0x26, 0xd4, 0x73, 0x49, 0xd5, 0x3d, 0xd5, 0x30, 0xd5, 0xb6, 0x89, 0xe5, 0xca, 0x8c, + 0x39, 0xe9, 0xaa, 0xab, 0x7a, 0xa6, 0xed, 0x38, 0xf2, 0x2a, 0xba, 0x03, 0x37, 0xf3, 0xd9, 0xba, + 0xee, 0x63, 0x6c, 0xb9, 0x86, 0xc8, 0xc9, 0x72, 0xdd, 0x02, 0x38, 0xb3, 0xd1, 0x57, 0xe0, 0xf2, + 0x24, 0xdc, 0xc1, 0xae, 0x5c, 0x40, 0x08, 0x2a, 0xe7, 0xb6, 0x5b, 0x3a, 0x0f, 0xcd, 0x96, 0xb4, + 0xfd, 0xb3, 0xf4, 0xf2, 0xa8, 0x2a, 0xbd, 0x3a, 0xaa, 0x4a, 0x7f, 0x1d, 0x55, 0xa5, 0x9f, 0x8e, + 0xab, 0x85, 0x57, 0xc7, 0xd5, 0xc2, 0xef, 0xc7, 0xd5, 0x02, 0x6c, 0x84, 0xf1, 0xcc, 0x0b, 0xd8, + 0x06, 0xf1, 0x9d, 0xef, 0xf0, 0x60, 0x47, 0xfa, 0xce, 0x7c, 0x87, 0x4f, 0x79, 0x6b, 0x02, 0x78, + 0x4f, 0xf0, 0xde, 0x3b, 0xa0, 0xd1, 0xc9, 0x7f, 0x51, 0x4f, 0xcb, 0x22, 0xf6, 0xe0, 0x9f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x70, 0x20, 0x6f, 0x05, 0x6c, 0x0d, 0x00, 0x00, +} + +func (m *ResourceSpans) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.InstrumentationLibrarySpans) > 0 { + for 
iNdEx := len(m.InstrumentationLibrarySpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InstrumentationLibrarySpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *InstrumentationLibrarySpans) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InstrumentationLibrarySpans) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InstrumentationLibrarySpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Spans) > 0 { + for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Span) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + if m.DroppedLinksCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount)) + i-- + dAtA[i] = 0x70 + } + if len(m.Links) > 0 { + for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if m.DroppedEventsCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount)) + i-- + dAtA[i] = 0x60 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.DroppedAttributesCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x50 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.EndTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) + i-- + dAtA[i] = 0x41 + } + if m.StartTimeUnixNano 
!= 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x39 + } + if m.Kind != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x30 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + } + { + size := m.ParentSpanId.Size() + i -= size + if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.TraceState) > 0 { + i -= len(m.TraceState) + copy(dAtA[i:], m.TraceState) + i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) + i-- + dAtA[i] = 0x1a + } + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Span_Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x20 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Span_Link) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x28 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.TraceState) > 0 { + i -= len(m.TraceState) + copy(dAtA[i:], m.TraceState) + i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) + i-- + dAtA[i] = 0x1a + } + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i 
= encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Code != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x18 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.DeprecatedCode != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DeprecatedCode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTrace(dAtA []byte, offset int, v uint64) int { + offset -= sovTrace(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceSpans) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovTrace(uint64(l)) + if len(m.InstrumentationLibrarySpans) > 0 { + for _, e := range m.InstrumentationLibrarySpans { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + return n +} + +func (m *InstrumentationLibrarySpans) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.InstrumentationLibrary.Size() + n += 1 + l + sovTrace(uint64(l)) + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + return n +} + +func (m *Span) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TraceId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = len(m.TraceState) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + l = m.ParentSpanId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if m.Kind != 0 { + n += 1 + sovTrace(uint64(m.Kind)) + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.EndTimeUnixNano != 0 { + n += 9 + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedEventsCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedEventsCount)) + } + if len(m.Links) > 0 { + for _, e := range m.Links { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedLinksCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedLinksCount)) + } + l = m.Status.Size() + n += 1 + l + sovTrace(uint64(l)) + return n +} + +func (m *Span_Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 9 + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + 
sovTrace(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) + } + return n +} + +func (m *Span_Link) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TraceId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = len(m.TraceState) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) + } + return n +} + +func (m *Status) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeprecatedCode != 0 { + n += 1 + sovTrace(uint64(m.DeprecatedCode)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if m.Code != 0 { + n += 1 + sovTrace(uint64(m.Code)) + } + return n +} + +func sovTrace(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTrace(x uint64) (n int) { + return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceSpans) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrarySpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstrumentationLibrarySpans = append(m.InstrumentationLibrarySpans, &InstrumentationLibrarySpans{}) + if err := m.InstrumentationLibrarySpans[len(m.InstrumentationLibrarySpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTrace 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InstrumentationLibrarySpans) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InstrumentationLibrarySpans: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InstrumentationLibrarySpans: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Span_SpanKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 1 { + return 
fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) + } + m.EndTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &Span_Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType) + } + m.DroppedEventsCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedEventsCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Links = append(m.Links, &Span_Link{}) + if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType) + } + m.DroppedLinksCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedLinksCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span_Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span_Link) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Link: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCode", wireType) + } + m.DeprecatedCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedCode |= Status_DeprecatedStatusCode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= Status_StatusCode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTrace(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTrace + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTrace + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTrace + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group") +) diff --git a/internal/otel_collector/internal/data/spanid.go b/internal/otel_collector/internal/data/spanid.go new file mode 100644 index 00000000000..0a9c1f605a0 --- /dev/null +++ b/internal/otel_collector/internal/data/spanid.go @@ -0,0 +1,104 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "encoding/hex" + "errors" +) + +const spanIDSize = 8 + +var errInvalidSpanIDSize = errors.New("invalid length for SpanID") + +// SpanID is a custom data type that is used for all span_id fields in OTLP +// Protobuf messages. +type SpanID struct { + id [spanIDSize]byte +} + +// NewSpanID creates a SpanID from a byte slice. 
+func NewSpanID(bytes [8]byte) SpanID { + return SpanID{id: bytes} +} + +// HexString returns hex representation of the ID. +func (sid SpanID) HexString() string { + if !sid.IsValid() { + return "" + } + return hex.EncodeToString(sid.id[:]) +} + +// Size returns the size of the data to serialize. +func (sid *SpanID) Size() int { + if !sid.IsValid() { + return 0 + } + return spanIDSize +} + +// Equal returns true if ids are equal. +func (sid SpanID) Equal(that SpanID) bool { + return sid.id == that.id +} + +// IsValid returns true if id contains at least one non-zero byte. +func (sid SpanID) IsValid() bool { + return sid.id != [8]byte{} +} + +// Bytes returns the byte array representation of the SpanID. +func (sid SpanID) Bytes() [8]byte { + return sid.id +} + +// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization. +func (sid *SpanID) MarshalTo(data []byte) (n int, err error) { + if !sid.IsValid() { + return 0, nil + } + return marshalBytes(data, sid.id[:]) +} + +// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization. +func (sid *SpanID) Unmarshal(data []byte) error { + if len(data) == 0 { + sid.id = [8]byte{} + return nil + } + + if len(data) != spanIDSize { + return errInvalidSpanIDSize + } + + copy(sid.id[:], data) + return nil +} + +// MarshalJSON converts SpanID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if !sid.IsValid() { + return []byte(`""`), nil + } + return marshalJSON(sid.id[:]) +} + +// UnmarshalJSON decodes SpanID from hex string, possibly enclosed in quotes. +// Called by Protobuf JSON deserialization. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + sid.id = [8]byte{} + return unmarshalJSON(sid.id[:], data) +} diff --git a/internal/otel_collector/internal/data/spanid_test.go b/internal/otel_collector/internal/data/spanid_test.go new file mode 100644 index 00000000000..5d4526649f6 --- /dev/null +++ b/internal/otel_collector/internal/data/spanid_test.go @@ -0,0 +1,129 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
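The ID types in internal/data encode one convention worth noting before reading the generators that follow: the all-zero ID is treated as unset, marshals to zero bytes on the wire, and renders as an empty string in hex and JSON (TraceID, the 16-byte analogue added below, behaves identically). A minimal standalone sketch of that convention, using a local spanID type because data.SpanID sits in an internal package and is not importable from outside the collector module:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// spanID mirrors the vendored data.SpanID convention: a fixed [8]byte value
// in which the all-zero ID means "unset". (Local illustration type only;
// data.SpanID itself cannot be imported from outside the collector module.)
type spanID [8]byte

// isValid reports whether the ID has at least one non-zero byte,
// like SpanID.IsValid above.
func (id spanID) isValid() bool { return id != spanID{} }

// hexString renders the ID as lowercase hex, or "" when unset,
// matching SpanID.HexString above.
func (id spanID) hexString() string {
	if !id.isValid() {
		return ""
	}
	return hex.EncodeToString(id[:])
}

func main() {
	fmt.Printf("%q\n", spanID{}.hexString())                       // ""
	fmt.Printf("%q\n", spanID{1, 2, 3, 4, 5, 6, 7, 8}.hexString()) // "0102030405060708"
}
```

The same zero-means-unset check is what GenerateSpans (span_generator.go, later in this diff) relies on when deciding whether to mint fresh trace and parent IDs.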
+ +package data + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewSpanID(t *testing.T) { + sid := NewSpanID([8]byte{}) + assert.EqualValues(t, [8]byte{}, sid.id) + assert.EqualValues(t, 0, sid.Size()) + + b := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} + sid = NewSpanID(b) + assert.EqualValues(t, b, sid.id) + assert.EqualValues(t, 8, sid.Size()) +} + +func TestSpanIDHexString(t *testing.T) { + sid := NewSpanID([8]byte{}) + assert.EqualValues(t, "", sid.HexString()) + + sid = NewSpanID([8]byte{0x12, 0x23, 0xAD, 0x12, 0x23, 0xAD, 0x12, 0x23}) + assert.EqualValues(t, "1223ad1223ad1223", sid.HexString()) +} + +func TestSpanIDEqual(t *testing.T) { + sid := NewSpanID([8]byte{}) + assert.True(t, sid.Equal(sid)) + assert.True(t, sid.Equal(NewSpanID([8]byte{}))) + assert.False(t, sid.Equal(NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))) + + sid = NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + assert.True(t, sid.Equal(sid)) + assert.False(t, sid.Equal(NewSpanID([8]byte{}))) + assert.True(t, sid.Equal(NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))) +} + +func TestSpanIDMarshal(t *testing.T) { + buf := make([]byte, 10) + + sid := NewSpanID([8]byte{}) + n, err := sid.MarshalTo(buf) + assert.EqualValues(t, 0, n) + assert.NoError(t, err) + + sid = NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + n, err = sid.MarshalTo(buf) + assert.NoError(t, err) + assert.EqualValues(t, 8, n) + assert.EqualValues(t, []byte{1, 2, 3, 4, 5, 6, 7, 8}, buf[0:8]) + + _, err = sid.MarshalTo(buf[0:1]) + assert.Error(t, err) +} + +func TestSpanIDMarshalJSON(t *testing.T) { + sid := NewSpanID([8]byte{}) + json, err := sid.MarshalJSON() + assert.EqualValues(t, []byte(`""`), json) + assert.NoError(t, err) + + sid = NewSpanID([8]byte{0x12, 0x23, 0xAD, 0x12, 0x23, 0xAD, 0x12, 0x23}) + json, err = sid.MarshalJSON() + assert.EqualValues(t, []byte(`"1223ad1223ad1223"`), json) + assert.NoError(t, err) +} + +func TestSpanIDUnmarshal(t *testing.T) { + buf := []byte{0x12, 0x23, 0xAD, 0x12, 0x23, 0xAD, 0x12, 0x23} + + sid := SpanID{} + err := sid.Unmarshal(buf[0:8]) + assert.NoError(t, err) + assert.EqualValues(t, [8]byte{0x12, 0x23, 0xAD, 0x12, 0x23, 0xAD, 0x12, 0x23}, sid.id) + + err = sid.Unmarshal(buf[0:0]) + assert.NoError(t, err) + assert.EqualValues(t, [8]byte{}, sid.id) + + err = sid.Unmarshal(nil) + assert.NoError(t, err) + assert.EqualValues(t, [8]byte{}, sid.id) + + err = sid.Unmarshal(buf[0:3]) + assert.Error(t, err) +} + +func TestSpanIDUnmarshalJSON(t *testing.T) { + sid := SpanID{} + err := sid.UnmarshalJSON([]byte(`""`)) + assert.NoError(t, err) + assert.EqualValues(t, [8]byte{}, sid.id) + + err = sid.UnmarshalJSON([]byte(`"1234567812345678"`)) + assert.NoError(t, err) + assert.EqualValues(t, [8]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}, sid.id) + + err = sid.UnmarshalJSON([]byte(`1234567812345678`)) + assert.NoError(t, err) + assert.EqualValues(t, [8]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}, sid.id) + + err = sid.UnmarshalJSON([]byte(`"nothex"`)) + assert.Error(t, err) + + err = sid.UnmarshalJSON([]byte(`"1"`)) + assert.Error(t, err) + + err = sid.UnmarshalJSON([]byte(`"123"`)) + assert.Error(t, err) + + err = sid.UnmarshalJSON([]byte(`"`)) + assert.Error(t, err) +} diff --git a/internal/otel_collector/internal/data/traceid.go b/internal/otel_collector/internal/data/traceid.go new file mode 100644 index 00000000000..c3d7f2f2a02 --- /dev/null +++ b/internal/otel_collector/internal/data/traceid.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "encoding/hex" + "errors" +) + +const traceIDSize = 16 + +var errInvalidTraceIDSize = errors.New("invalid length for TraceID") + +// TraceID is a custom data type that is used for all trace_id fields in OTLP +// Protobuf messages. +type TraceID struct { + id [traceIDSize]byte +} + +// NewTraceID creates a TraceID from a byte array. +func NewTraceID(bytes [16]byte) TraceID { + return TraceID{ + id: bytes, + } +} + +// HexString returns hex representation of the ID. +func (tid TraceID) HexString() string { + if !tid.IsValid() { + return "" + } + return hex.EncodeToString(tid.id[:]) +} + +// Size returns the size of the data to serialize. +func (tid *TraceID) Size() int { + if !tid.IsValid() { + return 0 + } + return traceIDSize +} + +// Equal returns true if ids are equal. +func (tid TraceID) Equal(that TraceID) bool { + return tid.id == that.id +} + +// IsValid returns true if id contains at least one non-zero byte. +func (tid TraceID) IsValid() bool { + return tid.id != [16]byte{} +} + +// Bytes returns the byte array representation of the TraceID. +func (tid TraceID) Bytes() [16]byte { + return tid.id +} + +// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization. +func (tid *TraceID) MarshalTo(data []byte) (n int, err error) { + if !tid.IsValid() { + return 0, nil + } + return marshalBytes(data, tid.id[:]) +} + +// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization. +func (tid *TraceID) Unmarshal(data []byte) error { + if len(data) == 0 { + tid.id = [16]byte{} + return nil + } + + if len(data) != traceIDSize { + return errInvalidTraceIDSize + } + + copy(tid.id[:], data) + return nil +} + +// MarshalJSON converts trace id into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if !tid.IsValid() { + return []byte(`""`), nil + } + return marshalJSON(tid.id[:]) +} + +// UnmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. +// Called by Protobuf JSON deserialization. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + tid.id = [16]byte{} + return unmarshalJSON(tid.id[:], data) +} diff --git a/internal/otel_collector/internal/data/traceid_test.go b/internal/otel_collector/internal/data/traceid_test.go new file mode 100644 index 00000000000..1a16c455dc2 --- /dev/null +++ b/internal/otel_collector/internal/data/traceid_test.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewTraceID(t *testing.T) { + tid := NewTraceID([16]byte{}) + assert.EqualValues(t, [16]byte{}, tid.id) + assert.EqualValues(t, 0, tid.Size()) + + b := [16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78} + tid = NewTraceID(b) + assert.EqualValues(t, b, tid.id) + assert.EqualValues(t, 16, tid.Size()) +} + +func TestTraceIDHexString(t *testing.T) { + tid := NewTraceID([16]byte{}) + assert.EqualValues(t, "", tid.HexString()) + + tid = NewTraceID([16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}) + assert.EqualValues(t, "12345678123456781234567812345678", tid.HexString()) +} + +func TestTraceIDEqual(t *testing.T) { + tid := NewTraceID([16]byte{}) + assert.True(t, tid.Equal(tid)) + assert.True(t, tid.Equal(NewTraceID([16]byte{}))) + assert.False(t, tid.Equal(NewTraceID([16]byte{1}))) + + tid = NewTraceID([16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}) + assert.True(t, tid.Equal(tid)) + assert.False(t, tid.Equal(NewTraceID([16]byte{}))) + assert.True(t, tid.Equal(NewTraceID([16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}))) +} + +func TestTraceIDMarshal(t *testing.T) { + buf := make([]byte, 20) + + tid := NewTraceID([16]byte{}) + n, err := tid.MarshalTo(buf) + assert.EqualValues(t, 0, n) + assert.NoError(t, err) + + tid = NewTraceID([16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}) + n, err = tid.MarshalTo(buf) + assert.EqualValues(t, 16, n) + assert.EqualValues(t, []byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}, buf[0:16]) + assert.NoError(t, err) + + _, err = tid.MarshalTo(buf[0:1]) + assert.Error(t, err) +} + +func TestTraceIDMarshalJSON(t *testing.T) { + tid := NewTraceID([16]byte{}) + json, err := tid.MarshalJSON() + assert.EqualValues(t, []byte(`""`), json) + assert.NoError(t, err) + + tid = NewTraceID([16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}) + json, err = tid.MarshalJSON() + assert.EqualValues(t, []byte(`"12345678123456781234567812345678"`), json) + assert.NoError(t, err) +} + +func TestTraceIDUnmarshal(t *testing.T) { + buf := [16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78} + + tid := TraceID{} + err := tid.Unmarshal(buf[0:16]) + assert.NoError(t, err) + assert.EqualValues(t, buf, tid.id) + + err = tid.Unmarshal(buf[0:0]) + assert.NoError(t, err) + assert.EqualValues(t, [16]byte{}, tid.id) + + err = tid.Unmarshal(nil) + assert.NoError(t, err) + assert.EqualValues(t, [16]byte{}, tid.id) +} + +func TestTraceIDUnmarshalJSON(t *testing.T) { + tid := NewTraceID([16]byte{}) + err := tid.UnmarshalJSON([]byte(`""`)) + assert.NoError(t, err) + assert.EqualValues(t, [16]byte{}, tid.id) + + err = tid.UnmarshalJSON([]byte(`""""`)) + assert.Error(t, err) + + tidBytes := [16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78} + err = tid.UnmarshalJSON([]byte(`"12345678123456781234567812345678"`)) + assert.NoError(t, err) + assert.EqualValues(t, tidBytes, tid.id) + + err = 
tid.UnmarshalJSON([]byte(`12345678123456781234567812345678`)) + assert.NoError(t, err) + assert.EqualValues(t, tidBytes, tid.id) + + err = tid.UnmarshalJSON([]byte(`"nothex"`)) + assert.Error(t, err) + + err = tid.UnmarshalJSON([]byte(`"1"`)) + assert.Error(t, err) + + err = tid.UnmarshalJSON([]byte(`"123"`)) + assert.Error(t, err) + + err = tid.UnmarshalJSON([]byte(`"`)) + assert.Error(t, err) +} diff --git a/internal/otel_collector/internal/goldendataset/generator_commons.go b/internal/otel_collector/internal/goldendataset/generator_commons.go new file mode 100644 index 00000000000..cedbed1418e --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/generator_commons.go @@ -0,0 +1,113 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goldendataset + +import ( + "encoding/csv" + "io" + "os" + "path/filepath" + + "github.com/spf13/cast" + + "go.opentelemetry.io/collector/internal/data" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" +) + +func convertMapToAttributeKeyValues(attrsMap map[string]interface{}) []otlpcommon.KeyValue { + if attrsMap == nil { + return nil + } + attrList := make([]otlpcommon.KeyValue, len(attrsMap)) + index := 0 + for key, value := range attrsMap { + attrList[index] = constructAttributeKeyValue(key, value) + index++ + } + return attrList +} + +func constructAttributeKeyValue(key string, value interface{}) otlpcommon.KeyValue { + var attr otlpcommon.KeyValue + switch val := value.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + attr = otlpcommon.KeyValue{ + Key: key, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: cast.ToInt64(val)}}, + } + case float32, float64: + attr = otlpcommon.KeyValue{ + Key: key, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_DoubleValue{DoubleValue: cast.ToFloat64(val)}}, + } + case bool: + attr = otlpcommon.KeyValue{ + Key: key, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BoolValue{BoolValue: cast.ToBool(val)}}, + } + case *otlpcommon.ArrayValue: + attr = otlpcommon.KeyValue{ + Key: key, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: val}}, + } + case *otlpcommon.KeyValueList: + attr = otlpcommon.KeyValue{ + Key: key, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: val}}, + } + default: + attr = otlpcommon.KeyValue{ + Key: key, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: val.(string)}}, + } + } + return attr +} + +func loadPictOutputFile(fileName string) ([][]string, error) { + file, err := os.Open(filepath.Clean(fileName)) + if err != nil { + return nil, err + } + defer func() { + cerr := file.Close() + if err == nil { + err = cerr + } + }() + + reader := csv.NewReader(file) + reader.Comma = '\t' + + return reader.ReadAll() +} + +func generateTraceID(random io.Reader) data.TraceID { + var r [16]byte + _, err := random.Read(r[:]) + if 
err != nil { + panic(err) + } + return data.NewTraceID(r) +} + +func generateSpanID(random io.Reader) data.SpanID { + var r [8]byte + _, err := random.Read(r[:]) + if err != nil { + panic(err) + } + return data.NewSpanID(r) +} diff --git a/internal/otel_collector/internal/goldendataset/metric_gen.go b/internal/otel_collector/internal/goldendataset/metric_gen.go new file mode 100644 index 00000000000..76e376647f6 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/metric_gen.go @@ -0,0 +1,269 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goldendataset + +import ( + "fmt" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// Simple utilities for generating metrics for testing + +// MetricCfg holds parameters for generating dummy metrics for testing. Set values on this struct to generate +// metrics with the corresponding number/type of attributes and pass into MetricDataFromCfg to generate metrics. +type MetricCfg struct { + // The type of metric to generate + MetricDescriptorType pdata.MetricDataType + // If MetricDescriptorType is one of the Sum, this describes if the sum is monotonic or not. + IsMonotonicSum bool + // A prefix for every metric name + MetricNamePrefix string + // The number of instrumentation library metrics per resource + NumILMPerResource int + // The size of the MetricSlice and number of Metrics + NumMetricsPerILM int + // The number of labels on the LabelsMap associated with each point + NumPtLabels int + // The number of points to generate per Metric + NumPtsPerMetric int + // The number of Attributes to insert into each Resource's AttributesMap + NumResourceAttrs int + // The number of ResourceMetrics for the single MetricData generated + NumResourceMetrics int + // The base value for each point + PtVal int + // The start time for each point + StartTime uint64 + // The duration of the steps between each generated point starting at StartTime + StepSize uint64 +} + +// DefaultCfg produces a MetricCfg with default values. These should be good enough to produce sane +// (but boring) metrics, and can be used as a starting point for making alterations. +func DefaultCfg() MetricCfg { + return MetricCfg{ + MetricDescriptorType: pdata.MetricDataTypeIntGauge, + MetricNamePrefix: "", + NumILMPerResource: 1, + NumMetricsPerILM: 1, + NumPtLabels: 1, + NumPtsPerMetric: 1, + NumResourceAttrs: 1, + NumResourceMetrics: 1, + PtVal: 1, + StartTime: 940000000000000000, + StepSize: 42, + } +} + +// DefaultMetricData produces MetricData with a default config. +func DefaultMetricData() pdata.Metrics { + return MetricDataFromCfg(DefaultCfg()) +} + +// MetricDataFromCfg produces MetricData with the passed-in config. 
+func MetricDataFromCfg(cfg MetricCfg) pdata.Metrics { + return newMetricGenerator().genMetricDataFromCfg(cfg) +} + +type metricGenerator struct { + metricID int +} + +func newMetricGenerator() *metricGenerator { + return &metricGenerator{} +} + +func (g *metricGenerator) genMetricDataFromCfg(cfg MetricCfg) pdata.Metrics { + md := pdata.NewMetrics() + rms := md.ResourceMetrics() + rms.Resize(cfg.NumResourceMetrics) + for i := 0; i < cfg.NumResourceMetrics; i++ { + rm := rms.At(i) + resource := rm.Resource() + for j := 0; j < cfg.NumResourceAttrs; j++ { + resource.Attributes().Insert( + fmt.Sprintf("resource-attr-name-%d", j), + pdata.NewAttributeValueString(fmt.Sprintf("resource-attr-val-%d", j)), + ) + } + g.populateIlm(cfg, rm) + } + return md +} + +func (g *metricGenerator) populateIlm(cfg MetricCfg, rm pdata.ResourceMetrics) { + ilms := rm.InstrumentationLibraryMetrics() + ilms.Resize(cfg.NumILMPerResource) + for i := 0; i < cfg.NumILMPerResource; i++ { + ilm := ilms.At(i) + g.populateMetrics(cfg, ilm) + } +} + +func (g *metricGenerator) populateMetrics(cfg MetricCfg, ilm pdata.InstrumentationLibraryMetrics) { + metrics := ilm.Metrics() + metrics.Resize(cfg.NumMetricsPerILM) + for i := 0; i < cfg.NumMetricsPerILM; i++ { + metric := metrics.At(i) + g.populateMetricDesc(cfg, metric) + switch cfg.MetricDescriptorType { + case pdata.MetricDataTypeIntGauge: + metric.SetDataType(pdata.MetricDataTypeIntGauge) + populateIntPoints(cfg, metric.IntGauge().DataPoints()) + case pdata.MetricDataTypeDoubleGauge: + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + populateDoublePoints(cfg, metric.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeIntSum: + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(cfg.IsMonotonicSum) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + populateIntPoints(cfg, sum.DataPoints()) + case pdata.MetricDataTypeDoubleSum: + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.SetIsMonotonic(cfg.IsMonotonicSum) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + populateDoublePoints(cfg, sum.DataPoints()) + case pdata.MetricDataTypeIntHistogram: + metric.SetDataType(pdata.MetricDataTypeIntHistogram) + histo := metric.IntHistogram() + histo.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + populateIntHistogram(cfg, histo) + case pdata.MetricDataTypeDoubleHistogram: + metric.SetDataType(pdata.MetricDataTypeDoubleHistogram) + histo := metric.DoubleHistogram() + histo.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + populateDoubleHistogram(cfg, histo) + } + } +} + +func (g *metricGenerator) populateMetricDesc(cfg MetricCfg, metric pdata.Metric) { + metric.SetName(fmt.Sprintf("%smetric_%d", cfg.MetricNamePrefix, g.metricID)) + g.metricID++ + metric.SetDescription("my-md-description") + metric.SetUnit("my-md-units") +} + +func populateIntPoints(cfg MetricCfg, pts pdata.IntDataPointSlice) { + pts.Resize(cfg.NumPtsPerMetric) + for i := 0; i < cfg.NumPtsPerMetric; i++ { + pt := pts.At(i) + pt.SetStartTime(pdata.TimestampUnixNano(cfg.StartTime)) + pt.SetTimestamp(getTimestamp(cfg.StartTime, cfg.StepSize, i)) + pt.SetValue(int64(cfg.PtVal + i)) + populatePtLabels(cfg, pt.LabelsMap()) + } +} + +func populateDoublePoints(cfg MetricCfg, pts pdata.DoubleDataPointSlice) { + pts.Resize(cfg.NumPtsPerMetric) + for i := 0; i < cfg.NumPtsPerMetric; i++ { + pt := pts.At(i) + pt.SetStartTime(pdata.TimestampUnixNano(cfg.StartTime)) + 
pt.SetTimestamp(getTimestamp(cfg.StartTime, cfg.StepSize, i)) + pt.SetValue(float64(cfg.PtVal + i)) + populatePtLabels(cfg, pt.LabelsMap()) + } +} + +func populateDoubleHistogram(cfg MetricCfg, dh pdata.DoubleHistogram) { + pts := dh.DataPoints() + pts.Resize(cfg.NumPtsPerMetric) + for i := 0; i < cfg.NumPtsPerMetric; i++ { + pt := pts.At(i) + pt.SetStartTime(pdata.TimestampUnixNano(cfg.StartTime)) + ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) + pt.SetTimestamp(ts) + populatePtLabels(cfg, pt.LabelsMap()) + setDoubleHistogramBounds(pt, 1, 2, 3, 4, 5) + addDoubleHistogramVal(pt, 1) + for i := 0; i < cfg.PtVal; i++ { + addDoubleHistogramVal(pt, 3) + } + addDoubleHistogramVal(pt, 5) + } +} + +func setDoubleHistogramBounds(hdp pdata.DoubleHistogramDataPoint, bounds ...float64) { + hdp.SetBucketCounts(make([]uint64, len(bounds))) + hdp.SetExplicitBounds(bounds) +} + +func addDoubleHistogramVal(hdp pdata.DoubleHistogramDataPoint, val float64) { + hdp.SetCount(hdp.Count() + 1) + hdp.SetSum(hdp.Sum() + val) + buckets := hdp.BucketCounts() + bounds := hdp.ExplicitBounds() + for i := 0; i < len(bounds); i++ { + bound := bounds[i] + if val <= bound { + buckets[i]++ + break + } + } +} + +func populateIntHistogram(cfg MetricCfg, dh pdata.IntHistogram) { + pts := dh.DataPoints() + pts.Resize(cfg.NumPtsPerMetric) + for i := 0; i < cfg.NumPtsPerMetric; i++ { + pt := pts.At(i) + pt.SetStartTime(pdata.TimestampUnixNano(cfg.StartTime)) + ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) + pt.SetTimestamp(ts) + populatePtLabels(cfg, pt.LabelsMap()) + setIntHistogramBounds(pt, 1, 2, 3, 4, 5) + addIntHistogramVal(pt, 1) + for i := 0; i < cfg.PtVal; i++ { + addIntHistogramVal(pt, 3) + } + addIntHistogramVal(pt, 5) + } +} + +func setIntHistogramBounds(hdp pdata.IntHistogramDataPoint, bounds ...float64) { + hdp.SetBucketCounts(make([]uint64, len(bounds))) + hdp.SetExplicitBounds(bounds) +} + +func addIntHistogramVal(hdp pdata.IntHistogramDataPoint, val int64) { + hdp.SetCount(hdp.Count() + 1) + hdp.SetSum(hdp.Sum() + val) + buckets := hdp.BucketCounts() + bounds := hdp.ExplicitBounds() + for i := 0; i < len(bounds); i++ { + bound := bounds[i] + if float64(val) <= bound { + buckets[i]++ + break + } + } +} + +func populatePtLabels(cfg MetricCfg, lm pdata.StringMap) { + for i := 0; i < cfg.NumPtLabels; i++ { + k := fmt.Sprintf("pt-label-key-%d", i) + v := fmt.Sprintf("pt-label-val-%d", i) + lm.Insert(k, v) + } +} + +func getTimestamp(startTime uint64, stepSize uint64, i int) pdata.TimestampUnixNano { + return pdata.TimestampUnixNano(startTime + (stepSize * uint64(i+1))) +} diff --git a/internal/otel_collector/internal/goldendataset/metric_gen_test.go b/internal/otel_collector/internal/goldendataset/metric_gen_test.go new file mode 100644 index 00000000000..360f9e50a08 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/metric_gen_test.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
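MetricCfg plus MetricDataFromCfg amounts to a small builder API for deterministic pdata fixtures. A sketch of how a test inside the collector module might use it (the test name and the specific knob values are illustrative; goldendataset is an internal package, so this only compiles as an in-module test file):

```go
package goldendataset

import (
	"testing"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// Sketch of a fixture-building test. With the default single resource,
// instrumentation library, and metric, NumPtsPerMetric alone determines
// the data-point count reported by MetricAndDataPointCount.
func TestHistogramFixtureSketch(t *testing.T) {
	cfg := DefaultCfg()
	cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleHistogram
	cfg.NumPtsPerMetric = 4
	cfg.NumPtLabels = 2

	md := MetricDataFromCfg(cfg)
	if _, ptCount := md.MetricAndDataPointCount(); ptCount != 4 {
		t.Fatalf("expected 4 data points, got %d", ptCount)
	}
}
```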
+ +package goldendataset + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestGenDefault(t *testing.T) { + md := DefaultMetricData() + mCount, ptCount := md.MetricAndDataPointCount() + require.Equal(t, 1, mCount) + require.Equal(t, 1, ptCount) + rms := md.ResourceMetrics() + rm := rms.At(0) + resource := rm.Resource() + rattrs := resource.Attributes() + rattrs.Len() + require.Equal(t, 1, rattrs.Len()) + val, _ := rattrs.Get("resource-attr-name-0") + require.Equal(t, "resource-attr-val-0", val.StringVal()) + ilms := rm.InstrumentationLibraryMetrics() + require.Equal(t, 1, ilms.Len()) + ms := ilms.At(0).Metrics() + require.Equal(t, 1, ms.Len()) + pdm := ms.At(0) + require.Equal(t, "metric_0", pdm.Name()) + require.Equal(t, "my-md-description", pdm.Description()) + require.Equal(t, "my-md-units", pdm.Unit()) + + require.Equal(t, pdata.MetricDataTypeIntGauge, pdm.DataType()) + pts := pdm.IntGauge().DataPoints() + require.Equal(t, 1, pts.Len()) + pt := pts.At(0) + + require.Equal(t, 1, pt.LabelsMap().Len()) + ptLabels, _ := pt.LabelsMap().Get("pt-label-key-0") + require.Equal(t, "pt-label-val-0", ptLabels) + + require.EqualValues(t, 940000000000000000, pt.StartTime()) + require.EqualValues(t, 940000000000000042, pt.Timestamp()) + require.EqualValues(t, 1, pt.Value()) +} + +func TestDoubleHistogramFunctions(t *testing.T) { + pt := pdata.NewDoubleHistogramDataPoint() + setDoubleHistogramBounds(pt, 1, 2, 3, 4, 5) + require.Equal(t, 5, len(pt.ExplicitBounds())) + require.Equal(t, 5, len(pt.BucketCounts())) + + addDoubleHistogramVal(pt, 1) + require.EqualValues(t, 1, pt.Count()) + require.EqualValues(t, 1, pt.Sum()) + require.EqualValues(t, 1, pt.BucketCounts()[0]) + + addDoubleHistogramVal(pt, 2) + require.EqualValues(t, 2, pt.Count()) + require.EqualValues(t, 3, pt.Sum()) + require.EqualValues(t, 1, pt.BucketCounts()[1]) + + addDoubleHistogramVal(pt, 2) + require.EqualValues(t, 3, pt.Count()) + require.EqualValues(t, 5, pt.Sum()) + require.EqualValues(t, 2, pt.BucketCounts()[1]) +} + +func TestIntHistogramFunctions(t *testing.T) { + pt := pdata.NewIntHistogramDataPoint() + setIntHistogramBounds(pt, 1, 2, 3, 4, 5) + require.Equal(t, 5, len(pt.ExplicitBounds())) + require.Equal(t, 5, len(pt.BucketCounts())) + + addIntHistogramVal(pt, 1) + require.EqualValues(t, 1, pt.Count()) + require.EqualValues(t, 1, pt.Sum()) + require.EqualValues(t, 1, pt.BucketCounts()[0]) + + addIntHistogramVal(pt, 2) + require.EqualValues(t, 2, pt.Count()) + require.EqualValues(t, 3, pt.Sum()) + require.EqualValues(t, 1, pt.BucketCounts()[1]) + + addIntHistogramVal(pt, 2) + require.EqualValues(t, 3, pt.Count()) + require.EqualValues(t, 5, pt.Sum()) + require.EqualValues(t, 2, pt.BucketCounts()[1]) +} + +func TestGenDoubleHistogram(t *testing.T) { + cfg := DefaultCfg() + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleHistogram + cfg.PtVal = 2 + md := MetricDataFromCfg(cfg) + pts := getMetric(md).DoubleHistogram().DataPoints() + pt := pts.At(0) + buckets := pt.BucketCounts() + require.Equal(t, 5, len(buckets)) + require.EqualValues(t, 2, buckets[2]) +} + +func TestGenDoubleGauge(t *testing.T) { + cfg := DefaultCfg() + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleGauge + md := MetricDataFromCfg(cfg) + metric := getMetric(md) + pts := metric.DoubleGauge().DataPoints() + require.Equal(t, 1, pts.Len()) + pt := pts.At(0) + require.EqualValues(t, 1, pt.Value()) +} + +func getMetric(md pdata.Metrics) pdata.Metric { + return 
md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) +} diff --git a/internal/otel_collector/internal/goldendataset/pict_metric_gen.go b/internal/otel_collector/internal/goldendataset/pict_metric_gen.go new file mode 100644 index 00000000000..53b8e853563 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/pict_metric_gen.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goldendataset + +import ( + "fmt" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// GenerateMetricDatas takes the filename of a PICT-generated file, walks through all of the rows in the PICT +// file and for each row, generates a MetricData object, collecting them and returning them to the caller. +func GenerateMetricDatas(metricPairsFile string) ([]pdata.Metrics, error) { + pictData, err := loadPictOutputFile(metricPairsFile) + if err != nil { + return nil, err + } + var out []pdata.Metrics + for i, values := range pictData { + if i == 0 { + continue + } + metricInputs := PICTMetricInputs{ + NumPtsPerMetric: PICTNumPtsPerMetric(values[0]), + MetricType: PICTMetricDataType(values[1]), + NumPtLabels: PICTNumPtLabels(values[2]), + } + cfg := pictToCfg(metricInputs) + cfg.MetricNamePrefix = fmt.Sprintf("pict_%d_", i) + md := MetricDataFromCfg(cfg) + out = append(out, md) + } + return out, nil +} + +func pictToCfg(inputs PICTMetricInputs) MetricCfg { + cfg := DefaultCfg() + switch inputs.NumResourceAttrs { + case AttrsNone: + cfg.NumResourceAttrs = 0 + case AttrsOne: + cfg.NumResourceAttrs = 1 + case AttrsTwo: + cfg.NumResourceAttrs = 2 + } + + switch inputs.NumPtsPerMetric { + case NumPtsPerMetricOne: + cfg.NumPtsPerMetric = 1 + case NumPtsPerMetricMany: + cfg.NumPtsPerMetric = 16 + } + + switch inputs.MetricType { + case MetricTypeIntGauge: + cfg.MetricDescriptorType = pdata.MetricDataTypeIntGauge + case MetricTypeMonotonicIntSum: + cfg.MetricDescriptorType = pdata.MetricDataTypeIntSum + cfg.IsMonotonicSum = true + case MetricTypeNonMonotonicIntSum: + cfg.MetricDescriptorType = pdata.MetricDataTypeIntSum + cfg.IsMonotonicSum = false + case MetricTypeDoubleGauge: + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleGauge + case MetricTypeMonotonicDoubleSum: + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleSum + cfg.IsMonotonicSum = true + case MetricTypeNonMonotonicDoubleSum: + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleSum + cfg.IsMonotonicSum = false + case MetricTypeIntHistogram: + cfg.MetricDescriptorType = pdata.MetricDataTypeIntHistogram + case MetricTypeDoubleHistogram: + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleHistogram + default: + panic("Should not happen, unsupported type " + string(inputs.MetricType)) + } + + switch inputs.NumPtLabels { + case LabelsNone: + cfg.NumPtLabels = 0 + case LabelsOne: + cfg.NumPtLabels = 1 + case LabelsMany: + cfg.NumPtLabels = 16 + } + return cfg +} diff --git 
a/internal/otel_collector/internal/goldendataset/pict_metric_gen_test.go b/internal/otel_collector/internal/goldendataset/pict_metric_gen_test.go new file mode 100644 index 00000000000..678d6237516 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/pict_metric_gen_test.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goldendataset + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestGenerateMetricDatas(t *testing.T) { + mds, err := GenerateMetricDatas("testdata/generated_pict_pairs_metrics.txt") + require.NoError(t, err) + require.Equal(t, 25, len(mds)) +} + +func TestPICTtoCfg(t *testing.T) { + tests := []struct { + name string + inputs PICTMetricInputs + cfg MetricCfg + }{ + { + name: "none", + inputs: PICTMetricInputs{ + NumResourceAttrs: AttrsNone, + NumPtsPerMetric: NumPtsPerMetricOne, + MetricType: MetricTypeIntGauge, + NumPtLabels: LabelsNone, + }, + cfg: MetricCfg{ + NumResourceAttrs: 0, + NumPtsPerMetric: 1, + MetricDescriptorType: pdata.MetricDataTypeIntGauge, + NumPtLabels: 0, + }, + }, + { + name: "one", + inputs: PICTMetricInputs{ + NumResourceAttrs: AttrsOne, + NumPtsPerMetric: NumPtsPerMetricOne, + MetricType: MetricTypeDoubleGauge, + NumPtLabels: LabelsOne, + }, + cfg: MetricCfg{ + NumResourceAttrs: 1, + NumPtsPerMetric: 1, + MetricDescriptorType: pdata.MetricDataTypeDoubleGauge, + NumPtLabels: 1, + }, + }, + { + name: "many", + inputs: PICTMetricInputs{ + NumResourceAttrs: AttrsTwo, + NumPtsPerMetric: NumPtsPerMetricMany, + MetricType: MetricTypeDoubleHistogram, + NumPtLabels: LabelsMany, + }, + cfg: MetricCfg{ + NumResourceAttrs: 2, + NumPtsPerMetric: 16, + MetricDescriptorType: pdata.MetricDataTypeDoubleHistogram, + NumPtLabels: 16, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := pictToCfg(test.inputs) + expected := test.cfg + require.Equal(t, expected.NumResourceAttrs, actual.NumResourceAttrs) + require.Equal(t, expected.NumPtsPerMetric, actual.NumPtsPerMetric) + require.Equal(t, expected.MetricDescriptorType, actual.MetricDescriptorType) + require.Equal(t, expected.NumPtLabels, actual.NumPtLabels) + }) + } +} diff --git a/internal/otel_collector/internal/goldendataset/pict_metrics_input_defs.go b/internal/otel_collector/internal/goldendataset/pict_metrics_input_defs.go new file mode 100644 index 00000000000..cab11a0686b --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/pict_metrics_input_defs.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goldendataset + +// Start of PICT inputs for generating golden dataset metrics (pict_input_metrics.txt) + +// PICTMetricInputs defines one pairwise combination of MetricData variations +type PICTMetricInputs struct { + // Specifies the number of points on each metric. + NumPtsPerMetric PICTNumPtsPerMetric + // Specifies the types of metrics that can be generated. + MetricType PICTMetricDataType + // Specifies the number of labels on each datapoint. + NumPtLabels PICTNumPtLabels + // Specifies the number of attributes on each resource. + NumResourceAttrs PICTNumResourceAttrs +} + +// Enumerates the types of metrics that can be generated. +type PICTMetricDataType string + +const ( + MetricTypeIntGauge PICTMetricDataType = "IntGauge" + MetricTypeMonotonicIntSum PICTMetricDataType = "MonotonicIntSum" + MetricTypeNonMonotonicIntSum PICTMetricDataType = "NonMonotonicIntSum" + MetricTypeDoubleGauge PICTMetricDataType = "DoubleGauge" + MetricTypeMonotonicDoubleSum PICTMetricDataType = "MonotonicDoubleSum" + MetricTypeNonMonotonicDoubleSum PICTMetricDataType = "NonMonotonicDoubleSum" + MetricTypeIntHistogram PICTMetricDataType = "IntHistogram" + MetricTypeDoubleHistogram PICTMetricDataType = "DoubleHistogram" +) + +// Enumerates the number of labels on each datapoint. +type PICTNumPtLabels string + +const ( + LabelsNone PICTNumPtLabels = "NoLabels" + LabelsOne PICTNumPtLabels = "OneLabel" + LabelsMany PICTNumPtLabels = "ManyLabels" +) + +// Enumerates the number of points on each metric. +type PICTNumPtsPerMetric string + +const ( + NumPtsPerMetricOne PICTNumPtsPerMetric = "OnePt" + NumPtsPerMetricMany PICTNumPtsPerMetric = "ManyPts" +) + +// Enumerates the number of attributes on each resource. +type PICTNumResourceAttrs string + +const ( + AttrsNone PICTNumResourceAttrs = "NoAttrs" + AttrsOne PICTNumResourceAttrs = "OneAttr" + AttrsTwo PICTNumResourceAttrs = "TwoAttrs" +) diff --git a/internal/otel_collector/internal/goldendataset/pict_tracing_input_defs.go b/internal/otel_collector/internal/goldendataset/pict_tracing_input_defs.go new file mode 100644 index 00000000000..fcf1aa28700 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/pict_tracing_input_defs.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
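These string enums are the input dimensions handed to the PICT pairwise-combination tool; each row of the generated pairs file fixes one value per dimension, and pictToCfg (in pict_metric_gen.go above) expands a row into a concrete MetricCfg. An illustrative in-package test of one such expansion (the test name is mine, and NumResourceAttrs is left at its default here):

```go
package goldendataset

import "testing"

// Sketch: one pairwise row from the generated pairs file, e.g.
// "ManyPts<TAB>MonotonicIntSum<TAB>OneLabel", expands via pictToCfg
// into a concrete MetricCfg.
func TestPairwiseRowExpansionSketch(t *testing.T) {
	cfg := pictToCfg(PICTMetricInputs{
		NumPtsPerMetric: NumPtsPerMetricMany,
		MetricType:      MetricTypeMonotonicIntSum,
		NumPtLabels:     LabelsOne,
	})
	if cfg.NumPtsPerMetric != 16 || !cfg.IsMonotonicSum || cfg.NumPtLabels != 1 {
		t.Fatalf("unexpected expansion: %+v", cfg)
	}
}
```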
+ +package goldendataset + +// Start of PICT inputs for generating golden dataset ResourceSpans (pict_input_traces.txt) + +// Input columns in pict_input_traces.txt +const ( + TracesColumnResource = 0 + TracesColumnInstrumentationLibrary = 1 + TracesColumnSpans = 2 +) + +// Enumerates the supported types of resource instances that can be generated. +type PICTInputResource string + +const ( + ResourceNil PICTInputResource = "Nil" + ResourceEmpty PICTInputResource = "Empty" + ResourceVMOnPrem PICTInputResource = "VMOnPrem" + ResourceVMCloud PICTInputResource = "VMCloud" + ResourceK8sOnPrem PICTInputResource = "K8sOnPrem" + ResourceK8sCloud PICTInputResource = "K8sCloud" + ResourceFaas PICTInputResource = "Faas" + ResourceExec PICTInputResource = "Exec" +) + +// Enumerates the number and kind of instrumentation library instances that can be generated. +type PICTInputInstrumentationLibrary string + +const ( + LibraryNone PICTInputInstrumentationLibrary = "None" + LibraryOne PICTInputInstrumentationLibrary = "One" + LibraryTwo PICTInputInstrumentationLibrary = "Two" +) + +// Enumerates the relative sizes of tracing spans that can be attached to an instrumentation library span instance. +type PICTInputSpans string + +const ( + LibrarySpansNone PICTInputSpans = "None" + LibrarySpansOne PICTInputSpans = "One" + LibrarySpansSeveral PICTInputSpans = "Several" + LibrarySpansAll PICTInputSpans = "All" +) + +// PICTTracingInputs defines one pairwise combination of ResourceSpans variations +type PICTTracingInputs struct { + // Specifies the category of attributes to populate the Resource field with + Resource PICTInputResource + // Specifies the number and library categories to populate the InstrumentationLibrarySpans field with + InstrumentationLibrary PICTInputInstrumentationLibrary + // Specifies the relative number of spans to populate the InstrumentationLibrarySpans' Spans field with + Spans PICTInputSpans +} + +// Start of PICT inputs for generating golden dataset Spans (pict_input_spans.txt) + +// Input columns in pict_input_spans.txt +const ( + SpansColumnParent = 0 + SpansColumnTracestate = 1 + SpansColumnKind = 2 + SpansColumnAttributes = 3 + SpansColumnEvents = 4 + SpansColumnLinks = 5 + SpansColumnStatus = 6 +) + +// Enumerates the parent/child types of spans that can be generated. +type PICTInputParent string + +const ( + SpanParentRoot PICTInputParent = "Root" + SpanParentChild PICTInputParent = "Child" +) + +// Enumerates the categories of tracestate values that can be generated for a span. +type PICTInputTracestate string + +const ( + TraceStateEmpty PICTInputTracestate = "Empty" + TraceStateOne PICTInputTracestate = "One" + TraceStateFour PICTInputTracestate = "Four" +) + +// Enumerates the span kind values that can be set for a span. +type PICTInputKind string + +const ( + SpanKindUnspecified PICTInputKind = "Unspecified" + SpanKindInternal PICTInputKind = "Internal" + SpanKindServer PICTInputKind = "Server" + SpanKindClient PICTInputKind = "Client" + SpanKindProducer PICTInputKind = "Producer" + SpanKindConsumer PICTInputKind = "Consumer" +) + +// Enumerates the categories of representative attributes a generated span can be populated with.
+type PICTInputAttributes string + +const ( + SpanAttrNil PICTInputAttributes = "Nil" + SpanAttrEmpty PICTInputAttributes = "Empty" + SpanAttrDatabaseSQL PICTInputAttributes = "DatabaseSQL" + SpanAttrDatabaseNoSQL PICTInputAttributes = "DatabaseNoSQL" + SpanAttrFaaSDatasource PICTInputAttributes = "FaaSDatasource" + SpanAttrFaaSHTTP PICTInputAttributes = "FaaSHTTP" + SpanAttrFaaSPubSub PICTInputAttributes = "FaaSPubSub" + SpanAttrFaaSTimer PICTInputAttributes = "FaaSTimer" + SpanAttrFaaSOther PICTInputAttributes = "FaaSOther" + SpanAttrHTTPClient PICTInputAttributes = "HTTPClient" + SpanAttrHTTPServer PICTInputAttributes = "HTTPServer" + SpanAttrMessagingProducer PICTInputAttributes = "MessagingProducer" + SpanAttrMessagingConsumer PICTInputAttributes = "MessagingConsumer" + SpanAttrGRPCClient PICTInputAttributes = "gRPCClient" + SpanAttrGRPCServer PICTInputAttributes = "gRPCServer" + SpanAttrInternal PICTInputAttributes = "Internal" + SpanAttrMaxCount PICTInputAttributes = "MaxCount" +) + +// Enumerates the categories of events and/or links a generated span can be populated with. +type PICTInputSpanChild string + +const ( + SpanChildCountNil PICTInputSpanChild = "Nil" + SpanChildCountEmpty PICTInputSpanChild = "Empty" + SpanChildCountOne PICTInputSpanChild = "One" + SpanChildCountTwo PICTInputSpanChild = "Two" + SpanChildCountEight PICTInputSpanChild = "Eight" +) + +// Enumerates the status values a generated span can be populated with. +type PICTInputStatus string + +const ( + SpanStatusUnset PICTInputStatus = "Unset" + SpanStatusOk PICTInputStatus = "Ok" + SpanStatusError PICTInputStatus = "Error" +) + +// PICTSpanInputs defines one pairwise combination of Span variations +type PICTSpanInputs struct { + // Specifies whether the ParentSpanId field should be populated or not + Parent PICTInputParent + // Specifies the category of contents to populate the TraceState field with + Tracestate PICTInputTracestate + // Specifies the value to populate the Kind field with + Kind PICTInputKind + // Specifies the category of values to populate the Attributes field with + Attributes PICTInputAttributes + // Specifies the category of contents to populate the Events field with + Events PICTInputSpanChild + // Specifies the category of contents to populate the Links field with + Links PICTInputSpanChild + // Specifies the value to populate the Status field with + Status PICTInputStatus +} diff --git a/internal/otel_collector/internal/goldendataset/resource_generator.go b/internal/otel_collector/internal/goldendataset/resource_generator.go new file mode 100644 index 00000000000..aba7483f38a --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/resource_generator.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
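A property of the span inputs that makes golden-trace failures easy to triage: generateSpanName in span_generator.go (later in this diff) concatenates the whole PICTSpanInputs combination into the span name, so a failing test names the exact variation that broke. A sketch of how one combination reads, written as an in-package example test (the function name follows Go's Example convention; the combination itself is arbitrary):

```go
package goldendataset

import "fmt"

// Sketch: one pairwise combination of the span dimensions defined above,
// printed in the same "/Parent/Tracestate/Kind/Attributes/Events/Links/Status"
// shape that generateSpanName produces.
func ExamplePICTSpanInputs() {
	in := PICTSpanInputs{
		Parent:     SpanParentChild,
		Tracestate: TraceStateEmpty,
		Kind:       SpanKindServer,
		Attributes: SpanAttrHTTPServer,
		Events:     SpanChildCountTwo,
		Links:      SpanChildCountNil,
		Status:     SpanStatusError,
	}
	fmt.Printf("/%s/%s/%s/%s/%s/%s/%s\n",
		in.Parent, in.Tracestate, in.Kind, in.Attributes, in.Events, in.Links, in.Status)
	// Output: /Child/Empty/Server/HTTPServer/Two/Nil/Error
}
```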
+ +package goldendataset + +import ( + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" + "go.opentelemetry.io/collector/translator/conventions" +) + +// GenerateResource generates an OTLP Resource object with representative attributes for the +// underlying resource type specified by the rscID input parameter. +func GenerateResource(rscID PICTInputResource) otlpresource.Resource { + var attrs map[string]interface{} + switch rscID { + case ResourceNil: + attrs = generateNilAttributes() + case ResourceEmpty: + attrs = generateEmptyAttributes() + case ResourceVMOnPrem: + attrs = generateOnpremVMAttributes() + case ResourceVMCloud: + attrs = generateCloudVMAttributes() + case ResourceK8sOnPrem: + attrs = generateOnpremK8sAttributes() + case ResourceK8sCloud: + attrs = generateCloudK8sAttributes() + case ResourceFaas: + attrs = generateFassAttributes() + case ResourceExec: + attrs = generateExecAttributes() + default: + attrs = generateEmptyAttributes() + } + var dropped uint32 + if len(attrs) < 10 { + dropped = 0 + } else { + dropped = uint32(len(attrs) % 4) + } + return otlpresource.Resource{ + Attributes: convertMapToAttributeKeyValues(attrs), + DroppedAttributesCount: dropped, + } +} + +func generateNilAttributes() map[string]interface{} { + return nil +} + +func generateEmptyAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + return attrMap +} + +func generateOnpremVMAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeServiceName] = "customers" + attrMap[conventions.AttributeServiceNamespace] = "production" + attrMap[conventions.AttributeServiceVersion] = "semver:0.7.3" + subMap := make(map[string]interface{}) + subMap["public"] = "tc-prod9.internal.example.com" + subMap["internal"] = "172.18.36.18" + attrMap[conventions.AttributeHostName] = &otlpcommon.KeyValueList{ + Values: convertMapToAttributeKeyValues(subMap), + } + attrMap[conventions.AttributeHostImageID] = "661ADFA6-E293-4870-9EFA-1AA052C49F18" + attrMap[conventions.AttributeTelemetrySDKLanguage] = conventions.AttributeSDKLangValueJava + attrMap[conventions.AttributeTelemetrySDKName] = "opentelemetry" + attrMap[conventions.AttributeTelemetrySDKVersion] = "0.3.0" + return attrMap +} + +func generateCloudVMAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeServiceName] = "customers" + attrMap[conventions.AttributeServiceNamespace] = "production" + attrMap[conventions.AttributeServiceVersion] = "semver:0.7.3" + attrMap[conventions.AttributeTelemetrySDKLanguage] = conventions.AttributeSDKLangValueJava + attrMap[conventions.AttributeTelemetrySDKName] = "opentelemetry" + attrMap[conventions.AttributeTelemetrySDKVersion] = "0.3.0" + attrMap[conventions.AttributeHostID] = "57e8add1f79a454bae9fb1f7756a009a" + attrMap[conventions.AttributeHostName] = "env-check" + attrMap[conventions.AttributeHostImageID] = "5.3.0-1020-azure" + attrMap[conventions.AttributeHostType] = "B1ms" + attrMap[conventions.AttributeCloudProvider] = "azure" + attrMap[conventions.AttributeCloudAccount] = "2f5b8278-4b80-4930-a6bb-d86fc63a2534" + attrMap[conventions.AttributeCloudRegion] = "South Central US" + return attrMap +} + +func generateOnpremK8sAttributes() map[string]interface{} { + attrMap := 
make(map[string]interface{}) + attrMap[conventions.AttributeContainerName] = "cert-manager" + attrMap[conventions.AttributeContainerImage] = "quay.io/jetstack/cert-manager-controller:v0.14.2" + attrMap[conventions.AttributeK8sCluster] = "docker-desktop" + attrMap[conventions.AttributeK8sNamespace] = "cert-manager" + attrMap[conventions.AttributeK8sDeployment] = "cm-1-cert-manager" + attrMap[conventions.AttributeK8sPod] = "cm-1-cert-manager-6448b4949b-t2jtd" + attrMap[conventions.AttributeHostName] = "docker-desktop" + return attrMap +} + +func generateCloudK8sAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeContainerName] = "otel-collector" + attrMap[conventions.AttributeContainerImage] = "otel/opentelemetry-collector-contrib" + attrMap[conventions.AttributeContainerTag] = "0.4.0" + attrMap[conventions.AttributeK8sCluster] = "erp-dev" + attrMap[conventions.AttributeK8sNamespace] = "monitoring" + attrMap[conventions.AttributeK8sDeployment] = "otel-collector" + attrMap[conventions.AttributeK8sDeploymentUID] = "4D614B27-EDAF-409B-B631-6963D8F6FCD4" + attrMap[conventions.AttributeK8sReplicaSet] = "otel-collector-2983fd34" + attrMap[conventions.AttributeK8sReplicaSetUID] = "EC7D59EF-D5B6-48B7-881E-DA6B7DD539B6" + attrMap[conventions.AttributeK8sPod] = "otel-collector-6484db5844-c6f9m" + attrMap[conventions.AttributeK8sPodUID] = "FDFD941E-2A7A-4945-B601-88DD486161A4" + attrMap[conventions.AttributeHostID] = "ec2e3fdaffa294348bdf355156b94cda" + attrMap[conventions.AttributeHostName] = "10.99.118.157" + attrMap[conventions.AttributeHostImageID] = "ami-011c865bf7da41a9d" + attrMap[conventions.AttributeHostType] = "m5.xlarge" + attrMap[conventions.AttributeCloudProvider] = "aws" + attrMap[conventions.AttributeCloudAccount] = "12345678901" + attrMap[conventions.AttributeCloudRegion] = "us-east-1" + attrMap[conventions.AttributeCloudZone] = "us-east-1c" + return attrMap +} + +func generateFassAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeFaasID] = "https://us-central1-dist-system-demo.cloudfunctions.net/env-vars-print" + attrMap[conventions.AttributeFaasName] = "env-vars-print" + attrMap[conventions.AttributeFaasVersion] = "semver:1.0.0" + attrMap[conventions.AttributeCloudProvider] = "gcp" + attrMap[conventions.AttributeCloudAccount] = "opentelemetry" + attrMap[conventions.AttributeCloudRegion] = "us-central1" + attrMap[conventions.AttributeCloudZone] = "us-central1-a" + return attrMap +} + +func generateExecAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeProcessExecutableName] = "otelcol" + parts := make([]otlpcommon.AnyValue, 3) + parts[0] = otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "otelcol"}} + parts[1] = otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "--config=/etc/otel-collector-config.yaml"}} + parts[2] = otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "--mem-ballast-size-mib=683"}} + attrMap[conventions.AttributeProcessCommandLine] = &otlpcommon.ArrayValue{ + Values: parts, + } + attrMap[conventions.AttributeProcessExecutablePath] = "/usr/local/bin/otelcol" + attrMap[conventions.AttributeProcessID] = 2020 + attrMap[conventions.AttributeProcessOwner] = "otel" + attrMap[conventions.AttributeOSType] = "LINUX" + attrMap[conventions.AttributeOSDescription] = + "Linux ubuntu 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 
diff --git a/internal/otel_collector/internal/goldendataset/resource_generator_test.go b/internal/otel_collector/internal/goldendataset/resource_generator_test.go
new file mode 100644
index 00000000000..f11b9ca0bfa
--- /dev/null
+++ b/internal/otel_collector/internal/goldendataset/resource_generator_test.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package goldendataset
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1"
+)
+
+func TestGenerateResource(t *testing.T) {
+	resourceIDs := []PICTInputResource{ResourceNil, ResourceEmpty, ResourceVMOnPrem, ResourceVMCloud, ResourceK8sOnPrem,
+		ResourceK8sCloud, ResourceFaas, ResourceExec}
+	for _, rscID := range resourceIDs {
+		rsc := GenerateResource(rscID)
+		if rscID == ResourceNil {
+			assert.Nil(t, rsc.Attributes)
+		} else {
+			assert.NotNil(t, rsc.Attributes)
+		}
+		// test marshal/unmarshal round trip
+		bytes, err := rsc.Marshal()
+		if err != nil {
+			assert.Fail(t, err.Error())
+		}
+		if len(bytes) > 0 {
+			rscCopy := &otlpresource.Resource{}
+			err = rscCopy.Unmarshal(bytes)
+			if err != nil {
+				assert.Fail(t, err.Error())
+			}
+			assert.EqualValues(t, len(rsc.Attributes), len(rscCopy.Attributes))
+		}
+	}
+}
diff --git a/internal/otel_collector/internal/goldendataset/span_generator.go b/internal/otel_collector/internal/goldendataset/span_generator.go
new file mode 100644
index 00000000000..5c2557e07ff
--- /dev/null
+++ b/internal/otel_collector/internal/goldendataset/span_generator.go
@@ -0,0 +1,529 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package goldendataset + +import ( + "fmt" + "io" + "time" + + "go.opentelemetry.io/collector/internal/data" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/translator/conventions" +) + +var statusCodeMap = map[PICTInputStatus]otlptrace.Status_StatusCode{ + SpanStatusUnset: otlptrace.Status_STATUS_CODE_UNSET, + SpanStatusOk: otlptrace.Status_STATUS_CODE_OK, + SpanStatusError: otlptrace.Status_STATUS_CODE_ERROR, +} + +var statusMsgMap = map[PICTInputStatus]string{ + SpanStatusUnset: "Unset", + SpanStatusOk: "Ok", + SpanStatusError: "Error", +} + +// GenerateSpans generates a slice of OTLP Span objects with the number of spans specified by the count input +// parameter. The startPos parameter specifies the line in the PICT tool-generated, test parameter +// combination records file specified by the pictFile parameter to start reading from. When the end record +// is reached it loops back to the first record. The random parameter injects the random number generator +// to use in generating IDs and other random values. Using a random number generator with the same seed value +// enables reproducible tests. +// +// The return values are the slice with the generated spans, the starting position for the next generation +// run and the error which caused the spans generation to fail. If err is not nil, the spans slice will +// have nil values. +func GenerateSpans(count int, startPos int, pictFile string, random io.Reader) ([]*otlptrace.Span, int, error) { + pairsData, err := loadPictOutputFile(pictFile) + if err != nil { + return nil, 0, err + } + pairsTotal := len(pairsData) + spanList := make([]*otlptrace.Span, count) + index := startPos + 1 + var inputs []string + var spanInputs *PICTSpanInputs + var traceID data.TraceID + var parentID data.SpanID + for i := 0; i < count; i++ { + if index >= pairsTotal { + index = 1 + } + inputs = pairsData[index] + spanInputs = &PICTSpanInputs{ + Parent: PICTInputParent(inputs[SpansColumnParent]), + Tracestate: PICTInputTracestate(inputs[SpansColumnTracestate]), + Kind: PICTInputKind(inputs[SpansColumnKind]), + Attributes: PICTInputAttributes(inputs[SpansColumnAttributes]), + Events: PICTInputSpanChild(inputs[SpansColumnEvents]), + Links: PICTInputSpanChild(inputs[SpansColumnLinks]), + Status: PICTInputStatus(inputs[SpansColumnStatus]), + } + switch spanInputs.Parent { + case SpanParentRoot: + traceID = generateTraceID(random) + parentID = data.NewSpanID([8]byte{}) + case SpanParentChild: + // use existing if available + if !traceID.IsValid() { + traceID = generateTraceID(random) + } + if !parentID.IsValid() { + parentID = generateSpanID(random) + } + } + spanName := generateSpanName(spanInputs) + spanList[i] = GenerateSpan(traceID, parentID, spanName, spanInputs, random) + parentID = spanList[i].SpanId + index++ + } + return spanList, index, nil +} + +func generateSpanName(spanInputs *PICTSpanInputs) string { + return fmt.Sprintf("/%s/%s/%s/%s/%s/%s/%s", spanInputs.Parent, spanInputs.Tracestate, spanInputs.Kind, + spanInputs.Attributes, spanInputs.Events, spanInputs.Links, spanInputs.Status) +} + +// GenerateSpan generates a single OTLP Span based on the input values provided. 
They are: +// traceID - the trace ID to use, should not be nil +// parentID - the parent span ID or nil if it is a root span +// spanName - the span name, should not be blank +// spanInputs - the pairwise combination of field value variations for this span +// random - the random number generator to use in generating ID values +// +// The generated span is returned. +func GenerateSpan(traceID data.TraceID, parentID data.SpanID, spanName string, spanInputs *PICTSpanInputs, + random io.Reader) *otlptrace.Span { + endTime := time.Now().Add(-50 * time.Microsecond) + return &otlptrace.Span{ + TraceId: traceID, + SpanId: generateSpanID(random), + TraceState: generateTraceState(spanInputs.Tracestate), + ParentSpanId: parentID, + Name: spanName, + Kind: lookupSpanKind(spanInputs.Kind), + StartTimeUnixNano: uint64(endTime.Add(-215 * time.Millisecond).UnixNano()), + EndTimeUnixNano: uint64(endTime.UnixNano()), + Attributes: generateSpanAttributes(spanInputs.Attributes, spanInputs.Status), + DroppedAttributesCount: 0, + Events: generateSpanEvents(spanInputs.Events), + DroppedEventsCount: 0, + Links: generateSpanLinks(spanInputs.Links, random), + DroppedLinksCount: 0, + Status: generateStatus(spanInputs.Status), + } +} + +func generateTraceState(tracestate PICTInputTracestate) string { + switch tracestate { + case TraceStateOne: + return "lasterror=f39cd56cc44274fd5abd07ef1164246d10ce2955" + case TraceStateFour: + return "err@ck=80ee5638,rate@ck=1.62,rojo=00f067aa0ba902b7,congo=t61rcWkgMzE" + case TraceStateEmpty: + fallthrough + default: + return "" + } +} + +func lookupSpanKind(kind PICTInputKind) otlptrace.Span_SpanKind { + switch kind { + case SpanKindClient: + return otlptrace.Span_SPAN_KIND_CLIENT + case SpanKindServer: + return otlptrace.Span_SPAN_KIND_SERVER + case SpanKindProducer: + return otlptrace.Span_SPAN_KIND_PRODUCER + case SpanKindConsumer: + return otlptrace.Span_SPAN_KIND_CONSUMER + case SpanKindInternal: + return otlptrace.Span_SPAN_KIND_INTERNAL + case SpanKindUnspecified: + fallthrough + default: + return otlptrace.Span_SPAN_KIND_UNSPECIFIED + } +} + +func generateSpanAttributes(spanTypeID PICTInputAttributes, statusStr PICTInputStatus) []otlpcommon.KeyValue { + includeStatus := SpanStatusUnset != statusStr + var attrs map[string]interface{} + switch spanTypeID { + case SpanAttrNil: + attrs = nil + case SpanAttrEmpty: + attrs = make(map[string]interface{}) + case SpanAttrDatabaseSQL: + attrs = generateDatabaseSQLAttributes() + case SpanAttrDatabaseNoSQL: + attrs = generateDatabaseNoSQLAttributes() + case SpanAttrFaaSDatasource: + attrs = generateFaaSDatasourceAttributes() + case SpanAttrFaaSHTTP: + attrs = generateFaaSHTTPAttributes(includeStatus) + case SpanAttrFaaSPubSub: + attrs = generateFaaSPubSubAttributes() + case SpanAttrFaaSTimer: + attrs = generateFaaSTimerAttributes() + case SpanAttrFaaSOther: + attrs = generateFaaSOtherAttributes() + case SpanAttrHTTPClient: + attrs = generateHTTPClientAttributes(includeStatus) + case SpanAttrHTTPServer: + attrs = generateHTTPServerAttributes(includeStatus) + case SpanAttrMessagingProducer: + attrs = generateMessagingProducerAttributes() + case SpanAttrMessagingConsumer: + attrs = generateMessagingConsumerAttributes() + case SpanAttrGRPCClient: + attrs = generateGRPCClientAttributes() + case SpanAttrGRPCServer: + attrs = generateGRPCServerAttributes() + case SpanAttrInternal: + attrs = generateInternalAttributes() + case SpanAttrMaxCount: + attrs = generateMaxCountAttributes(includeStatus) + default: + attrs = 
generateGRPCClientAttributes() + } + return convertMapToAttributeKeyValues(attrs) +} + +func generateStatus(statusStr PICTInputStatus) otlptrace.Status { + if SpanStatusUnset == statusStr { + return otlptrace.Status{} + } + return otlptrace.Status{ + Code: statusCodeMap[statusStr], + Message: statusMsgMap[statusStr], + } +} + +func generateDatabaseSQLAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeDBSystem] = "mysql" + attrMap[conventions.AttributeDBConnectionString] = "Server=shopdb.example.com;Database=ShopDb;Uid=billing_user;TableCache=true;UseCompression=True;MinimumPoolSize=10;MaximumPoolSize=50;" + attrMap[conventions.AttributeDBUser] = "billing_user" + attrMap[conventions.AttributeNetHostIP] = "192.0.3.122" + attrMap[conventions.AttributeNetHostPort] = int64(51306) + attrMap[conventions.AttributeNetPeerName] = "shopdb.example.com" + attrMap[conventions.AttributeNetPeerIP] = "192.0.2.12" + attrMap[conventions.AttributeNetPeerPort] = int64(3306) + attrMap[conventions.AttributeNetTransport] = "IP.TCP" + attrMap[conventions.AttributeDBName] = "shopdb" + attrMap[conventions.AttributeDBStatement] = "SELECT * FROM orders WHERE order_id = 'o4711'" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateDatabaseNoSQLAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeDBSystem] = "mongodb" + attrMap[conventions.AttributeDBUser] = "the_user" + attrMap[conventions.AttributeNetPeerName] = "mongodb0.example.com" + attrMap[conventions.AttributeNetPeerIP] = "192.0.2.14" + attrMap[conventions.AttributeNetPeerPort] = int64(27017) + attrMap[conventions.AttributeNetTransport] = "IP.TCP" + attrMap[conventions.AttributeDBName] = "shopDb" + attrMap[conventions.AttributeDBOperation] = "findAndModify" + attrMap[conventions.AttributeDBMongoDBCollection] = "products" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateFaaSDatasourceAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeFaaSTrigger] = conventions.FaaSTriggerDataSource + attrMap[conventions.AttributeFaaSExecution] = "DB85AF51-5E13-473D-8454-1E2D59415EAB" + attrMap[conventions.AttributeFaaSDocumentCollection] = "faa-flight-delay-information-incoming" + attrMap[conventions.AttributeFaaSDocumentOperation] = "insert" + attrMap[conventions.AttributeFaaSDocumentTime] = "2020-05-09T19:50:06Z" + attrMap[conventions.AttributeFaaSDocumentName] = "delays-20200509-13.csv" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateFaaSHTTPAttributes(includeStatus bool) map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeFaaSTrigger] = conventions.FaaSTriggerHTTP + attrMap[conventions.AttributeHTTPMethod] = "POST" + attrMap[conventions.AttributeHTTPScheme] = "https" + attrMap[conventions.AttributeHTTPHost] = "api.opentelemetry.io" + attrMap[conventions.AttributeHTTPTarget] = "/blog/posts" + attrMap[conventions.AttributeHTTPFlavor] = "2" + if includeStatus { + attrMap[conventions.AttributeHTTPStatusCode] = int64(201) + } + attrMap[conventions.AttributeHTTPUserAgent] = + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Safari/605.1.15" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateFaaSPubSubAttributes() map[string]interface{} { + attrMap 
:= make(map[string]interface{}) + attrMap[conventions.AttributeFaaSTrigger] = conventions.FaaSTriggerPubSub + attrMap[conventions.AttributeMessagingSystem] = "sqs" + attrMap[conventions.AttributeMessagingDestination] = "video-views-au" + attrMap[conventions.AttributeMessagingOperation] = "process" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateFaaSTimerAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeFaaSTrigger] = conventions.FaaSTriggerTimer + attrMap[conventions.AttributeFaaSExecution] = "73103A4C-E22F-4493-BDE8-EAE5CAB37B50" + attrMap[conventions.AttributeFaaSTime] = "2020-05-09T20:00:08Z" + attrMap[conventions.AttributeFaaSCron] = "0/15 * * * *" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateFaaSOtherAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeFaaSTrigger] = conventions.FaaSTriggerOther + attrMap["processed.count"] = int64(256) + attrMap["processed.data"] = 14.46 + attrMap["processed.errors"] = false + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateHTTPClientAttributes(includeStatus bool) map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeHTTPMethod] = "GET" + attrMap[conventions.AttributeHTTPURL] = "https://opentelemetry.io/registry/" + if includeStatus { + attrMap[conventions.AttributeHTTPStatusCode] = int64(200) + attrMap[conventions.AttributeHTTPStatusText] = "More Than OK" + } + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateHTTPServerAttributes(includeStatus bool) map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeHTTPMethod] = "POST" + attrMap[conventions.AttributeHTTPScheme] = "https" + attrMap[conventions.AttributeHTTPServerName] = "api22.opentelemetry.io" + attrMap[conventions.AttributeNetHostPort] = int64(443) + attrMap[conventions.AttributeHTTPTarget] = "/blog/posts" + attrMap[conventions.AttributeHTTPFlavor] = "2" + if includeStatus { + attrMap[conventions.AttributeHTTPStatusCode] = int64(201) + } + attrMap[conventions.AttributeHTTPUserAgent] = + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" + attrMap[conventions.AttributeHTTPRoute] = "/blog/posts" + attrMap[conventions.AttributeHTTPClientIP] = "2001:506:71f0:16e::1" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateMessagingProducerAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeMessagingSystem] = "nats" + attrMap[conventions.AttributeMessagingDestination] = "time.us.east.atlanta" + attrMap[conventions.AttributeMessagingDestinationKind] = "topic" + attrMap[conventions.AttributeMessagingMessageID] = "AA7C5438-D93A-43C8-9961-55613204648F" + attrMap["messaging.sequence"] = int64(1) + attrMap[conventions.AttributeNetPeerIP] = "10.10.212.33" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateMessagingConsumerAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeMessagingSystem] = "kafka" + attrMap[conventions.AttributeMessagingDestination] = "infrastructure-events-zone1" + attrMap[conventions.AttributeMessagingOperation] = "receive" + 
attrMap[conventions.AttributeNetPeerIP] = "2600:1700:1f00:11c0:4de0:c223:a800:4e87" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateGRPCClientAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeRPCService] = "PullRequestsService" + attrMap[conventions.AttributeNetPeerIP] = "2600:1700:1f00:11c0:4de0:c223:a800:4e87" + attrMap[conventions.AttributeNetHostPort] = int64(8443) + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateGRPCServerAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeRPCService] = "PullRequestsService" + attrMap[conventions.AttributeNetPeerIP] = "192.168.1.70" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateInternalAttributes() map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap["parameters"] = "account=7310,amount=1817.10" + attrMap[conventions.AttributeEnduserID] = "unittest" + return attrMap +} + +func generateMaxCountAttributes(includeStatus bool) map[string]interface{} { + attrMap := make(map[string]interface{}) + attrMap[conventions.AttributeHTTPMethod] = "POST" + attrMap[conventions.AttributeHTTPScheme] = "https" + attrMap[conventions.AttributeHTTPHost] = "api.opentelemetry.io" + attrMap[conventions.AttributeNetHostName] = "api22.opentelemetry.io" + attrMap[conventions.AttributeNetHostIP] = "2600:1700:1f00:11c0:1ced:afa5:fd88:9d48" + attrMap[conventions.AttributeNetHostPort] = int64(443) + attrMap[conventions.AttributeHTTPTarget] = "/blog/posts" + attrMap[conventions.AttributeHTTPFlavor] = "2" + if includeStatus { + attrMap[conventions.AttributeHTTPStatusCode] = int64(201) + attrMap[conventions.AttributeHTTPStatusText] = "Created" + } + attrMap[conventions.AttributeHTTPUserAgent] = + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" + attrMap[conventions.AttributeHTTPRoute] = "/blog/posts" + attrMap[conventions.AttributeHTTPClientIP] = "2600:1700:1f00:11c0:1ced:afa5:fd77:9d01" + attrMap[conventions.AttributePeerService] = "IdentifyImageService" + attrMap[conventions.AttributeNetPeerIP] = "2600:1700:1f00:11c0:1ced:afa5:fd77:9ddc" + attrMap[conventions.AttributeNetPeerPort] = int64(39111) + attrMap["ai-sampler.weight"] = 0.07 + attrMap["ai-sampler.absolute"] = false + attrMap["ai-sampler.maxhops"] = int64(6) + attrMap["application.create.location"] = "https://api.opentelemetry.io/blog/posts/806673B9-4F4D-4284-9635-3A3E3E3805BE" + stages := make([]otlpcommon.AnyValue, 3) + stages[0] = otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "Launch"}} + stages[1] = otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "Injestion"}} + stages[2] = otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "Validation"}} + attrMap["application.stages"] = &otlpcommon.ArrayValue{ + Values: stages, + } + subMap := make(map[string]interface{}) + subMap["UIx"] = false + subMap["UI4"] = true + subMap["flow-alt3"] = false + attrMap["application.abflags"] = &otlpcommon.KeyValueList{ + Values: convertMapToAttributeKeyValues(subMap), + } + attrMap["application.thread"] = "proc-pool-14" + attrMap["application.session"] = "" + attrMap["application.persist.size"] = int64(1172184) + attrMap["application.queue.size"] = int64(0) + attrMap["application.job.id"] = "0E38800B-9C4C-484E-8F2B-C7864D854321" + 
attrMap["application.service.sla"] = 0.34 + attrMap["application.service.slo"] = 0.55 + attrMap[conventions.AttributeEnduserID] = "unittest" + attrMap[conventions.AttributeEnduserRole] = "poweruser" + attrMap[conventions.AttributeEnduserScope] = "email profile administrator" + return attrMap +} + +func generateSpanEvents(eventCnt PICTInputSpanChild) []*otlptrace.Span_Event { + if SpanChildCountNil == eventCnt { + return nil + } + listSize := calculateListSize(eventCnt) + eventList := make([]*otlptrace.Span_Event, listSize) + for i := 0; i < listSize; i++ { + eventList[i] = generateSpanEvent(i) + } + return eventList +} + +func generateSpanLinks(linkCnt PICTInputSpanChild, random io.Reader) []*otlptrace.Span_Link { + if SpanChildCountNil == linkCnt { + return nil + } + listSize := calculateListSize(linkCnt) + linkList := make([]*otlptrace.Span_Link, listSize) + for i := 0; i < listSize; i++ { + linkList[i] = generateSpanLink(random, i) + } + return linkList +} + +func calculateListSize(listCnt PICTInputSpanChild) int { + switch listCnt { + case SpanChildCountOne: + return 1 + case SpanChildCountTwo: + return 2 + case SpanChildCountEight: + return 8 + case SpanChildCountEmpty: + fallthrough + default: + return 0 + } +} + +func generateSpanEvent(index int) *otlptrace.Span_Event { + t := time.Now().Add(-75 * time.Microsecond) + return &otlptrace.Span_Event{ + TimeUnixNano: uint64(t.UnixNano()), + Name: "message", + Attributes: generateEventAttributes(index), + DroppedAttributesCount: 0, + } +} + +func generateEventAttributes(index int) []otlpcommon.KeyValue { + if index%4 == 2 { + return nil + } + attrMap := make(map[string]interface{}) + if index%2 == 0 { + attrMap[conventions.AttributeMessageType] = "SENT" + } else { + attrMap[conventions.AttributeMessageType] = "RECEIVED" + } + attrMap[conventions.AttributeMessageID] = int64(index) + attrMap[conventions.AttributeMessageCompressedSize] = int64(17 * index) + attrMap[conventions.AttributeMessageUncompressedSize] = int64(24 * index) + if index%4 == 1 { + attrMap["app.inretry"] = true + attrMap["app.progress"] = 0.6 + attrMap["app.statemap"] = "14|5|202" + } + return convertMapToAttributeKeyValues(attrMap) +} + +func generateSpanLink(random io.Reader, index int) *otlptrace.Span_Link { + return &otlptrace.Span_Link{ + TraceId: generateTraceID(random), + SpanId: generateSpanID(random), + TraceState: "", + Attributes: generateLinkAttributes(index), + DroppedAttributesCount: 0, + } +} + +func generateLinkAttributes(index int) []otlpcommon.KeyValue { + if index%4 == 2 { + return nil + } + attrMap := generateMessagingConsumerAttributes() + if index%4 == 1 { + attrMap["app.inretry"] = true + attrMap["app.progress"] = 0.6 + attrMap["app.statemap"] = "14|5|202" + } + return convertMapToAttributeKeyValues(attrMap) +} diff --git a/internal/otel_collector/internal/goldendataset/span_generator_test.go b/internal/otel_collector/internal/goldendataset/span_generator_test.go new file mode 100644 index 00000000000..f7ebcdce642 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/span_generator_test.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goldendataset + +import ( + "crypto/rand" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/internal/data" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" +) + +func TestGenerateParentSpan(t *testing.T) { + random := rand.Reader + traceID := generateTraceID(random) + spanInputs := &PICTSpanInputs{ + Parent: SpanParentRoot, + Tracestate: TraceStateEmpty, + Kind: SpanKindServer, + Attributes: SpanAttrHTTPServer, + Events: SpanChildCountTwo, + Links: SpanChildCountOne, + Status: SpanStatusOk, + } + span := GenerateSpan(traceID, data.NewSpanID([8]byte{}), "/gotest-parent", spanInputs, random) + assert.Equal(t, traceID, span.TraceId) + assert.False(t, span.ParentSpanId.IsValid()) + assert.Equal(t, 11, len(span.Attributes)) + assert.Equal(t, otlptrace.Status_STATUS_CODE_OK, span.Status.Code) +} + +func TestGenerateChildSpan(t *testing.T) { + random := rand.Reader + traceID := generateTraceID(random) + parentID := generateSpanID(random) + spanInputs := &PICTSpanInputs{ + Parent: SpanParentChild, + Tracestate: TraceStateEmpty, + Kind: SpanKindClient, + Attributes: SpanAttrDatabaseSQL, + Events: SpanChildCountEmpty, + Links: SpanChildCountNil, + Status: SpanStatusOk, + } + span := GenerateSpan(traceID, parentID, "get_test_info", spanInputs, random) + assert.Equal(t, traceID, span.TraceId) + assert.Equal(t, parentID, span.ParentSpanId) + assert.Equal(t, 12, len(span.Attributes)) + assert.Equal(t, otlptrace.Status_STATUS_CODE_OK, span.Status.Code) +} + +func TestGenerateSpans(t *testing.T) { + random := rand.Reader + count1 := 16 + spans, nextPos, err := GenerateSpans(count1, 0, "testdata/generated_pict_pairs_spans.txt", random) + assert.Nil(t, err) + assert.Equal(t, count1, len(spans)) + count2 := 256 + spans, nextPos, err = GenerateSpans(count2, nextPos, "testdata/generated_pict_pairs_spans.txt", random) + assert.Nil(t, err) + assert.Equal(t, count2, len(spans)) + count3 := 118 + spans, _, err = GenerateSpans(count3, nextPos, "testdata/generated_pict_pairs_spans.txt", random) + assert.Nil(t, err) + assert.Equal(t, count3, len(spans)) +} diff --git a/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_metrics.txt b/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_metrics.txt new file mode 100644 index 00000000000..22395eb4e6d --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_metrics.txt @@ -0,0 +1,26 @@ +NumPtsPerMetric MetricType NumLabels NumResourceAttrs +OnePt IntGauge NoLabels NoAttrs +ManyPts NonMonotonicDoubleSum OneLabel OneAttr +OnePt IntHistogram ManyLabels OneAttr +ManyPts NonMonotonicDoubleSum NoLabels TwoAttrs +OnePt IntGauge OneLabel TwoAttrs +ManyPts NonMonotonicDoubleSum ManyLabels NoAttrs +OnePt DoubleGauge NoLabels OneAttr +ManyPts IntGauge ManyLabels OneAttr +ManyPts NonMonotonicIntSum ManyLabels TwoAttrs +ManyPts IntHistogram OneLabel NoAttrs +ManyPts MonotonicIntSum ManyLabels NoAttrs +OnePt DoubleHistogram OneLabel OneAttr +OnePt NonMonotonicIntSum NoLabels NoAttrs 
+ManyPts DoubleHistogram NoLabels TwoAttrs +ManyPts MonotonicDoubleSum OneLabel OneAttr +ManyPts DoubleGauge ManyLabels NoAttrs +OnePt MonotonicIntSum NoLabels TwoAttrs +OnePt IntHistogram NoLabels TwoAttrs +OnePt MonotonicDoubleSum ManyLabels NoAttrs +ManyPts DoubleGauge OneLabel TwoAttrs +ManyPts MonotonicIntSum OneLabel OneAttr +OnePt DoubleHistogram ManyLabels NoAttrs +OnePt NonMonotonicIntSum OneLabel OneAttr +OnePt NonMonotonicDoubleSum ManyLabels NoAttrs +ManyPts MonotonicDoubleSum NoLabels TwoAttrs diff --git a/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_spans.txt b/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_spans.txt new file mode 100644 index 00000000000..61d1bae4452 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_spans.txt @@ -0,0 +1,307 @@ +Parent Tracestate Kind Attributes Events Links Status +Child One Consumer FaaSDatasource Empty Nil AlreadyExists +Child Empty Unspecified gRPCClient Two One ResourceExhausted +Child Four Client gRPCClient Eight Eight DataLoss +Root Four Server FaaSHTTP One Empty ResourceExhausted +Child One Server FaaSOther Nil Two Unimplemented +Child One Unspecified HTTPClient Nil Eight InternalError +Root One Producer FaaSPubSub Two Empty Cancelled +Child One Client DatabaseSQL One One PermissionDenied +Child Four Unspecified FaaSTimer Empty Two FailedPrecondition +Child Empty Unspecified MessagingConsumer Eight Nil InvalidArgument +Root Empty Server FaaSTimer Two Eight AlreadyExists +Child One Internal Nil Eight Two ResourceExhausted +Child Empty Unspecified FaaSHTTP Nil Nil DeadlineExceeded +Child Empty Producer MessagingProducer Empty Empty Ok +Root Four Server HTTPServer Nil One Ok +Root Four Producer Empty One Nil OutOfRange +Child Empty Consumer FaaSDatasource One Two Unavailable +Child One Client gRPCClient Nil Empty OutOfRange +Child Empty Internal Internal One Eight FailedPrecondition +Root Empty Server FaaSTimer Eight One OutOfRange +Child Four Consumer FaaSDatasource Empty One Unauthenticated +Child Empty Client HTTPClient Two Nil DataLoss +Child Empty Unspecified FaaSPubSub Empty Two UnknownError +Child Four Client gRPCClient Two Two Ok +Child Four Unspecified HTTPClient Eight Empty Ok +Root One Server FaaSHTTP Empty Eight Aborted +Child One Client DatabaseNoSQL Nil One FailedPrecondition +Child Empty Client HTTPClient Empty One ResourceExhausted +Child Four Internal Nil Nil Empty AlreadyExists +Root Four Producer FaaSPubSub Eight One AlreadyExists +Child Four Client HTTPClient One Two InvalidArgument +Root One Server FaaSTimer Nil Nil UnknownError +Child Empty Unspecified HTTPServer One Two Cancelled +Child Four Server FaaSHTTP Eight One Cancelled +Child Empty Server FaaSTimer Nil Eight ResourceExhausted +Root One Server gRPCServer Nil Eight InvalidArgument +Child Four Unspecified gRPCServer Two Two Nil +Child One Consumer MessagingConsumer Nil Eight ResourceExhausted +Child One Unspecified MessagingProducer Two Nil FailedPrecondition +Child Four Consumer MessagingConsumer Two Empty Unavailable +Child One Producer FaaSPubSub One Nil Ok +Root Four Server MaxCount Empty Nil Cancelled +Root One Server HTTPServer Empty Eight DeadlineExceeded +Child One Consumer MessagingConsumer Empty Two FailedPrecondition +Child Empty Unspecified MaxCount Two One InvalidArgument +Child One Unspecified FaaSHTTP Two Two OutOfRange +Child Four Unspecified DatabaseSQL Eight Two Aborted +Child One Unspecified MaxCount Eight Eight UnknownError +Child 
Four Unspecified FaaSOther Eight Empty FailedPrecondition +Root One Server HTTPServer Eight Nil Unavailable +Root Empty Server MaxCount Nil Two Ok +Child Empty Consumer MessagingConsumer One One Aborted +Child One Client Empty Eight One Nil +Root Four Producer MessagingProducer Eight Eight PermissionDenied +Child Empty Internal Nil Empty Nil Nil +Child Empty Unspecified DatabaseNoSQL Two Two NotFound +Child Empty Client DatabaseSQL Nil Empty Nil +Child Four Producer FaaSPubSub Nil Eight ResourceExhausted +Child Empty Unspecified FaaSOther Two One DeadlineExceeded +Child Four Consumer FaaSDatasource Eight Empty InternalError +Root Empty Producer Empty Two Two ResourceExhausted +Root Four Server FaaSOther One Eight Nil +Child Four Internal Internal Two Nil PermissionDenied +Child One Client DatabaseSQL Empty Eight FailedPrecondition +Child Four Producer MessagingProducer One One InvalidArgument +Child Four Unspecified DatabaseNoSQL Empty Empty InvalidArgument +Child Four Unspecified DatabaseNoSQL One Nil ResourceExhausted +Child Empty Producer MessagingProducer Nil Nil Aborted +Child Empty Server gRPCServer Empty Empty Aborted +Child One Unspecified DatabaseNoSQL Eight One DataLoss +Root One Producer MessagingProducer Nil Two DataLoss +Root Four Producer FaaSPubSub Empty One FailedPrecondition +Child Four Client DatabaseNoSQL Empty Eight Unavailable +Child Four Consumer Nil One One NotFound +Root One Server Nil Two Eight DataLoss +Child Four Internal Internal Nil One UnknownError +Child One Producer FaaSPubSub Nil One Unavailable +Child Four Client DatabaseNoSQL Two Empty Unimplemented +Child One Unspecified FaaSOther Empty Empty UnknownError +Child One Client gRPCClient Empty Nil Nil +Child One Unspecified Internal Eight Two Nil +Child Four Unspecified FaaSDatasource Two Eight Ok +Child One Unspecified Empty Nil Empty Ok +Child One Consumer FaaSDatasource Empty Eight OutOfRange +Child Empty Consumer MessagingConsumer Eight One Unimplemented +Child Empty Unspecified Nil One Eight Unimplemented +Child Four Client gRPCClient One Nil Unimplemented +Child Empty Unspecified DatabaseSQL Two Nil Ok +Child One Client DatabaseNoSQL Nil Eight Unauthenticated +Child Four Internal Internal One Empty DeadlineExceeded +Child One Unspecified gRPCServer One Nil OutOfRange +Child Empty Unspecified MaxCount One Two AlreadyExists +Root Empty Server FaaSOther Nil Empty PermissionDenied +Child Four Internal Internal Empty Two InvalidArgument +Root Four Producer MessagingProducer Eight Two DeadlineExceeded +Root One Server FaaSOther Eight Nil NotFound +Child Empty Unspecified Nil Two One Unavailable +Child Four Internal Internal Nil Eight Ok +Child Four Producer Empty Empty Eight FailedPrecondition +Child One Server gRPCServer Eight One DeadlineExceeded +Child Four Consumer MessagingConsumer Two Nil Nil +Root Four Server gRPCServer Eight Two FailedPrecondition +Root Four Producer Empty Empty Nil Unavailable +Root Empty Server HTTPServer Two Empty Unauthenticated +Child Empty Unspecified FaaSHTTP One Empty DataLoss +Child Four Client DatabaseNoSQL One Nil DeadlineExceeded +Root One Producer FaaSPubSub Empty Nil Unimplemented +Root Empty Producer MessagingProducer One One InternalError +Child Empty Unspecified FaaSOther Two Empty AlreadyExists +Child Empty Unspecified DatabaseSQL Empty Nil ResourceExhausted +Child Four Unspecified gRPCClient Eight Nil Unauthenticated +Child Four Client HTTPClient Two Nil UnknownError +Child Four Unspecified HTTPServer Empty Two PermissionDenied +Root Four Producer MessagingProducer 
One Two AlreadyExists +Child One Unspecified HTTPClient Eight Two PermissionDenied +Child Four Consumer Nil Nil Two Ok +Child Empty Internal Internal Nil Empty NotFound +Child Four Unspecified FaaSDatasource Nil Two FailedPrecondition +Root One Server MaxCount Empty Empty InternalError +Child One Consumer Nil One Eight InvalidArgument +Child One Unspecified HTTPClient Empty Nil OutOfRange +Child Four Client HTTPClient Empty One DeadlineExceeded +Child Empty Client DatabaseSQL Nil Eight Cancelled +Child Four Internal Internal Nil Two Cancelled +Child Four Consumer MessagingConsumer Two Nil InternalError +Child Empty Consumer MessagingConsumer Eight Nil OutOfRange +Root Four Producer MessagingProducer Empty Two Unimplemented +Root One Server FaaSTimer One Empty InvalidArgument +Child Empty Client Empty Empty Eight NotFound +Child Four Unspecified FaaSOther Two Two InternalError +Child One Client DatabaseNoSQL One One Ok +Child One Unspecified MessagingConsumer One Empty Ok +Child Four Unspecified FaaSHTTP Two Empty NotFound +Root Empty Server FaaSTimer Empty Eight Unimplemented +Child One Unspecified FaaSPubSub Nil Nil PermissionDenied +Root Empty Server HTTPServer Eight Two InvalidArgument +Child Four Client HTTPClient One Two Unauthenticated +Child Empty Server gRPCServer One Nil InternalError +Root Empty Producer MessagingProducer Empty Eight OutOfRange +Child Four Producer MessagingProducer Eight Nil Nil +Child Empty Consumer FaaSDatasource Eight Empty Unimplemented +Child Empty Unspecified FaaSPubSub Empty Eight DataLoss +Child Four Unspecified MessagingConsumer Empty Empty AlreadyExists +Child Empty Producer FaaSPubSub One One NotFound +Child One Internal Internal Two Nil InternalError +Root Four Server FaaSTimer Nil One NotFound +Child Four Unspecified FaaSOther Nil One Unavailable +Child Empty Unspecified FaaSHTTP One Nil InternalError +Child Empty Unspecified gRPCServer Eight Nil AlreadyExists +Child One Client HTTPClient Nil One Unimplemented +Child One Client HTTPClient Empty Eight NotFound +Child Four Consumer FaaSDatasource One Eight UnknownError +Root Empty Producer MessagingProducer Two Two Unauthenticated +Child Empty Unspecified FaaSDatasource Two One Aborted +Child One Consumer MessagingConsumer Empty Nil DataLoss +Child One Consumer MessagingConsumer Eight One Cancelled +Child Empty Unspecified FaaSDatasource One Two DataLoss +Child Empty Client gRPCClient Empty Eight FailedPrecondition +Child Empty Unspecified Internal Eight Two ResourceExhausted +Child Empty Client gRPCClient One Nil InternalError +Child Empty Consumer Nil Two Nil PermissionDenied +Child Empty Producer FaaSPubSub One Eight OutOfRange +Child One Unspecified gRPCServer One Nil Ok +Child One Consumer FaaSDatasource One Empty DeadlineExceeded +Child One Unspecified FaaSDatasource Nil Eight NotFound +Child Empty Unspecified DatabaseNoSQL Empty Two PermissionDenied +Child One Unspecified FaaSHTTP Empty Empty UnknownError +Child Empty Server HTTPServer Empty One Aborted +Child Empty Unspecified HTTPClient Eight Eight Cancelled +Child Four Producer MessagingProducer One Empty Cancelled +Child Four Server MaxCount One Eight FailedPrecondition +Child Empty Internal Nil One Eight OutOfRange +Child One Unspecified gRPCServer Empty Two Cancelled +Child Four Server HTTPServer Nil Empty AlreadyExists +Child Four Unspecified Empty Two Two InvalidArgument +Root Empty Server HTTPServer Eight Two DataLoss +Child Empty Client gRPCClient Two Two Unavailable +Child Four Unspecified HTTPServer One One Nil +Child One Client 
gRPCClient Nil Eight DeadlineExceeded +Root One Server FaaSTimer Empty Eight Cancelled +Child Empty Consumer Nil Eight Eight Cancelled +Child Four Server FaaSTimer Eight Nil Ok +Root One Producer Empty Eight Empty UnknownError +Child One Client Empty Eight Nil AlreadyExists +Child Empty Internal Nil Eight Nil Unauthenticated +Child One Internal Nil Nil Eight DeadlineExceeded +Child One Producer Empty Two Two Cancelled +Child One Unspecified FaaSHTTP Eight Nil InvalidArgument +Child Empty Unspecified HTTPClient One One FailedPrecondition +Child One Unspecified HTTPServer Nil Empty ResourceExhausted +Child One Server Nil One Eight InternalError +Child Four Unspecified Empty Eight Nil Unauthenticated +Child Empty Unspecified MessagingConsumer Eight Two NotFound +Child Four Unspecified MaxCount Empty Eight NotFound +Child One Client gRPCClient One Two InvalidArgument +Child Four Unspecified DatabaseSQL Nil Empty InvalidArgument +Child Four Unspecified FaaSOther One Two OutOfRange +Child Empty Unspecified HTTPServer Two Nil FailedPrecondition +Child Empty Consumer FaaSDatasource Two Eight Nil +Child One Server FaaSTimer Nil One Aborted +Child Four Unspecified DatabaseNoSQL Two Empty UnknownError +Child Empty Server MaxCount Nil Nil OutOfRange +Child Four Unspecified FaaSTimer Nil Nil Unavailable +Child One Unspecified FaaSHTTP Eight Eight AlreadyExists +Child Empty Client DatabaseSQL Empty Eight UnknownError +Child One Producer Empty Eight Nil DeadlineExceeded +Child Empty Producer FaaSPubSub Empty One InternalError +Child Empty Unspecified gRPCClient Two One PermissionDenied +Child One Unspecified DatabaseSQL One Eight Unauthenticated +Child Four Client gRPCClient One Empty Cancelled +Child One Server MaxCount Empty Two Unimplemented +Child Empty Server Nil One Eight UnknownError +Root One Server gRPCServer Eight Eight DataLoss +Child Four Unspecified FaaSPubSub Two One Nil +Root One Server gRPCServer Nil Eight Unimplemented +Child One Server FaaSTimer Two Two Nil +Child Four Unspecified gRPCServer Two Eight Unauthenticated +Child Empty Server FaaSOther One Eight Unauthenticated +Child One Unspecified FaaSDatasource One Eight PermissionDenied +Child Empty Server Nil Two Two FailedPrecondition +Child One Unspecified Empty One Nil PermissionDenied +Child Four Internal Internal One Two Unimplemented +Child Empty Unspecified Empty Eight Two DataLoss +Child Empty Unspecified FaaSTimer Two Empty DeadlineExceeded +Child Empty Unspecified FaaSOther One Eight Aborted +Child One Unspecified FaaSOther One Nil ResourceExhausted +Child Empty Unspecified gRPCServer Two Nil PermissionDenied +Child Empty Unspecified MaxCount Eight Eight Aborted +Child One Consumer MessagingConsumer Two Nil Unauthenticated +Child Four Client Empty One One Unimplemented +Child Four Server MaxCount Two Eight PermissionDenied +Child One Unspecified FaaSDatasource Nil Nil ResourceExhausted +Child Empty Unspecified gRPCServer Eight Empty Unavailable +Child One Unspecified HTTPServer Nil One UnknownError +Child Four Internal Internal Nil Eight OutOfRange +Child One Unspecified FaaSOther One Nil Ok +Child Four Client DatabaseSQL Eight Two InternalError +Child Empty Unspecified DatabaseSQL One Eight NotFound +Child Empty Client DatabaseSQL One Nil OutOfRange +Child Four Server FaaSTimer Eight Empty Unauthenticated +Child Four Client DatabaseSQL One Nil AlreadyExists +Child Empty Unspecified HTTPServer Empty One InternalError +Root One Server MaxCount One One Nil +Child Four Unspecified MessagingProducer Two Nil ResourceExhausted 
+Child Four Client HTTPClient One Two Aborted +Child Empty Client DatabaseNoSQL Two Nil AlreadyExists +Child One Unspecified MaxCount Nil Empty DataLoss +Child One Internal Internal Empty Nil DataLoss +Child One Producer MessagingProducer One Two NotFound +Child One Unspecified FaaSTimer Two Two PermissionDenied +Root One Server FaaSOther Eight Empty Cancelled +Child Empty Client DatabaseSQL Empty One DeadlineExceeded +Child One Unspecified HTTPServer Two Eight Unimplemented +Child Four Client HTTPClient Nil Eight Nil +Root Empty Server MaxCount Nil Nil Unavailable +Child Four Internal Internal One One Aborted +Child One Unspecified FaaSHTTP Empty Nil PermissionDenied +Child One Unspecified FaaSHTTP Nil Two Unimplemented +Child One Unspecified MessagingConsumer Two Two PermissionDenied +Root One Server FaaSOther Nil Nil InvalidArgument +Child Empty Unspecified HTTPClient Empty Eight Unavailable +Child One Unspecified FaaSPubSub Eight Empty Unauthenticated +Child Empty Client gRPCClient Empty Empty AlreadyExists +Child One Unspecified DatabaseNoSQL One Empty InternalError +Root One Server FaaSHTTP One Empty Unauthenticated +Child Empty Server MaxCount Empty Empty ResourceExhausted +Child Four Client DatabaseSQL One Nil Unavailable +Root Four Server gRPCServer Nil Eight ResourceExhausted +Child Empty Internal Internal Nil Empty Unauthenticated +Child Four Unspecified HTTPServer Two Empty NotFound +Child Four Server MaxCount Two Eight Unauthenticated +Child Empty Unspecified MessagingConsumer Empty Two DeadlineExceeded +Child Four Client HTTPClient Two Two AlreadyExists +Child One Unspecified gRPCClient Nil Two NotFound +Child Empty Unspecified FaaSPubSub Nil Nil InvalidArgument +Child One Internal Internal Two Two AlreadyExists +Child Empty Consumer FaaSDatasource One Two InvalidArgument +Child Empty Server FaaSOther Nil Eight DataLoss +Child One Unspecified gRPCClient Nil Empty UnknownError +Child One Server Nil One Empty Aborted +Child Four Unspecified FaaSTimer One Two DataLoss +Child Empty Unspecified FaaSPubSub Empty One Aborted +Child One Unspecified FaaSHTTP Eight One Nil +Child One Client DatabaseSQL Eight Nil DataLoss +Child Empty Server HTTPServer Nil Eight OutOfRange +Child One Client gRPCClient Eight Two Aborted +Child One Unspecified DatabaseNoSQL Two Eight Nil +Child Four Client DatabaseNoSQL Eight Empty Aborted +Child Empty Internal Internal Eight One Unavailable +Child One Unspecified gRPCServer One Eight NotFound +Child Empty Unspecified FaaSHTTP One Two Ok +Child Four Unspecified gRPCServer One Empty UnknownError +Child Four Client DatabaseNoSQL One Nil Cancelled +Child Four Unspecified MessagingProducer Two Empty Unavailable +Child Empty Unspecified Empty Nil Eight Aborted +Child Four Server MaxCount Nil Nil DeadlineExceeded +Child Empty Client DatabaseSQL One Nil Unimplemented +Child Four Unspecified FaaSTimer Two Empty InternalError +Child Empty Unspecified DatabaseNoSQL One Eight OutOfRange +Root One Server FaaSHTTP Empty Empty Unavailable +Child One Unspecified FaaSDatasource Two Empty Cancelled +Child Empty Consumer MessagingConsumer Two One UnknownError +Child Empty Unspecified FaaSHTTP Two One FailedPrecondition +Child One Client Empty Two Nil InternalError +Root One Producer FaaSPubSub Eight Two DeadlineExceeded +Root One Producer MessagingProducer Empty Two UnknownError diff --git a/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_traces.txt b/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_traces.txt new 
file mode 100644 index 00000000000..6d3647b966d --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/testdata/generated_pict_pairs_traces.txt @@ -0,0 +1,33 @@ +Resource InstrumentationLibrary Spans +VMOnPrem None None +Nil One None +Exec One Several +Exec None All +Nil Two One +Empty Two Several +VMCloud Two All +K8sOnPrem None One +Empty Two None +Nil None Several +K8sOnPrem One None +K8sCloud One All +VMCloud One One +Nil None All +K8sOnPrem Two Several +K8sCloud Two One +Exec Two None +VMOnPrem Two One +K8sCloud None None +Faas One None +Faas Two Several +Exec One One +VMCloud None Several +Faas None All +Empty One One +K8sCloud None Several +VMOnPrem One All +VMOnPrem One Several +K8sOnPrem Two All +VMCloud Two None +Empty None All +Faas One One diff --git a/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt b/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt new file mode 100644 index 00000000000..9351db55e01 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt @@ -0,0 +1,4 @@ +NumPtsPerMetric: OnePt, ManyPts +MetricType: DoubleGauge, MonotonicDoubleSum, NonMonotonicDoubleSum, IntGauge, MonotonicIntSum, NonMonotonicIntSum, IntHistogram, DoubleHistogram +NumLabels: NoLabels, OneLabel, ManyLabels +NumResourceAttrs: NoAttrs, OneAttr, TwoAttrs diff --git a/internal/otel_collector/internal/goldendataset/testdata/pict_input_spans.txt b/internal/otel_collector/internal/goldendataset/testdata/pict_input_spans.txt new file mode 100644 index 00000000000..9430ce47e3e --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/testdata/pict_input_spans.txt @@ -0,0 +1,14 @@ +Parent: Root, Child +Tracestate: Empty, One, Four +Kind: Unspecified, Internal, Server, Client, Producer, Consumer +Attributes: Nil, Empty, DatabaseSQL, DatabaseNoSQL, FaaSDatasource, FaaSHTTP, FaaSPubSub, FaaSTimer, FaaSOther, HTTPClient, HTTPServer, MessagingProducer, MessagingConsumer, gRPCClient, gRPCServer, Internal, MaxCount +Events: Nil, Empty, One, Two, Eight +Links: Nil, Empty, One, Two, Eight +Status: Nil, Ok, Cancelled, UnknownError, InvalidArgument, DeadlineExceeded, NotFound, AlreadyExists, PermissionDenied, ResourceExhausted, FailedPrecondition, Aborted, OutOfRange, Unimplemented, InternalError, Unavailable, DataLoss, Unauthenticated + +IF [Parent] = "Root" THEN [Kind] in {"Server", "Producer"}; +IF [Kind] = "Internal" THEN [Attributes] in {"Nil", "Internal"}; +IF [Kind] = "Server" THEN [Attributes] in {"Nil", "FaaSHTTP", "FaaSTimer", "FaaSOther", "HTTPServer", "gRPCServer", "MaxCount"}; +IF [Kind] = "Client" THEN [Attributes] in {"Empty", "DatabaseSQL", "DatabaseNoSQL", "HTTPClient", "gRPCClient"}; +IF [Kind] = "Producer" THEN [Attributes] in {"Empty", "MessagingProducer", "FaaSPubSub"}; +IF [Kind] = "Consumer" THEN [Attributes] in {"Nil", "MessagingConsumer", "FaaSDatasource"}; diff --git a/internal/otel_collector/internal/goldendataset/testdata/pict_input_traces.txt b/internal/otel_collector/internal/goldendataset/testdata/pict_input_traces.txt new file mode 100644 index 00000000000..ea9f40ed8a8 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/testdata/pict_input_traces.txt @@ -0,0 +1,3 @@ +Resource: Nil, Empty, VMOnPrem, VMCloud, K8sOnPrem, K8sCloud, Faas, Exec +InstrumentationLibrary: None, One, Two +Spans: None, One, Several, All diff --git a/internal/otel_collector/internal/goldendataset/traces_generator.go 
b/internal/otel_collector/internal/goldendataset/traces_generator.go
new file mode 100644
index 00000000000..623a127dffb
--- /dev/null
+++ b/internal/otel_collector/internal/goldendataset/traces_generator.go
@@ -0,0 +1,138 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package goldendataset
+
+import (
+	"fmt"
+	"io"
+
+	otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"
+	otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1"
+)
+
+// GenerateResourceSpans generates a slice of OTLP ResourceSpans objects based on the PICT-generated pairwise
+// parameters defined in the parameters file specified by the tracePairsFile parameter. The pairs to generate
+// spans for are defined in the file specified by the spanPairsFile parameter. The random parameter injects the
+// random number generator to use in generating IDs and other random values.
+// The slice of ResourceSpans is returned. If an error is returned, the slice elements will be nil.
+func GenerateResourceSpans(tracePairsFile string, spanPairsFile string,
+	random io.Reader) ([]*otlptrace.ResourceSpans, error) {
+	pairsData, err := loadPictOutputFile(tracePairsFile)
+	if err != nil {
+		return nil, err
+	}
+	pairsTotal := len(pairsData) - 1
+	spans := make([]*otlptrace.ResourceSpans, pairsTotal)
+	for index, values := range pairsData {
+		if index == 0 {
+			continue
+		}
+		tracingInputs := &PICTTracingInputs{
+			Resource:               PICTInputResource(values[TracesColumnResource]),
+			InstrumentationLibrary: PICTInputInstrumentationLibrary(values[TracesColumnInstrumentationLibrary]),
+			Spans:                  PICTInputSpans(values[TracesColumnSpans]),
+		}
+		rscSpan, spanErr := GenerateResourceSpan(tracingInputs, spanPairsFile, random)
+		if spanErr != nil {
+			err = spanErr
+		}
+		spans[index-1] = rscSpan
+	}
+	return spans, err
+}
+
+// GenerateResourceSpan generates a single OTLP ResourceSpans populated based on the provided inputs. They are:
+//
+//	tracingInputs - the pairwise combination of field value variations for this ResourceSpans
+//	spanPairsFile - the file with the PICT-generated parameter combinations to generate spans for
+//	random - the random number generator to use in generating ID values
+//
+// The generated ResourceSpans is returned. If err is not nil, some or all of its fields will be nil.
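+//
+// For example (a minimal sketch; crypto/rand.Reader is just one possible
+// io.Reader to use as the ID source):
+//
+//	inputs := &PICTTracingInputs{
+//		Resource:               ResourceVMCloud,
+//		InstrumentationLibrary: LibraryOne,
+//		Spans:                  LibrarySpansOne,
+//	}
+//	rs, err := GenerateResourceSpan(inputs, "testdata/generated_pict_pairs_spans.txt", rand.Reader)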
+func GenerateResourceSpan(tracingInputs *PICTTracingInputs, spanPairsFile string, + random io.Reader) (*otlptrace.ResourceSpans, error) { + libSpans, err := generateLibrarySpansArray(tracingInputs, spanPairsFile, random) + return &otlptrace.ResourceSpans{ + Resource: GenerateResource(tracingInputs.Resource), + InstrumentationLibrarySpans: libSpans, + }, err +} + +func generateLibrarySpansArray(tracingInputs *PICTTracingInputs, spanPairsFile string, + random io.Reader) ([]*otlptrace.InstrumentationLibrarySpans, error) { + var count int + switch tracingInputs.InstrumentationLibrary { + case LibraryNone: + count = 1 + case LibraryOne: + count = 1 + case LibraryTwo: + count = 2 + } + var err error + libSpans := make([]*otlptrace.InstrumentationLibrarySpans, count) + for i := 0; i < count; i++ { + libSpans[i], err = generateLibrarySpans(tracingInputs, i, spanPairsFile, random) + } + return libSpans, err +} + +func generateLibrarySpans(tracingInputs *PICTTracingInputs, index int, spanPairsFile string, + random io.Reader) (*otlptrace.InstrumentationLibrarySpans, error) { + spanCaseCount, err := countTotalSpanCases(spanPairsFile) + if err != nil { + return nil, err + } + var spans []*otlptrace.Span + switch tracingInputs.Spans { + case LibrarySpansNone: + spans = make([]*otlptrace.Span, 0) + case LibrarySpansOne: + spans, _, err = GenerateSpans(1, 0, spanPairsFile, random) + case LibrarySpansSeveral: + spans, _, err = GenerateSpans(spanCaseCount/4, 0, spanPairsFile, random) + case LibrarySpansAll: + spans, _, err = GenerateSpans(spanCaseCount, 0, spanPairsFile, random) + default: + spans, _, err = GenerateSpans(16, 0, spanPairsFile, random) + } + return &otlptrace.InstrumentationLibrarySpans{ + InstrumentationLibrary: generateInstrumentationLibrary(tracingInputs, index), + Spans: spans, + }, err +} + +func countTotalSpanCases(spanPairsFile string) (int, error) { + pairsData, err := loadPictOutputFile(spanPairsFile) + if err != nil { + return 0, err + } + count := len(pairsData) - 1 + return count, err +} + +func generateInstrumentationLibrary(tracingInputs *PICTTracingInputs, index int) otlpcommon.InstrumentationLibrary { + if LibraryNone == tracingInputs.InstrumentationLibrary { + return otlpcommon.InstrumentationLibrary{} + } + nameStr := fmt.Sprintf("%s-%s-%s-%d", tracingInputs.Resource, tracingInputs.InstrumentationLibrary, + tracingInputs.Spans, index) + verStr := "semver:1.1.7" + if index > 0 { + verStr = "" + } + return otlpcommon.InstrumentationLibrary{ + Name: nameStr, + Version: verStr, + } +} diff --git a/internal/otel_collector/internal/goldendataset/traces_generator_test.go b/internal/otel_collector/internal/goldendataset/traces_generator_test.go new file mode 100644 index 00000000000..dbb8a009999 --- /dev/null +++ b/internal/otel_collector/internal/goldendataset/traces_generator_test.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package goldendataset + +import ( + "io" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGenerateTraces(t *testing.T) { + random := io.Reader(rand.New(rand.NewSource(42))) + rscSpans, err := GenerateResourceSpans("testdata/generated_pict_pairs_traces.txt", + "testdata/generated_pict_pairs_spans.txt", random) + assert.Nil(t, err) + assert.Equal(t, 32, len(rscSpans)) +} diff --git a/internal/otel_collector/internal/middleware/compression.go b/internal/otel_collector/internal/middleware/compression.go new file mode 100644 index 00000000000..13504e80953 --- /dev/null +++ b/internal/otel_collector/internal/middleware/compression.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "compress/gzip" + "compress/zlib" + "io" + "net/http" +) + +type ErrorHandler func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int) + +type decompressor struct { + errorHandler ErrorHandler +} + +type DecompressorOption func(d *decompressor) + +func WithErrorHandler(e ErrorHandler) DecompressorOption { + return func(d *decompressor) { + d.errorHandler = e + } +} + +// HTTPContentDecompressor is a middleware that offloads the task of handling compressed +// HTTP requests by identifying the compression format in the "Content-Encoding" header and re-writing +// request body so that the handlers further in the chain can work on decompressed data. +// It supports gzip and deflate/zlib compression. +func HTTPContentDecompressor(h http.Handler, opts ...DecompressorOption) http.Handler { + d := &decompressor{} + for _, o := range opts { + o(d) + } + if d.errorHandler == nil { + d.errorHandler = defaultErrorHandler + } + return d.wrap(h) +} + +func (d *decompressor) wrap(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + newBody, err := newBodyReader(r) + if err != nil { + d.errorHandler(w, r, err.Error(), http.StatusBadRequest) + return + } + if newBody != nil { + defer newBody.Close() + // "Content-Encoding" header is removed to avoid decompressing twice + // in case the next handler(s) have implemented a similar mechanism. + r.Header.Del("Content-Encoding") + // "Content-Length" is set to -1 as the size of the decompressed body is unknown. + r.Header.Del("Content-Length") + r.ContentLength = -1 + r.Body = newBody + } + h.ServeHTTP(w, r) + }) +} + +func newBodyReader(r *http.Request) (io.ReadCloser, error) { + switch r.Header.Get("Content-Encoding") { + case "gzip": + gr, err := gzip.NewReader(r.Body) + if err != nil { + return nil, err + } + return gr, nil + case "deflate", "zlib": + zr, err := zlib.NewReader(r.Body) + if err != nil { + return nil, err + } + return zr, nil + } + return nil, nil +} + +// defaultErrorHandler writes the error message in plain text. 
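+// It is used when no WithErrorHandler option is supplied to
+// HTTPContentDecompressor. A custom handler might look like this
+// (a minimal sketch; "next" stands for any downstream http.Handler):
+//
+//	h := HTTPContentDecompressor(next, WithErrorHandler(
+//		func(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) {
+//			http.Error(w, "decompression failed: "+errMsg, statusCode)
+//		}))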
+func defaultErrorHandler(w http.ResponseWriter, _ *http.Request, errMsg string, statusCode int) { + http.Error(w, errMsg, statusCode) +} diff --git a/internal/otel_collector/internal/middleware/compression_test.go b/internal/otel_collector/internal/middleware/compression_test.go new file mode 100644 index 00000000000..e8770ff4c3e --- /dev/null +++ b/internal/otel_collector/internal/middleware/compression_test.go @@ -0,0 +1,157 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "fmt" + "io/ioutil" + "net" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/testutil" +) + +func TestHTTPContentDecompressionHandler(t *testing.T) { + testBody := []byte("uncompressed_text") + tests := []struct { + name string + encoding string + reqBodyFunc func() (*bytes.Buffer, error) + respCode int + respBody string + }{ + { + name: "NoCompression", + encoding: "", + reqBodyFunc: func() (*bytes.Buffer, error) { + return bytes.NewBuffer(testBody), nil + }, + respCode: 200, + }, + { + name: "ValidGzip", + encoding: "gzip", + reqBodyFunc: func() (*bytes.Buffer, error) { + return compressGzip(testBody) + }, + respCode: 200, + }, + { + name: "ValidZlib", + encoding: "zlib", + reqBodyFunc: func() (*bytes.Buffer, error) { + return compressZlib(testBody) + }, + respCode: 200, + }, + { + name: "InvalidGzip", + encoding: "gzip", + reqBodyFunc: func() (*bytes.Buffer, error) { + return bytes.NewBuffer(testBody), nil + }, + respCode: 400, + respBody: "gzip: invalid header\n", + }, + + { + name: "InvalidZlib", + encoding: "zlib", + reqBodyFunc: func() (*bytes.Buffer, error) { + return bytes.NewBuffer(testBody), nil + }, + respCode: 400, + respBody: "zlib: invalid header\n", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err, "failed to read request body: %v", err) + assert.EqualValues(t, testBody, string(body)) + w.WriteHeader(200) + }) + + addr := testutil.GetAvailableLocalAddress(t) + ln, err := net.Listen("tcp", addr) + require.NoError(t, err, "failed to create listener: %v", err) + srv := &http.Server{ + Handler: HTTPContentDecompressor(handler), + } + go func() { + _ = srv.Serve(ln) + }() + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + serverURL := fmt.Sprintf("http://%s", ln.Addr().String()) + reqBody, err := tt.reqBodyFunc() + require.NoError(t, err, "failed to generate request body: %v", err) + + req, err := http.NewRequest("GET", serverURL, reqBody) + require.NoError(t, err, "failed to create request to test handler") + req.Header.Set("Content-Encoding", tt.encoding) + + client := http.Client{} + res, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, tt.respCode, res.StatusCode, 
"test handler returned unexpected status code ") + if tt.respBody != "" { + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, res.Body.Close(), "failed to close request body: %v", err) + assert.Equal(t, tt.respBody, string(body)) + } + require.NoError(t, srv.Close()) + }) + } +} + +func compressGzip(body []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + + gw := gzip.NewWriter(&buf) + defer gw.Close() + + _, err := gw.Write(body) + if err != nil { + return nil, err + } + + return &buf, nil +} + +func compressZlib(body []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + + zw := zlib.NewWriter(&buf) + defer zw.Close() + + _, err := zw.Write(body) + if err != nil { + return nil, err + } + + return &buf, nil +} diff --git a/internal/otel_collector/internal/otlp_wrapper.go b/internal/otel_collector/internal/otlp_wrapper.go new file mode 100644 index 00000000000..5b1fa36bd09 --- /dev/null +++ b/internal/otel_collector/internal/otlp_wrapper.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" + +// OtlpLogsWrapper is an intermediary struct that is declared in an internal package +// as a way to prevent certain functions of pdata.Logs data type to be callable by +// any code outside of this module. +type OtlpLogsWrapper struct { + Orig *[]*otlplogs.ResourceLogs +} + +func LogsToOtlp(l OtlpLogsWrapper) []*otlplogs.ResourceLogs { + return *l.Orig +} + +func LogsFromOtlp(logs []*otlplogs.ResourceLogs) OtlpLogsWrapper { + return OtlpLogsWrapper{Orig: &logs} +} diff --git a/internal/otel_collector/internal/processor/filterconfig/config.go b/internal/otel_collector/internal/processor/filterconfig/config.go new file mode 100644 index 00000000000..c18f23beedf --- /dev/null +++ b/internal/otel_collector/internal/processor/filterconfig/config.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterconfig + +import ( + "errors" + + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +// MatchConfig has two optional MatchProperties one to define what is processed +// by the processor, captured under the 'include' and the second, exclude, to +// define what is excluded from the processor. 
+type MatchConfig struct {
+	// Include specifies the set of span/log properties that must be present in order
+	// for this processor to apply to it.
+	// Note: If `exclude` is specified, the span/log is compared against those
+	// properties after the `include` properties.
+	// This is an optional field. If neither `include` nor `exclude` is set, all span/logs
+	// are processed. If `include` is set and `exclude` isn't set, then all
+	// span/logs matching the properties in this structure are processed.
+	Include *MatchProperties `mapstructure:"include"`
+
+	// Exclude specifies when this processor will not be applied to the span/logs
+	// which match the specified properties.
+	// Note: The `exclude` properties are checked after the `include` properties,
+	// if present, have been checked.
+	// If `include` isn't specified, the `exclude` properties are checked against
+	// all span/logs.
+	// This is an optional field. If neither `include` nor `exclude` is set, all span/logs
+	// are processed. If `exclude` is set and `include` isn't set, then all
+	// span/logs that do not match the properties in this structure are processed.
+	Exclude *MatchProperties `mapstructure:"exclude"`
+}
+
+// MatchProperties specifies the set of properties in a span/log to match
+// against and whether the span/log should be included or excluded from the
+// processor. At least one of services (spans only), span/log names or
+// attributes must be specified. It is supported to have all specified, but
+// this requires all of the properties to match for the inclusion/exclusion to
+// occur.
+// The following are examples of invalid configurations:
+//     attributes/bad1:
+//       # This is invalid because include is specified with neither services nor
+//       # attributes.
+//       include:
+//       actions: ...
+//
+//     span/bad2:
+//       exclude:
+//         # This is invalid because services, span_names and attributes have empty values.
+//         services:
+//         span_names:
+//         attributes:
+//       actions: ...
+// Please refer to processor/attributesprocessor/testdata/config.yaml and
+// processor/spanprocessor/testdata/config.yaml for valid configurations.
+type MatchProperties struct {
+	// Config configures the matching patterns used when matching span properties.
+	filterset.Config `mapstructure:",squash"`
+
+	// Note: For spans, one of Services, SpanNames, Attributes, Resources or Libraries must be specified with a
+	// non-empty value for a valid configuration.
+
+	// For logs, one of LogNames, Attributes, Resources or Libraries must be specified with a
+	// non-empty value for a valid configuration.
+
+	// Services specify the list of items to match service name against.
+	// A match occurs if the span's service name matches at least one item in this list.
+	// This is an optional field.
+	Services []string `mapstructure:"services"`
+
+	// SpanNames specify the list of items to match span name against.
+	// A match occurs if the span name matches at least one item in this list.
+	// This is an optional field.
+	SpanNames []string `mapstructure:"span_names"`
+
+	// LogNames is a list of strings that the LogRecord's name field must match
+	// against.
+	LogNames []string `mapstructure:"log_names"`
+
+	// Attributes specifies the list of attributes to match against.
+	// All of these attributes must match exactly for a match to occur.
+	// Only match_type=strict is allowed if "attributes" are specified.
+	// This is an optional field.
+	Attributes []Attribute `mapstructure:"attributes"`
+
+	// Resources specify the list of items to match the resources against.
+	// A match occurs if the span's resources match at least one item in this list.
+	// This is an optional field.
+	Resources []Attribute `mapstructure:"resources"`
+
+	// Libraries specify the list of items to match the implementation library against.
+	// A match occurs if the span's implementation library matches at least one item in this list.
+	// This is an optional field.
+	Libraries []InstrumentationLibrary `mapstructure:"libraries"`
+}
+
+// ValidateForSpans validates the match properties for use with trace spans.
+func (mp *MatchProperties) ValidateForSpans() error {
+	if len(mp.LogNames) > 0 {
+		return errors.New("log_names should not be specified for trace spans")
+	}
+
+	if len(mp.Services) == 0 && len(mp.SpanNames) == 0 && len(mp.Attributes) == 0 &&
+		len(mp.Libraries) == 0 && len(mp.Resources) == 0 {
+		return errors.New(`at least one of "services", "span_names", "attributes", "libraries" or "resources" field must be specified`)
+	}
+
+	return nil
+}
+
+// ValidateForLogs validates the match properties for use with log records.
+func (mp *MatchProperties) ValidateForLogs() error {
+	if len(mp.SpanNames) > 0 || len(mp.Services) > 0 {
+		return errors.New("neither services nor span_names should be specified for log records")
+	}
+
+	if len(mp.LogNames) == 0 && len(mp.Attributes) == 0 && len(mp.Libraries) == 0 && len(mp.Resources) == 0 {
+		return errors.New(`at least one of "log_names", "attributes", "libraries" or "resources" field must be specified`)
+	}
+
+	return nil
+}
+
+// AttributesFieldName is the mapstructure field name for the MatchProperties.Attributes field.
+const AttributesFieldName = "attributes"
+
+// Attribute specifies the attribute key and optional value to match against.
+type Attribute struct {
+	// Key specifies the attribute key.
+	Key string `mapstructure:"key"`
+
+	// Value specifies the value to match against.
+	// If it is not set, any value will match.
+	Value interface{} `mapstructure:"value"`
+}
+
+// InstrumentationLibrary specifies the instrumentation library and optional version to match against.
+type InstrumentationLibrary struct {
+	Name string `mapstructure:"name"`
+	// version match semantics (an empty cell denotes an unset/blank value):
+	//  expected  actual  match
+	//  nil               yes
+	//  nil       1       yes
+	//                    yes
+	//            1       no
+	//  1                 no
+	//  1         1       yes
+	Version *string `mapstructure:"version"`
+}
diff --git a/internal/otel_collector/internal/processor/filterconfig/config_test.go b/internal/otel_collector/internal/processor/filterconfig/config_test.go
new file mode 100644
index 00000000000..985f71ae908
--- /dev/null
+++ b/internal/otel_collector/internal/processor/filterconfig/config_test.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
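+
+// An illustrative sketch (not part of the upstream sources): building match
+// properties in code and validating them for spans. The service and attribute
+// values are placeholders.
+//
+//	props := &MatchProperties{
+//		Config:   filterset.Config{MatchType: filterset.Strict},
+//		Services: []string{"auth-service"},
+//		Attributes: []Attribute{
+//			{Key: "environment", Value: "production"},
+//		},
+//	}
+//	if err := props.ValidateForSpans(); err != nil {
+//		// reject the processor configuration
+//	}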
+ +package filterconfig diff --git a/internal/otel_collector/internal/processor/filterexpr/matcher.go b/internal/otel_collector/internal/processor/filterexpr/matcher.go new file mode 100644 index 00000000000..8f92c59af3c --- /dev/null +++ b/internal/otel_collector/internal/processor/filterexpr/matcher.go @@ -0,0 +1,172 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterexpr + +import ( + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +type Matcher struct { + program *vm.Program + v vm.VM +} + +type env struct { + MetricName string + // TODO: replace this with GetLabel func(key string) (string,bool) + HasLabel func(key string) bool + Label func(key string) string +} + +func NewMatcher(expression string) (*Matcher, error) { + program, err := expr.Compile(expression) + if err != nil { + return nil, err + } + return &Matcher{program: program, v: vm.VM{}}, nil +} + +func (m *Matcher) MatchMetric(metric pdata.Metric) (bool, error) { + metricName := metric.Name() + switch metric.DataType() { + case pdata.MetricDataTypeIntGauge: + return m.matchIntGauge(metricName, metric.IntGauge()) + case pdata.MetricDataTypeDoubleGauge: + return m.matchDoubleGauge(metricName, metric.DoubleGauge()) + case pdata.MetricDataTypeIntSum: + return m.matchIntSum(metricName, metric.IntSum()) + case pdata.MetricDataTypeDoubleSum: + return m.matchDoubleSum(metricName, metric.DoubleSum()) + case pdata.MetricDataTypeIntHistogram: + return m.matchIntHistogram(metricName, metric.IntHistogram()) + case pdata.MetricDataTypeDoubleHistogram: + return m.matchDoubleHistogram(metricName, metric.DoubleHistogram()) + default: + return false, nil + } +} + +func (m *Matcher) matchIntGauge(metricName string, gauge pdata.IntGauge) (bool, error) { + pts := gauge.DataPoints() + for i := 0; i < pts.Len(); i++ { + matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) + if err != nil { + return false, err + } + if matched { + return true, nil + } + } + return false, nil +} + +func (m *Matcher) matchDoubleGauge(metricName string, gauge pdata.DoubleGauge) (bool, error) { + pts := gauge.DataPoints() + for i := 0; i < pts.Len(); i++ { + matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) + if err != nil { + return false, err + } + if matched { + return true, nil + } + } + return false, nil +} + +func (m *Matcher) matchDoubleSum(metricName string, sum pdata.DoubleSum) (bool, error) { + pts := sum.DataPoints() + for i := 0; i < pts.Len(); i++ { + matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) + if err != nil { + return false, err + } + if matched { + return true, nil + } + } + return false, nil +} + +func (m *Matcher) matchIntSum(metricName string, sum pdata.IntSum) (bool, error) { + pts := sum.DataPoints() + for i := 0; i < pts.Len(); i++ { + matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) + if err != nil { + return false, err + } + if matched { + return true, nil + } + 
} + return false, nil +} + +func (m *Matcher) matchIntHistogram(metricName string, histogram pdata.IntHistogram) (bool, error) { + pts := histogram.DataPoints() + for i := 0; i < pts.Len(); i++ { + matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) + if err != nil { + return false, err + } + if matched { + return true, nil + } + } + return false, nil +} + +func (m *Matcher) matchDoubleHistogram(metricName string, histogram pdata.DoubleHistogram) (bool, error) { + pts := histogram.DataPoints() + for i := 0; i < pts.Len(); i++ { + matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) + if err != nil { + return false, err + } + if matched { + return true, nil + } + } + return false, nil +} + +func (m *Matcher) matchEnv(metricName string, labelsMap pdata.StringMap) (bool, error) { + return m.match(createEnv(metricName, labelsMap)) +} + +func createEnv(metricName string, labelsMap pdata.StringMap) env { + return env{ + MetricName: metricName, + HasLabel: func(key string) bool { + _, ok := labelsMap.Get(key) + return ok + }, + Label: func(key string) string { + v, _ := labelsMap.Get(key) + return v + }, + } +} + +func (m *Matcher) match(env env) (bool, error) { + result, err := m.v.Run(m.program, env) + if err != nil { + return false, err + } + return result.(bool), nil +} diff --git a/internal/otel_collector/internal/processor/filterexpr/matcher_test.go b/internal/otel_collector/internal/processor/filterexpr/matcher_test.go new file mode 100644 index 00000000000..3eb5829bb45 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterexpr/matcher_test.go @@ -0,0 +1,345 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
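+
+// An illustrative sketch (not part of the upstream sources): compiling an expr
+// expression once and evaluating it against a metric. The expression, label
+// names and metric are placeholders.
+//
+//	matcher, err := NewMatcher(`MetricName == 'http.requests' && Label("code") == "500"`)
+//	if err != nil {
+//		// invalid expression
+//	}
+//	matched, err := matcher.MatchMetric(metric) // metric is a pdata.Metric
+//	// matched is true if any data point of the metric satisfies the expression.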
+ +package filterexpr + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestCompileExprError(t *testing.T) { + _, err := NewMatcher("") + require.Error(t, err) +} + +func TestRunExprError(t *testing.T) { + matcher, err := NewMatcher("foo") + require.NoError(t, err) + matched, _ := matcher.match(env{}) + require.False(t, matched) +} + +func TestUnknownDataType(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(-1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.False(t, matched) +} + +func TestNilIntGauge(t *testing.T) { + dataType := pdata.MetricDataTypeIntGauge + testNilValue(t, dataType) +} + +func TestNilDoubleGauge(t *testing.T) { + dataType := pdata.MetricDataTypeDoubleGauge + testNilValue(t, dataType) +} + +func TestNilDoubleSum(t *testing.T) { + dataType := pdata.MetricDataTypeDoubleSum + testNilValue(t, dataType) +} + +func TestNilIntSum(t *testing.T) { + dataType := pdata.MetricDataTypeIntSum + testNilValue(t, dataType) +} + +func TestNilIntHistogram(t *testing.T) { + dataType := pdata.MetricDataTypeIntHistogram + testNilValue(t, dataType) +} + +func TestNilDoubleHistogram(t *testing.T) { + dataType := pdata.MetricDataTypeDoubleHistogram + testNilValue(t, dataType) +} + +func testNilValue(t *testing.T, dataType pdata.MetricDataType) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(dataType) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.False(t, matched) +} + +func TestIntGaugeEmptyDataPoint(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeIntGauge) + dps := m.IntGauge().DataPoints() + dps.Resize(1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestDoubleGaugeEmptyDataPoint(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeDoubleGauge) + dps := m.DoubleGauge().DataPoints() + dps.Resize(1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestDoubleSumEmptyDataPoint(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeDoubleSum) + dps := m.DoubleSum().DataPoints() + dps.Resize(1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestIntSumEmptyDataPoint(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeIntSum) + dps := m.IntSum().DataPoints() + dps.Resize(1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestIntHistogramEmptyDataPoint(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeIntHistogram) + dps := 
m.IntHistogram().DataPoints() + dps.Resize(1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestDoubleHistogramEmptyDataPoint(t *testing.T) { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeDoubleHistogram) + dps := m.DoubleHistogram().DataPoints() + dps.Resize(1) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestMatchIntGaugeByMetricName(t *testing.T) { + expression := `MetricName == 'my.metric'` + assert.True(t, testMatchIntGauge(t, "my.metric", expression, nil)) +} + +func TestNonMatchIntGaugeByMetricName(t *testing.T) { + expression := `MetricName == 'my.metric'` + assert.False(t, testMatchIntGauge(t, "foo.metric", expression, nil)) +} + +func TestNonMatchIntGaugeDataPointByMetricAndHasLabel(t *testing.T) { + expression := `MetricName == 'my.metric' && HasLabel("foo")` + assert.False(t, testMatchIntGauge(t, "foo.metric", expression, nil)) +} + +func TestMatchIntGaugeDataPointByMetricAndHasLabel(t *testing.T) { + expression := `MetricName == 'my.metric' && HasLabel("foo")` + assert.True(t, testMatchIntGauge(t, "my.metric", expression, map[string]string{"foo": ""})) +} + +func TestMatchIntGaugeDataPointByMetricAndLabelValue(t *testing.T) { + expression := `MetricName == 'my.metric' && Label("foo") == "bar"` + assert.False(t, testMatchIntGauge(t, "my.metric", expression, map[string]string{"foo": ""})) +} + +func TestNonMatchIntGaugeDataPointByMetricAndLabelValue(t *testing.T) { + expression := `MetricName == 'my.metric' && Label("foo") == "bar"` + assert.False(t, testMatchIntGauge(t, "my.metric", expression, map[string]string{"foo": ""})) +} + +func testMatchIntGauge(t *testing.T, metricName, expression string, lbls map[string]string) bool { + matcher, err := NewMatcher(expression) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName(metricName) + m.SetDataType(pdata.MetricDataTypeIntGauge) + dps := m.IntGauge().DataPoints() + dps.Resize(1) + pt := dps.At(0) + if lbls != nil { + pt.LabelsMap().InitFromMap(lbls) + } + match, err := matcher.MatchMetric(m) + assert.NoError(t, err) + return match +} + +func TestMatchIntGaugeDataPointByMetricAndSecondPointLabelValue(t *testing.T) { + matcher, err := NewMatcher( + `MetricName == 'my.metric' && Label("baz") == "glarch"`, + ) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName("my.metric") + m.SetDataType(pdata.MetricDataTypeIntGauge) + dps := m.IntGauge().DataPoints() + dps.Resize(2) + + pt1 := dps.At(0) + pt1.LabelsMap().Insert("foo", "bar") + + pt2 := dps.At(1) + pt2.LabelsMap().Insert("baz", "glarch") + + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + assert.True(t, matched) +} + +func TestMatchDoubleGaugeByMetricName(t *testing.T) { + assert.True(t, testMatchDoubleGauge(t, "my.metric")) +} + +func TestNonMatchDoubleGaugeByMetricName(t *testing.T) { + assert.False(t, testMatchDoubleGauge(t, "foo.metric")) +} + +func testMatchDoubleGauge(t *testing.T, metricName string) bool { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName(metricName) + m.SetDataType(pdata.MetricDataTypeDoubleGauge) + dps := m.DoubleGauge().DataPoints() + pt := pdata.NewDoubleDataPoint() + dps.Append(pt) + match, err := matcher.MatchMetric(m) + assert.NoError(t, err) + return match +} + +func TestMatchDoubleSumByMetricName(t 
*testing.T) { + assert.True(t, matchDoubleSum(t, "my.metric")) +} + +func TestNonMatchDoubleSumByMetricName(t *testing.T) { + assert.False(t, matchDoubleSum(t, "foo.metric")) +} + +func matchDoubleSum(t *testing.T, metricName string) bool { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName(metricName) + m.SetDataType(pdata.MetricDataTypeDoubleSum) + dps := m.DoubleSum().DataPoints() + pt := pdata.NewDoubleDataPoint() + dps.Append(pt) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + return matched +} + +func TestMatchIntSumByMetricName(t *testing.T) { + assert.True(t, matchIntSum(t, "my.metric")) +} + +func TestNonMatchIntSumByMetricName(t *testing.T) { + assert.False(t, matchIntSum(t, "foo.metric")) +} + +func matchIntSum(t *testing.T, metricName string) bool { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName(metricName) + m.SetDataType(pdata.MetricDataTypeIntSum) + dps := m.IntSum().DataPoints() + pt := pdata.NewIntDataPoint() + dps.Append(pt) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + return matched +} + +func TestMatchIntHistogramByMetricName(t *testing.T) { + assert.True(t, matchIntHistogram(t, "my.metric")) +} + +func TestNonMatchIntHistogramByMetricName(t *testing.T) { + assert.False(t, matchIntHistogram(t, "foo.metric")) +} + +func matchIntHistogram(t *testing.T, metricName string) bool { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName(metricName) + m.SetDataType(pdata.MetricDataTypeIntHistogram) + dps := m.IntHistogram().DataPoints() + pt := pdata.NewIntHistogramDataPoint() + dps.Append(pt) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + return matched +} + +func TestMatchDoubleHistogramByMetricName(t *testing.T) { + assert.True(t, matchDoubleHistogram(t, "my.metric")) +} + +func TestNonMatchDoubleHistogramByMetricName(t *testing.T) { + assert.False(t, matchDoubleHistogram(t, "foo.metric")) +} + +func matchDoubleHistogram(t *testing.T, metricName string) bool { + matcher, err := NewMatcher(`MetricName == 'my.metric'`) + require.NoError(t, err) + m := pdata.NewMetric() + m.SetName(metricName) + m.SetDataType(pdata.MetricDataTypeDoubleHistogram) + dps := m.DoubleHistogram().DataPoints() + pt := pdata.NewDoubleHistogramDataPoint() + dps.Append(pt) + matched, err := matcher.MatchMetric(m) + assert.NoError(t, err) + return matched +} diff --git a/internal/otel_collector/internal/processor/filterhelper/filterhelper.go b/internal/otel_collector/internal/processor/filterhelper/filterhelper.go new file mode 100644 index 00000000000..fca006d36c4 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterhelper/filterhelper.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package filterhelper
+
+import (
+	"fmt"
+
+	"github.com/spf13/cast"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// NewAttributeValueRaw converts the raw `value` from an ActionKeyValue to one of the
+// supported trace attribute values. If the returned error is non-nil, the returned
+// value is invalid and calling any function on it will cause a panic.
+func NewAttributeValueRaw(value interface{}) (pdata.AttributeValue, error) {
+	switch val := value.(type) {
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+		return pdata.NewAttributeValueInt(cast.ToInt64(val)), nil
+	case float32, float64:
+		return pdata.NewAttributeValueDouble(cast.ToFloat64(val)), nil
+	case string:
+		return pdata.NewAttributeValueString(val), nil
+	case bool:
+		return pdata.NewAttributeValueBool(val), nil
+	default:
+		return pdata.AttributeValue{}, fmt.Errorf("error unsupported value type \"%T\"", value)
+	}
+}
diff --git a/internal/otel_collector/internal/processor/filterhelper/filterhelper_test.go b/internal/otel_collector/internal/processor/filterhelper/filterhelper_test.go
new file mode 100644
index 00000000000..6d0eeba9886
--- /dev/null
+++ b/internal/otel_collector/internal/processor/filterhelper/filterhelper_test.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
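+
+// An illustrative sketch (not part of the upstream sources) of the conversions
+// NewAttributeValueRaw performs:
+//
+//	v, _ := NewAttributeValueRaw(uint16(123))  // INT attribute value 123
+//	v, _ = NewAttributeValueRaw(3.14)          // DOUBLE attribute value 3.14
+//	v, _ = NewAttributeValueRaw("bob")         // STRING attribute value "bob"
+//	_, err := NewAttributeValueRaw([]string{}) // error: unsupported value type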
+ +package filterhelper + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestHelper_AttributeValue(t *testing.T) { + val, err := NewAttributeValueRaw(uint8(123)) + assert.Equal(t, pdata.NewAttributeValueInt(123), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw(uint16(123)) + assert.Equal(t, pdata.NewAttributeValueInt(123), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw(int8(123)) + assert.Equal(t, pdata.NewAttributeValueInt(123), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw(int16(123)) + assert.Equal(t, pdata.NewAttributeValueInt(123), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw(float32(234.129312)) + assert.Equal(t, pdata.NewAttributeValueDouble(float64(float32(234.129312))), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw(234.129312) + assert.Equal(t, pdata.NewAttributeValueDouble(234.129312), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw(true) + assert.Equal(t, pdata.NewAttributeValueBool(true), val) + assert.NoError(t, err) + + val, err = NewAttributeValueRaw("bob the builder") + assert.Equal(t, pdata.NewAttributeValueString("bob the builder"), val) + assert.NoError(t, err) + + _, err = NewAttributeValueRaw(nil) + assert.Error(t, err) + + _, err = NewAttributeValueRaw(t) + assert.Error(t, err) +} diff --git a/internal/otel_collector/internal/processor/filterlog/filterlog.go b/internal/otel_collector/internal/processor/filterlog/filterlog.go new file mode 100644 index 00000000000..132afaaf1c1 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterlog/filterlog.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterlog + +import ( + "fmt" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filtermatcher" + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +// TODO: Modify Matcher to invoke both the include and exclude properties so +// calling processors will always have the same logic. +// Matcher is an interface that allows matching a log record against a +// configuration of a match. +type Matcher interface { + MatchLogRecord(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationLibrary) bool +} + +// propertiesMatcher allows matching a log record against various log record properties. +type propertiesMatcher struct { + filtermatcher.PropertiesMatcher + + // log names to compare to. + nameFilters filterset.FilterSet +} + +// NewMatcher creates a LogRecord Matcher that matches based on the given MatchProperties. 
+func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) {
+	if mp == nil {
+		return nil, nil
+	}
+
+	if err := mp.ValidateForLogs(); err != nil {
+		return nil, err
+	}
+
+	rm, err := filtermatcher.NewMatcher(mp)
+	if err != nil {
+		return nil, err
+	}
+
+	var nameFS filterset.FilterSet
+	if len(mp.LogNames) > 0 {
+		nameFS, err = filterset.CreateFilterSet(mp.LogNames, &mp.Config)
+		if err != nil {
+			return nil, fmt.Errorf("error creating log record name filters: %v", err)
+		}
+	}
+
+	return &propertiesMatcher{
+		PropertiesMatcher: rm,
+		nameFilters:       nameFS,
+	}, nil
+}
+
+// MatchLogRecord matches a log record against a set of properties.
+// The log record names are matched first, if specified.
+// The attribute, resource and library properties are then checked, if specified.
+// At least one of log record names or attributes must be specified. It is
+// supported to have more than one of these specified, and all specified must
+// evaluate to true for a match to occur.
+func (mp *propertiesMatcher) MatchLogRecord(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationLibrary) bool {
+	if mp.nameFilters != nil && !mp.nameFilters.Matches(lr.Name()) {
+		return false
+	}
+
+	return mp.PropertiesMatcher.Match(lr.Attributes(), resource, library)
+}
diff --git a/internal/otel_collector/internal/processor/filterlog/filterlog_test.go b/internal/otel_collector/internal/processor/filterlog/filterlog_test.go
new file mode 100644
index 00000000000..ef14afed163
--- /dev/null
+++ b/internal/otel_collector/internal/processor/filterlog/filterlog_test.go
@@ -0,0 +1,183 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
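+
+// An illustrative sketch (not part of the upstream sources): building a log
+// record matcher that includes only audit logs. The pattern and variable
+// names are placeholders.
+//
+//	m, err := NewMatcher(&filterconfig.MatchProperties{
+//		Config:   filterset.Config{MatchType: filterset.Regexp},
+//		LogNames: []string{`audit\..*`},
+//	})
+//	if err != nil {
+//		// invalid match configuration
+//	}
+//	ok := m.MatchLogRecord(lr, resource, library) // lr is a pdata.LogRecord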
+ +package filterlog + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +func createConfig(matchType filterset.MatchType) *filterset.Config { + return &filterset.Config{ + MatchType: matchType, + } +} + +func TestLogRecord_validateMatchesConfiguration_InvalidConfig(t *testing.T) { + testcases := []struct { + name string + property filterconfig.MatchProperties + errorString string + }{ + { + name: "empty_property", + property: filterconfig.MatchProperties{}, + errorString: "at least one of \"log_names\", \"attributes\", \"libraries\" or \"resources\" field must be specified", + }, + { + name: "empty_log_names_and_attributes", + property: filterconfig.MatchProperties{ + LogNames: []string{}, + }, + errorString: "at least one of \"log_names\", \"attributes\", \"libraries\" or \"resources\" field must be specified", + }, + { + name: "span_properties", + property: filterconfig.MatchProperties{ + SpanNames: []string{"span"}, + }, + errorString: "neither services nor span_names should be specified for log records", + }, + { + name: "invalid_match_type", + property: filterconfig.MatchProperties{ + Config: *createConfig("wrong_match_type"), + LogNames: []string{"abc"}, + }, + errorString: "error creating log record name filters: unrecognized match_type: 'wrong_match_type', valid types are: [regexp strict]", + }, + { + name: "missing_match_type", + property: filterconfig.MatchProperties{ + LogNames: []string{"abc"}, + }, + errorString: "error creating log record name filters: unrecognized match_type: '', valid types are: [regexp strict]", + }, + { + name: "invalid_regexp_pattern", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + LogNames: []string{"["}, + }, + errorString: "error creating log record name filters: error parsing regexp: missing closing ]: `[`", + }, + { + name: "invalid_regexp_pattern2", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + LogNames: []string{"["}, + }, + errorString: "error creating log record name filters: error parsing regexp: missing closing ]: `[`", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + output, err := NewMatcher(&tc.property) + assert.Nil(t, output) + require.NotNil(t, err) + assert.Equal(t, tc.errorString, err.Error()) + }) + } +} + +func TestLogRecord_Matching_False(t *testing.T) { + testcases := []struct { + name string + properties *filterconfig.MatchProperties + }{ + { + name: "log_name_doesnt_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + LogNames: []string{"logNo.*Name"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + + { + name: "log_name_doesnt_match_any", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + LogNames: []string{ + "logNo.*Name", + "non-matching?pattern", + "regular string", + }, + Attributes: []filterconfig.Attribute{}, + }, + }, + } + + lr := pdata.NewLogRecord() + lr.SetName("logName") + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + matcher, err := NewMatcher(tc.properties) + assert.Nil(t, err) + assert.NotNil(t, matcher) + + assert.False(t, matcher.MatchLogRecord(lr, pdata.Resource{}, pdata.InstrumentationLibrary{})) + }) + } +} + +func TestLogRecord_Matching_True(t 
*testing.T) {
+	testcases := []struct {
+		name       string
+		properties *filterconfig.MatchProperties
+	}{
+		{
+			name: "log_name_match",
+			properties: &filterconfig.MatchProperties{
+				Config:     *createConfig(filterset.Regexp),
+				LogNames:   []string{"log.*"},
+				Attributes: []filterconfig.Attribute{},
+			},
+		},
+		{
+			name: "log_name_second_match",
+			properties: &filterconfig.MatchProperties{
+				Config: *createConfig(filterset.Regexp),
+				LogNames: []string{
+					"wrong.*pattern",
+					"log.*",
+					"yet another?pattern",
+					"regularstring",
+				},
+				Attributes: []filterconfig.Attribute{},
+			},
+		},
+	}
+
+	lr := pdata.NewLogRecord()
+	lr.SetName("logName")
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			mp, err := NewMatcher(tc.properties)
+			assert.Nil(t, err)
+			assert.NotNil(t, mp)
+
+			assert.NotNil(t, lr)
+			assert.True(t, mp.MatchLogRecord(lr, pdata.Resource{}, pdata.InstrumentationLibrary{}))
+		})
+	}
+}
diff --git a/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go b/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go
new file mode 100644
index 00000000000..7f02ec5809c
--- /dev/null
+++ b/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go
@@ -0,0 +1,129 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermatcher
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal/processor/filterconfig"
+	"go.opentelemetry.io/collector/internal/processor/filterhelper"
+	"go.opentelemetry.io/collector/internal/processor/filterset"
+)
+
+type attributesMatcher []attributeMatcher
+
+// attributeMatcher is an attribute key/value pair to match against.
+type attributeMatcher struct {
+	Key string
+	// If both AttributeValue and StringFilter are nil only check for key existence.
+	AttributeValue *pdata.AttributeValue
+	// StringFilter is needed to match against a regular expression
+	StringFilter filterset.FilterSet
+}
+
+var errUnexpectedAttributeType = errors.New("unexpected attribute type")
+
+func newAttributesMatcher(config filterset.Config, attributes []filterconfig.Attribute) (attributesMatcher, error) {
+	// Convert attribute values from the MatchProperties representation to the in-memory representation.
+	var rawAttributes []attributeMatcher
+	for _, attribute := range attributes {
+
+		if attribute.Key == "" {
+			return nil, errors.New("can't have empty key in the list of attributes")
+		}
+
+		entry := attributeMatcher{
+			Key: attribute.Key,
+		}
+		if attribute.Value != nil {
+			val, err := filterhelper.NewAttributeValueRaw(attribute.Value)
+			if err != nil {
+				return nil, err
+			}
+
+			if config.MatchType == filterset.Regexp {
+				if val.Type() != pdata.AttributeValueSTRING {
+					return nil, fmt.Errorf(
+						"%s=%s for %q only supports STRING, but found %s",
+						filterset.MatchTypeFieldName, filterset.Regexp, attribute.Key, val.Type(),
+					)
+				}
+
+				filter, err := filterset.CreateFilterSet([]string{val.StringVal()}, &config)
+				if err != nil {
+					return nil, err
+				}
+				entry.StringFilter = filter
+			} else {
+				entry.AttributeValue = &val
+			}
+		}
+
+		rawAttributes = append(rawAttributes, entry)
+	}
+	return rawAttributes, nil
+}
+
+// Match matches the attributes specification against a span/log.
+func (ma attributesMatcher) Match(attrs pdata.AttributeMap) bool {
+	// If there are no attributes to match against, the span/log matches.
+	if len(ma) == 0 {
+		return true
+	}
+
+	// At this point the span/log is expected to have attributes because
+	// len(ma) != 0. This means that spans/logs with no attributes do not match.
+	if attrs.Len() == 0 {
+		return false
+	}
+
+	// Check that all expected properties are set.
+	for _, property := range ma {
+		attr, exist := attrs.Get(property.Key)
+		if !exist {
+			return false
+		}
+
+		if property.StringFilter != nil {
+			value, err := attributeStringValue(attr)
+			if err != nil || !property.StringFilter.Matches(value) {
+				return false
+			}
+		} else if property.AttributeValue != nil {
+			if !attr.Equal(*property.AttributeValue) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func attributeStringValue(attr pdata.AttributeValue) (string, error) {
+	switch attr.Type() {
+	case pdata.AttributeValueSTRING:
+		return attr.StringVal(), nil
+	case pdata.AttributeValueBOOL:
+		return strconv.FormatBool(attr.BoolVal()), nil
+	case pdata.AttributeValueDOUBLE:
+		return strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64), nil
+	case pdata.AttributeValueINT:
+		return strconv.FormatInt(attr.IntVal(), 10), nil
+	default:
+		return "", errUnexpectedAttributeType
+	}
+}
diff --git a/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go b/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go
new file mode 100644
index 00000000000..5edba8f49fe
--- /dev/null
+++ b/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
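+
+// An illustrative sketch (not part of the upstream sources) of the two
+// attribute matching modes handled above: strict compares values exactly,
+// while regexp (STRING values only) treats the value as a pattern. Key and
+// pattern are placeholders.
+//
+//	am, err := newAttributesMatcher(
+//		filterset.Config{MatchType: filterset.Regexp},
+//		[]filterconfig.Attribute{{Key: "http.target", Value: "/api/.*"}},
+//	)
+//	// am.Match(attrs) reports whether attrs["http.target"] matches /api/.*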
+
+package filtermatcher
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal/processor/filterconfig"
+	"go.opentelemetry.io/collector/internal/processor/filterset"
+)
+
+type instrumentationLibraryMatcher struct {
+	Name    filterset.FilterSet
+	Version filterset.FilterSet
+}
+
+// PropertiesMatcher allows matching a span against various span properties.
+type PropertiesMatcher struct {
+	// Instrumentation libraries to compare against
+	libraries []instrumentationLibraryMatcher
+
+	// The attribute values are stored in the internal format.
+	attributes attributesMatcher
+
+	// The resource attribute values are stored in the internal format.
+	resources attributesMatcher
+}
+
+// NewMatcher creates a span Matcher that matches based on the given MatchProperties.
+func NewMatcher(mp *filterconfig.MatchProperties) (PropertiesMatcher, error) {
+	var lm []instrumentationLibraryMatcher
+	for _, library := range mp.Libraries {
+		name, err := filterset.CreateFilterSet([]string{library.Name}, &mp.Config)
+		if err != nil {
+			return PropertiesMatcher{}, fmt.Errorf("error creating library name filters: %v", err)
+		}
+
+		var version filterset.FilterSet
+		if library.Version != nil {
+			filter, err := filterset.CreateFilterSet([]string{*library.Version}, &mp.Config)
+			if err != nil {
+				return PropertiesMatcher{}, fmt.Errorf("error creating library version filters: %v", err)
+			}
+			version = filter
+		}
+
+		lm = append(lm, instrumentationLibraryMatcher{Name: name, Version: version})
+	}
+
+	var err error
+	var am attributesMatcher
+	if len(mp.Attributes) > 0 {
+		am, err = newAttributesMatcher(mp.Config, mp.Attributes)
+		if err != nil {
+			return PropertiesMatcher{}, fmt.Errorf("error creating attribute filters: %v", err)
+		}
+	}
+
+	var rm attributesMatcher
+	if len(mp.Resources) > 0 {
+		rm, err = newAttributesMatcher(mp.Config, mp.Resources)
+		if err != nil {
+			return PropertiesMatcher{}, fmt.Errorf("error creating resource filters: %v", err)
+		}
+	}
+
+	return PropertiesMatcher{
+		libraries:  lm,
+		attributes: am,
+		resources:  rm,
+	}, nil
+}
+
+// Match matches a span or log to a set of properties.
+func (mp *PropertiesMatcher) Match(attributes pdata.AttributeMap, resource pdata.Resource, library pdata.InstrumentationLibrary) bool {
+	for _, matcher := range mp.libraries {
+		if !matcher.Name.Matches(library.Name()) {
+			return false
+		}
+		if matcher.Version != nil && !matcher.Version.Matches(library.Version()) {
+			return false
+		}
+	}
+
+	if mp.resources != nil && !mp.resources.Match(resource.Attributes()) {
+		return false
+	}
+
+	return mp.attributes.Match(attributes)
+}
diff --git a/internal/otel_collector/internal/processor/filtermatcher/filtermatcher_test.go b/internal/otel_collector/internal/processor/filtermatcher/filtermatcher_test.go
new file mode 100644
index 00000000000..31102381d71
--- /dev/null
+++ b/internal/otel_collector/internal/processor/filtermatcher/filtermatcher_test.go
@@ -0,0 +1,394 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package filtermatcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/translator/conventions" +) + +func createConfig(matchType filterset.MatchType) *filterset.Config { + return &filterset.Config{ + MatchType: matchType, + } +} + +func Test_validateMatchesConfiguration_InvalidConfig(t *testing.T) { + version := "[" + testcases := []struct { + name string + property filterconfig.MatchProperties + errorString string + }{ + { + name: "regexp_match_type_for_int_attribute", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Attributes: []filterconfig.Attribute{ + {Key: "key", Value: 1}, + }, + }, + errorString: `error creating attribute filters: match_type=regexp for "key" only supports STRING, but found INT`, + }, + { + name: "unknown_attribute_value", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Attributes: []filterconfig.Attribute{ + {Key: "key", Value: []string{}}, + }, + }, + errorString: `error creating attribute filters: error unsupported value type "[]string"`, + }, + { + name: "invalid_regexp_pattern_attribute", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Attributes: []filterconfig.Attribute{{Key: "key", Value: "["}}, + }, + errorString: "error creating attribute filters: error parsing regexp: missing closing ]: `[`", + }, + { + name: "invalid_regexp_pattern_resource", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Resources: []filterconfig.Attribute{{Key: "key", Value: "["}}, + }, + errorString: "error creating resource filters: error parsing regexp: missing closing ]: `[`", + }, + { + name: "invalid_regexp_pattern_library_name", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Libraries: []filterconfig.InstrumentationLibrary{{Name: "["}}, + }, + errorString: "error creating library name filters: error parsing regexp: missing closing ]: `[`", + }, + { + name: "invalid_regexp_pattern_library_version", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Libraries: []filterconfig.InstrumentationLibrary{{Name: "lib", Version: &version}}, + }, + errorString: "error creating library version filters: error parsing regexp: missing closing ]: `[`", + }, + { + name: "empty_key_name_in_attributes_list", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"a"}, + Attributes: []filterconfig.Attribute{ + { + Key: "", + }, + }, + }, + errorString: "error creating attribute filters: can't have empty key in the list of attributes", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + output, err := NewMatcher(&tc.property) + assert.Zero(t, output) + assert.EqualError(t, err, tc.errorString) + }) + } +} + +func Test_Matching_False(t *testing.T) { + version := "wrong" + testcases := []struct { + name string + properties *filterconfig.MatchProperties + }{ + { + name: "wrong_library_name", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Libraries: 
[]filterconfig.InstrumentationLibrary{{Name: "wrong"}}, + }, + }, + { + name: "wrong_library_version", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Libraries: []filterconfig.InstrumentationLibrary{{Name: "lib", Version: &version}}, + }, + }, + + { + name: "wrong_attribute_value", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Attributes: []filterconfig.Attribute{ + { + Key: "keyInt", + Value: 1234, + }, + }, + }, + }, + { + name: "wrong_resource_value", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Resources: []filterconfig.Attribute{ + { + Key: "keyInt", + Value: 1234, + }, + }, + }, + }, + { + name: "incompatible_attribute_value", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Attributes: []filterconfig.Attribute{ + { + Key: "keyInt", + Value: "123", + }, + }, + }, + }, + { + name: "unsupported_attribute_value", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{}, + Attributes: []filterconfig.Attribute{ + { + Key: "keyMap", + Value: "123", + }, + }, + }, + }, + { + name: "property_key_does_not_exist", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Attributes: []filterconfig.Attribute{ + { + Key: "doesnotexist", + Value: nil, + }, + }, + }, + }, + } + + atts := pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + "keyInt": pdata.NewAttributeValueInt(123), + "keyMap": pdata.NewAttributeValueMap(), + }) + + library := pdata.NewInstrumentationLibrary() + library.SetName("lib") + library.SetVersion("ver") + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + matcher, err := NewMatcher(tc.properties) + require.NoError(t, err) + assert.NotNil(t, matcher) + + assert.False(t, matcher.Match(atts, resource("wrongSvc"), library)) + }) + } +} + +func Test_MatchingCornerCases(t *testing.T) { + cfg := &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Attributes: []filterconfig.Attribute{ + { + Key: "keyOne", + Value: nil, + }, + }, + } + + mp, err := NewMatcher(cfg) + assert.Nil(t, err) + assert.NotNil(t, mp) + + assert.False(t, mp.Match(pdata.NewAttributeMap(), resource("svcA"), pdata.NewInstrumentationLibrary())) +} + +func Test_Matching_True(t *testing.T) { + ver := "v.*" + + testcases := []struct { + name string + properties *filterconfig.MatchProperties + }{ + { + name: "library_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Libraries: []filterconfig.InstrumentationLibrary{{Name: "li.*"}}, + Attributes: []filterconfig.Attribute{}, + }, + }, + { + name: "library_match_with_version", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Libraries: []filterconfig.InstrumentationLibrary{{Name: "li.*", Version: &ver}}, + Attributes: []filterconfig.Attribute{}, + }, + }, + { + name: "attribute_exact_value_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{}, + Attributes: []filterconfig.Attribute{ + { + Key: "keyString", + Value: "arithmetic", + }, + { + Key: "keyInt", + Value: 123, + }, + { + Key: "keyDouble", + Value: 3245.6, + }, + { + Key: "keyBool", + Value: true, + }, + }, + }, + }, + { + name: 
"attribute_regex_value_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Attributes: []filterconfig.Attribute{ + { + Key: "keyString", + Value: "arith.*", + }, + { + Key: "keyInt", + Value: "12.*", + }, + { + Key: "keyDouble", + Value: "324.*", + }, + { + Key: "keyBool", + Value: "tr.*", + }, + }, + }, + }, + { + name: "resource_exact_value_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Resources: []filterconfig.Attribute{ + { + Key: "resString", + Value: "arithmetic", + }, + }, + }, + }, + { + name: "property_exists", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"svcA"}, + Attributes: []filterconfig.Attribute{ + { + Key: "keyExists", + Value: nil, + }, + }, + }, + }, + { + name: "match_all_settings_exists", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"svcA"}, + Attributes: []filterconfig.Attribute{ + { + Key: "keyExists", + Value: nil, + }, + { + Key: "keyString", + Value: "arithmetic", + }, + }, + }, + }, + } + + atts := pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + "keyString": pdata.NewAttributeValueString("arithmetic"), + "keyInt": pdata.NewAttributeValueInt(123), + "keyDouble": pdata.NewAttributeValueDouble(3245.6), + "keyBool": pdata.NewAttributeValueBool(true), + "keyExists": pdata.NewAttributeValueString("present"), + }) + + resource := pdata.NewResource() + resource.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + conventions.AttributeServiceName: pdata.NewAttributeValueString("svcA"), + "resString": pdata.NewAttributeValueString("arithmetic"), + }) + + library := pdata.NewInstrumentationLibrary() + library.SetName("lib") + library.SetVersion("ver") + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + mp, err := NewMatcher(tc.properties) + require.NoError(t, err) + assert.NotNil(t, mp) + + assert.True(t, mp.Match(atts, resource, library)) + }) + } +} + +func resource(service string) pdata.Resource { + r := pdata.NewResource() + r.Attributes().InitFromMap(map[string]pdata.AttributeValue{conventions.AttributeServiceName: pdata.NewAttributeValueString(service)}) + return r +} diff --git a/internal/otel_collector/internal/processor/filtermetric/config.go b/internal/otel_collector/internal/processor/filtermetric/config.go new file mode 100644 index 00000000000..5e164a73a0d --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/config.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filtermetric + +import ( + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/internal/processor/filterset/regexp" +) + +// MatchType specifies the strategy for matching against `pdata.Metric`s. 
This +// is distinct from filterset.MatchType which matches against metric (and +// tracing) names only. To support matching against metric names and +// `pdata.Metric`s, filtermetric.MatchType is effectively a superset of +// filterset.MatchType. +type MatchType string + +// These are the MatchTypes that users can specify for filtering +// `pdata.Metric`s. +const ( + Regexp = MatchType(filterset.Regexp) + Strict = MatchType(filterset.Strict) + Expr MatchType = "expr" +) + +// MatchProperties specifies the set of properties in a metric to match against and the +// type of string pattern matching to use. +type MatchProperties struct { + // MatchType specifies the type of matching desired + MatchType MatchType `mapstructure:"match_type"` + // RegexpConfig specifies options for the Regexp match type + RegexpConfig *regexp.Config `mapstructure:"regexp"` + + // MetricNames specifies the list of string patterns to match metric names against. + // A match occurs if the metric name matches at least one string pattern in this list. + MetricNames []string `mapstructure:"metric_names"` + + // Expressions specifies the list of expr expressions to match metrics against. + // A match occurs if any datapoint in a metric matches at least one expression in this list. + Expressions []string `mapstructure:"expressions"` +} diff --git a/internal/otel_collector/internal/processor/filtermetric/config_test.go b/internal/otel_collector/internal/processor/filtermetric/config_test.go new file mode 100644 index 00000000000..2c28d5d5753 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/config_test.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
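+
+// A minimal sketch for exposition (assumes only the exported fields of
+// MatchProperties defined in config.go above): the Go literal equivalent of
+// the yaml form that the mapstructure tags decode, here a regexp match on
+// metric names.
+//
+//	props := &filtermetric.MatchProperties{
+//		MatchType:   filtermetric.Regexp,
+//		MetricNames: []string{"prefix/.*", ".*_suffix"},
+//	}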
+ +package filtermetric + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/internal/processor/filterset/regexp" +) + +var ( + // regexpNameMatches matches the metric names specified in testdata/config.yaml + regexpNameMatches = []string{ + "prefix/.*", + ".*contains.*", + ".*_suffix", + "full_name_match", + } + + strictNameMatches = []string{ + "exact_string_match", + } +) + +func createConfigWithRegexpOptions(filters []string, rCfg *regexp.Config) *MatchProperties { + cfg := createConfig(filters, filterset.Regexp) + cfg.RegexpConfig = rCfg + return cfg +} + +func TestConfig(t *testing.T) { + testFile := path.Join(".", "testdata", "config.yaml") + v := configtest.NewViperFromYamlFile(t, testFile) + + testYamls := map[string]MatchProperties{} + require.NoErrorf(t, v.UnmarshalExact(&testYamls), "unable to unmarshal yaml from file %v", testFile) + + tests := []struct { + name string + expCfg *MatchProperties + }{ + { + name: "config/regexp", + expCfg: createConfig(regexpNameMatches, filterset.Regexp), + }, { + name: "config/regexpoptions", + expCfg: createConfigWithRegexpOptions( + regexpNameMatches, + &regexp.Config{ + CacheEnabled: true, + CacheMaxNumEntries: 5, + }, + ), + }, { + name: "config/strict", + expCfg: createConfig(strictNameMatches, filterset.Strict), + }, { + name: "config/emptyproperties", + expCfg: createConfig(nil, filterset.Regexp), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := testYamls[test.name] + assert.Equal(t, *test.expCfg, cfg) + + matcher, err := NewMatcher(&cfg) + assert.NotNil(t, matcher) + assert.NoError(t, err) + }) + } +} diff --git a/internal/otel_collector/internal/processor/filtermetric/doc.go b/internal/otel_collector/internal/processor/filtermetric/doc.go new file mode 100644 index 00000000000..29a0f50b1c8 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package filtermetric is a helper package for processing metrics. +package filtermetric diff --git a/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go b/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go new file mode 100644 index 00000000000..50352293d88 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filtermetric + +import ( + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterexpr" +) + +type exprMatcher struct { + matchers []*filterexpr.Matcher +} + +func newExprMatcher(expressions []string) (*exprMatcher, error) { + m := &exprMatcher{} + for _, expression := range expressions { + matcher, err := filterexpr.NewMatcher(expression) + if err != nil { + return nil, err + } + m.matchers = append(m.matchers, matcher) + } + return m, nil +} + +func (m *exprMatcher) MatchMetric(metric pdata.Metric) (bool, error) { + for _, matcher := range m.matchers { + matched, err := matcher.MatchMetric(metric) + if err != nil { + return false, err + } + if matched { + return true, nil + } + } + return false, nil +} diff --git a/internal/otel_collector/internal/processor/filtermetric/filtermetric.go b/internal/otel_collector/internal/processor/filtermetric/filtermetric.go new file mode 100644 index 00000000000..ec9c176ccf7 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/filtermetric.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filtermetric + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +type Matcher interface { + MatchMetric(metric pdata.Metric) (bool, error) +} + +// NewMatcher constructs a metric Matcher. If an 'expr' match type is specified, +// returns an expr matcher, otherwise a name matcher. +func NewMatcher(config *MatchProperties) (Matcher, error) { + if config.MatchType == Expr { + return newExprMatcher(config.Expressions) + } + return newNameMatcher(config) +} diff --git a/internal/otel_collector/internal/processor/filtermetric/filtermetric_test.go b/internal/otel_collector/internal/processor/filtermetric/filtermetric_test.go new file mode 100644 index 00000000000..cdc82af1765 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/filtermetric_test.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
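+
+// A minimal usage sketch for the NewMatcher/Matcher pair defined above
+// (hypothetical caller code; only this package's exported API is assumed,
+// and `metric` stands in for any pdata.Metric):
+//
+//	matcher, err := filtermetric.NewMatcher(&filtermetric.MatchProperties{
+//		MatchType:   filtermetric.Strict,
+//		MetricNames: []string{"exact_string_match"},
+//	})
+//	if err != nil {
+//		// invalid filter configuration
+//	}
+//	matched, err := matcher.MatchMetric(metric)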
+ +package filtermetric + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +var ( + regexpFilters = []string{ + "prefix/.*", + "prefix_.*", + ".*/suffix", + ".*_suffix", + ".*/contains/.*", + ".*_contains_.*", + "full/name/match", + "full_name_match", + } + + strictFilters = []string{ + "exact_string_match", + ".*/suffix", + "(a|b)", + } +) + +func createMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + return metric +} + +func TestMatcherMatches(t *testing.T) { + tests := []struct { + name string + cfg *MatchProperties + metric pdata.Metric + shouldMatch bool + }{ + { + name: "regexpNameMatch", + cfg: createConfig(regexpFilters, filterset.Regexp), + metric: createMetric("test/match/suffix"), + shouldMatch: true, + }, { + name: "regexpNameMismatch", + cfg: createConfig(regexpFilters, filterset.Regexp), + metric: createMetric("test/match/wrongsuffix"), + shouldMatch: false, + }, { + name: "strictNameMatch", + cfg: createConfig(strictFilters, filterset.Strict), + metric: createMetric("exact_string_match"), + shouldMatch: true, + }, { + name: "strictNameMismatch", + cfg: createConfig(strictFilters, filterset.Strict), + metric: createMetric("wrong_string_match"), + shouldMatch: false, + }, { + name: "matcherWithNoPropertyFilters", + cfg: createConfig([]string{}, filterset.Strict), + metric: createMetric("metric"), + shouldMatch: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + matcher, err := NewMatcher(test.cfg) + assert.NotNil(t, matcher) + assert.NoError(t, err) + + matches, err := matcher.MatchMetric(test.metric) + assert.NoError(t, err) + assert.Equal(t, test.shouldMatch, matches) + }) + } +} diff --git a/internal/otel_collector/internal/processor/filtermetric/helpers_test.go b/internal/otel_collector/internal/processor/filtermetric/helpers_test.go new file mode 100644 index 00000000000..dc26b89724a --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/helpers_test.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filtermetric + +import ( + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +func createConfig(filters []string, matchType filterset.MatchType) *MatchProperties { + return &MatchProperties{ + MatchType: MatchType(matchType), + MetricNames: filters, + } +} diff --git a/internal/otel_collector/internal/processor/filtermetric/name_matcher.go b/internal/otel_collector/internal/processor/filtermetric/name_matcher.go new file mode 100644 index 00000000000..41b99407827 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/name_matcher.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filtermetric + +import ( + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +// nameMatcher matches metrics by metric properties against prespecified values for each property. +type nameMatcher struct { + nameFilters filterset.FilterSet +} + +func newNameMatcher(config *MatchProperties) (*nameMatcher, error) { + nameFS, err := filterset.CreateFilterSet( + config.MetricNames, + &filterset.Config{ + MatchType: filterset.MatchType(config.MatchType), + RegexpConfig: config.RegexpConfig, + }, + ) + if err != nil { + return nil, err + } + return &nameMatcher{ + nameFilters: nameFS, + }, nil +} + +// MatchMetric matches a metric using the metric properties configured on the nameMatcher. +// A metric only matches if every metric property configured on the nameMatcher is a match. +func (m *nameMatcher) MatchMetric(metric pdata.Metric) (bool, error) { + return m.nameFilters.Matches(metric.Name()), nil +} diff --git a/internal/otel_collector/internal/processor/filtermetric/testdata/config.yaml b/internal/otel_collector/internal/processor/filtermetric/testdata/config.yaml new file mode 100644 index 00000000000..c3a058d58d1 --- /dev/null +++ b/internal/otel_collector/internal/processor/filtermetric/testdata/config.yaml @@ -0,0 +1,24 @@ +# Yaml form of the configuration for matching metrics +# This configuration can be embedded into other component's yamls +# The top level here are just test names and do not represent part of the actual configuration. + +config/regexp: + match_type: regexp + metric_names: [prefix/.*, .*contains.*, .*_suffix, full_name_match] +config/regexpoptions: + match_type: regexp + regexp: + cacheenabled: true + cachemaxnumentries: 5 + metric_names: + - prefix/.* + - .*contains.* + - .*_suffix + - full_name_match +config/strict: + match_type: strict + metric_names: + - exact_string_match +config/emptyproperties: + match_type: regexp + metric_names: \ No newline at end of file diff --git a/internal/otel_collector/internal/processor/filterset/config.go b/internal/otel_collector/internal/processor/filterset/config.go new file mode 100644 index 00000000000..df131f75397 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/config.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filterset + +import ( + "fmt" + + "go.opentelemetry.io/collector/internal/processor/filterset/regexp" + "go.opentelemetry.io/collector/internal/processor/filterset/strict" +) + +// MatchType describes the type of pattern matching a FilterSet uses to filter strings. +type MatchType string + +const ( + // Regexp is the MatchType for filtering by regexp string matches. + Regexp MatchType = "regexp" + // Strict is the MatchType for filtering by exact string matches. + Strict MatchType = "strict" + // MatchTypeFieldName is the mapstructure field name for the MatchType field. + MatchTypeFieldName = "match_type" +) + +var ( + validMatchTypes = []MatchType{Regexp, Strict} +) + +// Config configures the matching behavior of a FilterSet. +type Config struct { + MatchType MatchType `mapstructure:"match_type"` + RegexpConfig *regexp.Config `mapstructure:"regexp"` +} + +// CreateFilterSet creates a FilterSet from yaml config. +func CreateFilterSet(filters []string, cfg *Config) (FilterSet, error) { + switch cfg.MatchType { + case Regexp: + return regexp.NewFilterSet(filters, cfg.RegexpConfig) + case Strict: + // Strict FilterSets do not have any extra configuration options, so call the constructor directly. + return strict.NewFilterSet(filters) + default: + return nil, fmt.Errorf("unrecognized %v: '%v', valid types are: %v", MatchTypeFieldName, cfg.MatchType, validMatchTypes) + } +} diff --git a/internal/otel_collector/internal/processor/filterset/config_test.go b/internal/otel_collector/internal/processor/filterset/config_test.go new file mode 100644 index 00000000000..0ce9f7c43ed --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/config_test.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
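+
+// Usage sketch for CreateFilterSet above (hypothetical caller; assumes the
+// regexp package import "go.opentelemetry.io/collector/internal/processor/filterset/regexp"):
+//
+//	fs, err := filterset.CreateFilterSet(
+//		[]string{"prefix/.*"},
+//		&filterset.Config{
+//			MatchType:    filterset.Regexp,
+//			RegexpConfig: &regexp.Config{CacheEnabled: true, CacheMaxNumEntries: 64},
+//		},
+//	)
+//	if err != nil {
+//		// unrecognized match_type or an invalid re2 pattern
+//	}
+//	_ = fs.Matches("prefix/metric")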
+ +package filterset + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/internal/processor/filterset/regexp" +) + +func readTestdataConfigYamls(t *testing.T, filename string) map[string]*Config { + testFile := path.Join(".", "testdata", filename) + v := configtest.NewViperFromYamlFile(t, testFile) + + cfgs := map[string]*Config{} + require.NoErrorf(t, v.UnmarshalExact(&cfgs), "unable to unmarshal yaml from file %v", testFile) + return cfgs +} + +func TestConfig(t *testing.T) { + actualConfigs := readTestdataConfigYamls(t, "config.yaml") + expectedConfigs := map[string]*Config{ + "regexp/default": { + MatchType: Regexp, + }, + "regexp/emptyoptions": { + MatchType: Regexp, + }, + "regexp/withoptions": { + MatchType: Regexp, + RegexpConfig: &regexp.Config{ + CacheEnabled: false, + CacheMaxNumEntries: 10, + }, + }, + "strict/default": { + MatchType: Strict, + }, + } + + for testName, actualCfg := range actualConfigs { + t.Run(testName, func(t *testing.T) { + expCfg, ok := expectedConfigs[testName] + assert.True(t, ok) + assert.Equal(t, expCfg, actualCfg) + + fs, err := CreateFilterSet([]string{}, actualCfg) + assert.NoError(t, err) + assert.NotNil(t, fs) + }) + } +} + +func TestConfigInvalid(t *testing.T) { + actualConfigs := readTestdataConfigYamls(t, "config_invalid.yaml") + expectedConfigs := map[string]*Config{ + "invalid/matchtype": { + MatchType: "invalid", + }, + } + + for testName, actualCfg := range actualConfigs { + t.Run(testName, func(t *testing.T) { + expCfg, ok := expectedConfigs[testName] + assert.True(t, ok) + assert.Equal(t, expCfg, actualCfg) + + fs, err := CreateFilterSet([]string{}, actualCfg) + assert.NotNil(t, err) + assert.Nil(t, fs) + }) + } +} diff --git a/internal/otel_collector/internal/processor/filterset/doc.go b/internal/otel_collector/internal/processor/filterset/doc.go new file mode 100644 index 00000000000..6dc697bc0cb --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package filterset provides an interface for matching strings against a set of string filters. +package filterset diff --git a/internal/otel_collector/internal/processor/filterset/filterset.go b/internal/otel_collector/internal/processor/filterset/filterset.go new file mode 100644 index 00000000000..137c5e5746b --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/filterset.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterset + +// FilterSet is an interface for matching strings against a set of filters. +type FilterSet interface { + // Matches returns true if the given string matches at least one + // of the filters encapsulated by the FilterSet. + Matches(string) bool +} diff --git a/internal/otel_collector/internal/processor/filterset/regexp/config.go b/internal/otel_collector/internal/processor/filterset/regexp/config.go new file mode 100644 index 00000000000..93839162764 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/regexp/config.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package regexp + +// RegexpConfig represents the options for a NewFilterSet. +type Config struct { + // CacheEnabled determines whether match results are LRU cached to make subsequent matches faster. + // Cache size is unlimited unless CacheMaxNumEntries is also specified. + CacheEnabled bool `mapstructure:"cacheenabled"` + // CacheMaxNumEntries is the max number of entries of the LRU cache that stores match results. + // CacheMaxNumEntries is ignored if CacheEnabled is false. + CacheMaxNumEntries int `mapstructure:"cachemaxnumentries"` +} diff --git a/internal/otel_collector/internal/processor/filterset/regexp/config_test.go b/internal/otel_collector/internal/processor/filterset/regexp/config_test.go new file mode 100644 index 00000000000..00465d742b5 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/regexp/config_test.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package regexp + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configtest" +) + +func TestConfig(t *testing.T) { + testFile := path.Join(".", "testdata", "config.yaml") + v := configtest.NewViperFromYamlFile(t, testFile) + + actualConfigs := map[string]*Config{} + require.NoErrorf(t, v.UnmarshalExact(&actualConfigs), "unable to unmarshal yaml from file %v", testFile) + + expectedConfigs := map[string]*Config{ + "regexp/default": {}, + "regexp/cachedisabledwithsize": { + CacheEnabled: false, + CacheMaxNumEntries: 10, + }, + "regexp/cacheenablednosize": { + CacheEnabled: true, + }, + } + + for testName, actualCfg := range actualConfigs { + t.Run(testName, func(t *testing.T) { + expCfg, ok := expectedConfigs[testName] + assert.True(t, ok) + assert.Equal(t, expCfg, actualCfg) + + fs, err := NewFilterSet([]string{}, actualCfg) + assert.NoError(t, err) + assert.NotNil(t, fs) + }) + } +} diff --git a/internal/otel_collector/internal/processor/filterset/regexp/doc.go b/internal/otel_collector/internal/processor/filterset/regexp/doc.go new file mode 100644 index 00000000000..4c8ab33f066 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/regexp/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package regexp provides an implementation to match strings against a set of regexp string filters. +package regexp diff --git a/internal/otel_collector/internal/processor/filterset/regexp/regexpfilterset.go b/internal/otel_collector/internal/processor/filterset/regexp/regexpfilterset.go new file mode 100644 index 00000000000..d6ced6b00e3 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/regexp/regexpfilterset.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package regexp + +import ( + "regexp" + + "github.com/golang/groupcache/lru" +) + +// FilterSet encapsulates a set of filters and caches match results. +// Filters are re2 regex strings. +// FilterSet is exported for convenience, but has unexported fields and should be constructed through NewFilterSet. 
+// +// FilterSet satisfies the FilterSet interface from +// "go.opentelemetry.io/collector/internal/processor/filterset" +type FilterSet struct { + regexes []*regexp.Regexp + cacheEnabled bool + cache *lru.Cache +} + +// NewFilterSet constructs a FilterSet of re2 regex strings. +// If any of the given filters fail to compile into re2, an error is returned. +func NewFilterSet(filters []string, cfg *Config) (*FilterSet, error) { + fs := &FilterSet{ + regexes: make([]*regexp.Regexp, 0, len(filters)), + } + + if cfg != nil && cfg.CacheEnabled { + fs.cacheEnabled = true + fs.cache = lru.New(cfg.CacheMaxNumEntries) + } + + if err := fs.addFilters(filters); err != nil { + return nil, err + } + + return fs, nil +} + +// Matches returns true if the given string matches any of the FilterSet's filters. +// A string matches if at least one filter's re2 regex matches a substring of it; patterns are not anchored. +func (rfs *FilterSet) Matches(toMatch string) bool { + if rfs.cacheEnabled { + if v, ok := rfs.cache.Get(toMatch); ok { + return v.(bool) + } + } + + for _, r := range rfs.regexes { + if r.MatchString(toMatch) { + if rfs.cacheEnabled { + rfs.cache.Add(toMatch, true) + } + return true + } + } + + if rfs.cacheEnabled { + rfs.cache.Add(toMatch, false) + } + return false +} + +// addFilters compiles all the given filters and stores them as regexes, skipping duplicates. +// Patterns are compiled as-is and are not anchored, so a filter may match anywhere within a string. +func (rfs *FilterSet) addFilters(filters []string) error { + dedup := make(map[string]struct{}, len(filters)) + for _, f := range filters { + if _, ok := dedup[f]; ok { + continue + } + + re, err := regexp.Compile(f) + if err != nil { + return err + } + rfs.regexes = append(rfs.regexes, re) + dedup[f] = struct{}{} + } + + return nil +} diff --git a/internal/otel_collector/internal/processor/filterset/regexp/regexpfilterset_test.go b/internal/otel_collector/internal/processor/filterset/regexp/regexpfilterset_test.go new file mode 100644 index 00000000000..de9bae05d81 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/regexp/regexpfilterset_test.go @@ -0,0 +1,208 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
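+
+// Usage sketch for the regexp FilterSet above (hypothetical caller; note the
+// unanchored substring semantics documented on Matches):
+//
+//	fs, err := regexp.NewFilterSet(
+//		[]string{"prefix/.*"},
+//		&regexp.Config{CacheEnabled: true, CacheMaxNumEntries: 128},
+//	)
+//	if err != nil {
+//		// one of the filters is not valid re2
+//	}
+//	fs.Matches("prefix/metric/name") // true; the result is now LRU-cached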
+ +package regexp + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + validRegexpFilters = []string{ + "prefix/.*", + "prefix_.*", + ".*/suffix", + ".*_suffix", + ".*/contains/.*", + ".*_contains_.*", + "full/name/match", + "full_name_match", + } +) + +func TestNewRegexpFilterSet(t *testing.T) { + tests := []struct { + name string + filters []string + success bool + }{ + { + name: "validFilters", + filters: validRegexpFilters, + success: true, + }, { + name: "invalidFilter", + filters: []string{ + "exact_string_match", + "(a|b))", // invalid regex + }, + success: false, + }, { + name: "emptyFilter", + filters: []string{}, + success: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fs, err := NewFilterSet(test.filters, nil) + assert.Equal(t, test.success, fs != nil) + assert.Equal(t, test.success, err == nil) + + if err == nil { + // sanity call + fs.Matches("test") + } + }) + } +} + +func TestRegexpMatches(t *testing.T) { + fs, err := NewFilterSet(validRegexpFilters, &Config{}) + assert.NotNil(t, fs) + assert.NoError(t, err) + assert.False(t, fs.cacheEnabled) + + matches := []string{ + "full/name/match", + "extra/full/name/match/extra", + "full_name_match", + "prefix/test/match", + "prefix_test_match", + "extra/prefix/test/match", + "test/match/suffix", + "test/match/suffixextra", + "test_match_suffix", + "test/contains/match", + "test_contains_match", + } + + for _, m := range matches { + t.Run(m, func(t *testing.T) { + assert.True(t, fs.Matches(m)) + }) + } + + mismatches := []string{ + "not_exact_string_match", + "random", + "c", + } + + for _, m := range mismatches { + t.Run(m, func(t *testing.T) { + assert.False(t, fs.Matches(m)) + }) + } +} + +func TestRegexpDeDup(t *testing.T) { + dupRegexpFilters := []string{ + "prefix/.*", + "prefix/.*", + } + fs, err := NewFilterSet(dupRegexpFilters, &Config{}) + assert.NotNil(t, fs) + assert.NoError(t, err) + assert.False(t, fs.cacheEnabled) + assert.EqualValues(t, 1, len(fs.regexes)) +} + +func TestRegexpMatchesCaches(t *testing.T) { + // 0 means unlimited cache + fs, err := NewFilterSet(validRegexpFilters, &Config{ + CacheEnabled: true, + CacheMaxNumEntries: 0, + }) + assert.NotNil(t, fs) + assert.NoError(t, err) + assert.True(t, fs.cacheEnabled) + + matches := []string{ + "full/name/match", + "extra/full/name/match/extra", + "full_name_match", + "prefix/test/match", + "prefix_test_match", + "extra/prefix/test/match", + "test/match/suffix", + "test/match/suffixextra", + "test_match_suffix", + "test/contains/match", + "test_contains_match", + } + + for _, m := range matches { + t.Run(m, func(t *testing.T) { + assert.True(t, fs.Matches(m)) + + matched, ok := fs.cache.Get(m) + assert.True(t, matched.(bool) && ok) + }) + } + + mismatches := []string{ + "not_exact_string_match", + "random", + "c", + } + + for _, m := range mismatches { + t.Run(m, func(t *testing.T) { + assert.False(t, fs.Matches(m)) + + matched, ok := fs.cache.Get(m) + assert.True(t, !matched.(bool) && ok) + }) + } +} + +func TestWithCacheSize(t *testing.T) { + size := 3 + fs, err := NewFilterSet(validRegexpFilters, &Config{ + CacheEnabled: true, + CacheMaxNumEntries: size, + }) + assert.NotNil(t, fs) + assert.NoError(t, err) + + matches := []string{ + "prefix/test/match", + "prefix_test_match", + "test/match/suffix", + } + + // fill cache + for _, m := range matches { + fs.Matches(m) + _, ok := fs.cache.Get(m) + assert.True(t, ok) + } + + // refresh oldest entry + fs.Matches(matches[0]) + + // cause LRU cache 
eviction + newest := "new" + fs.Matches(newest) + + _, evictedOk := fs.cache.Get(matches[1]) + assert.False(t, evictedOk) + + _, newOk := fs.cache.Get(newest) + assert.True(t, newOk) +} diff --git a/internal/otel_collector/internal/processor/filterset/regexp/testdata/config.yaml b/internal/otel_collector/internal/processor/filterset/regexp/testdata/config.yaml new file mode 100644 index 00000000000..8d838cbe4b6 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/regexp/testdata/config.yaml @@ -0,0 +1,10 @@ +# Yaml form of the configuration for regexp FilterSets +# This configuration can be embedded into other component's yamls +# The top level here are just test names and do not represent part of the actual configuration. + +regexp/default: +regexp/cachedisabledwithsize: + cacheenabled: false + cachemaxnumentries: 10 +regexp/cacheenablednosize: + cacheenabled: true \ No newline at end of file diff --git a/internal/otel_collector/internal/processor/filterset/strict/doc.go b/internal/otel_collector/internal/processor/filterset/strict/doc.go new file mode 100644 index 00000000000..9a069ab9a9c --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/strict/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package strict provides an implementation to match strings against a set of exact match string filters. +package strict diff --git a/internal/otel_collector/internal/processor/filterset/strict/strictfilterset.go b/internal/otel_collector/internal/processor/filterset/strict/strictfilterset.go new file mode 100644 index 00000000000..459f55866ca --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/strict/strictfilterset.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strict + +// FilterSet encapsulates a set of exact string match filters. +// FilterSet is exported for convenience, but has unexported fields and should be constructed through NewFilterSet. +// +// FilterSet satisfies the FilterSet interface from +// "go.opentelemetry.io/collector/internal/processor/filterset" +type FilterSet struct { + filters map[string]struct{} +} + +// NewFilterSet constructs a FilterSet of exact string matches.
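+//
+// A minimal sketch of the exact-match semantics (hypothetical caller):
+//
+//	fs, _ := strict.NewFilterSet([]string{"exact_string_match"})
+//	fs.Matches("exact_string_match") // true
+//	fs.Matches("exact_string")       // false: no partial matches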
+func NewFilterSet(filters []string) (*FilterSet, error) { + fs := &FilterSet{ + filters: make(map[string]struct{}, len(filters)), + } + + fs.addFilters(filters) + return fs, nil +} + +// Matches returns true if the given string matches any of the FilterSet's filters. +func (sfs *FilterSet) Matches(toMatch string) bool { + _, ok := sfs.filters[toMatch] + return ok +} + +// addFilters adds all the given filters to the set. +func (sfs *FilterSet) addFilters(filters []string) { + for _, f := range filters { + sfs.filters[f] = struct{}{} + } +} diff --git a/internal/otel_collector/internal/processor/filterset/strict/strictfilterset_test.go b/internal/otel_collector/internal/processor/filterset/strict/strictfilterset_test.go new file mode 100644 index 00000000000..0d5a51cc161 --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/strict/strictfilterset_test.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strict + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + validStrictFilters = []string{ + "exact_string_match", + ".*/suffix", + "(a|b)", + } +) + +func TestNewStrictFilterSet(t *testing.T) { + tests := []struct { + name string + filters []string + success bool + }{ + { + name: "validFilters", + filters: validStrictFilters, + success: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fs, err := NewFilterSet(test.filters) + assert.Equal(t, test.success, fs != nil) + assert.Equal(t, test.success, err == nil) + }) + } +} + +func TestStrictMatches(t *testing.T) { + fs, err := NewFilterSet(validStrictFilters) + assert.NotNil(t, fs) + assert.NoError(t, err) + + matches := []string{ + "exact_string_match", + ".*/suffix", + "(a|b)", + } + + for _, m := range matches { + t.Run(m, func(t *testing.T) { + assert.True(t, fs.Matches(m)) + }) + } + + mismatches := []string{ + "not_exact_string_match", + "random", + "test/match/suffix", + "prefix/metric/one", + "c", + } + + for _, m := range mismatches { + t.Run(m, func(t *testing.T) { + assert.False(t, fs.Matches(m)) + }) + } +} diff --git a/internal/otel_collector/internal/processor/filterset/testdata/config.yaml b/internal/otel_collector/internal/processor/filterset/testdata/config.yaml new file mode 100644 index 00000000000..6fe12bd442c --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/testdata/config.yaml @@ -0,0 +1,16 @@ +# Yaml form of the configuration for FilterSets +# This configuration can be embedded into other component's yamls +# The top level here are just test names and do not represent part of the actual configuration.
+ +regexp/default: + match_type: regexp +regexp/emptyoptions: + match_type: regexp + regexp: +regexp/withoptions: + match_type: regexp + regexp: + cacheenabled: false + cachemaxnumentries: 10 +strict/default: + match_type: strict \ No newline at end of file diff --git a/internal/otel_collector/internal/processor/filterset/testdata/config_invalid.yaml b/internal/otel_collector/internal/processor/filterset/testdata/config_invalid.yaml new file mode 100644 index 00000000000..bc854ccc63a --- /dev/null +++ b/internal/otel_collector/internal/processor/filterset/testdata/config_invalid.yaml @@ -0,0 +1,6 @@ +# Yaml form of the configuration for FilterSets +# This configuration can be embedded into other component's yamls +# The top level here are just test names and do not represent part of the actual configuration. + +invalid/matchtype: + match_type: "invalid" \ No newline at end of file diff --git a/internal/otel_collector/internal/processor/filterspan/filterspan.go b/internal/otel_collector/internal/processor/filterspan/filterspan.go new file mode 100644 index 00000000000..f4686c35fee --- /dev/null +++ b/internal/otel_collector/internal/processor/filterspan/filterspan.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterspan + +import ( + "fmt" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filtermatcher" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/translator/conventions" +) + +// TODO: Modify Matcher to invoke both the include and exclude properties so +// calling processors will always have the same logic. +// Matcher is an interface that allows matching a span against a configuration +// of a match. +type Matcher interface { + MatchSpan(span pdata.Span, resource pdata.Resource, library pdata.InstrumentationLibrary) bool +} + +// propertiesMatcher allows matching a span against various span properties. +type propertiesMatcher struct { + filtermatcher.PropertiesMatcher + + // Service names to compare to. + serviceFilters filterset.FilterSet + + // Span names to compare to. + nameFilters filterset.FilterSet +} + +// NewMatcher creates a span Matcher that matches based on the given MatchProperties. 
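+//
+// An illustrative usage sketch (assumes only this package's exported API;
+// includeProps and excludeProps are hypothetical *filterconfig.MatchProperties
+// values, either of which may be nil when not configured):
+//
+//	include, _ := filterspan.NewMatcher(includeProps) // nil props => nil Matcher
+//	exclude, _ := filterspan.NewMatcher(excludeProps)
+//	if filterspan.SkipSpan(include, exclude, span, resource, library) {
+//		// drop the span: it missed include or hit exclude
+//	}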
+func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { + if mp == nil { + return nil, nil + } + + if err := mp.ValidateForSpans(); err != nil { + return nil, err + } + + rm, err := filtermatcher.NewMatcher(mp) + if err != nil { + return nil, err + } + + var serviceFS filterset.FilterSet = nil + if len(mp.Services) > 0 { + serviceFS, err = filterset.CreateFilterSet(mp.Services, &mp.Config) + if err != nil { + return nil, fmt.Errorf("error creating service name filters: %v", err) + } + } + + var nameFS filterset.FilterSet = nil + if len(mp.SpanNames) > 0 { + nameFS, err = filterset.CreateFilterSet(mp.SpanNames, &mp.Config) + if err != nil { + return nil, fmt.Errorf("error creating span name filters: %v", err) + } + } + + return &propertiesMatcher{ + PropertiesMatcher: rm, + serviceFilters: serviceFS, + nameFilters: nameFS, + }, nil +} + +// SkipSpan determines if a span should be processed. +// True is returned when a span should be skipped. +// False is returned when a span should not be skipped. +// The logic determining if a span should be processed is set +// in the attribute configuration with the include and exclude settings. +// Include properties are checked before exclude settings are checked. +func SkipSpan(include Matcher, exclude Matcher, span pdata.Span, resource pdata.Resource, library pdata.InstrumentationLibrary) bool { + if include != nil { + // A false returned in this case means the span should not be processed. + if i := include.MatchSpan(span, resource, library); !i { + return true + } + } + + if exclude != nil { + // A true returned in this case means the span should not be processed. + if e := exclude.MatchSpan(span, resource, library); e { + return true + } + } + + return false +} + +// MatchSpan matches a span and service to a set of properties. +// see filterconfig.MatchProperties for more details +func (mp *propertiesMatcher) MatchSpan(span pdata.Span, resource pdata.Resource, library pdata.InstrumentationLibrary) bool { + // If a set of properties was not in the mp, all spans are considered to match on that property + if mp.serviceFilters != nil { + serviceName := serviceNameForResource(resource) + if !mp.serviceFilters.Matches(serviceName) { + return false + } + } + + if mp.nameFilters != nil && !mp.nameFilters.Matches(span.Name()) { + return false + } + + return mp.PropertiesMatcher.Match(span.Attributes(), resource, library) +} + +// serviceNameForResource gets the service name for a specified Resource. +func serviceNameForResource(resource pdata.Resource) string { + service, found := resource.Attributes().Get(conventions.AttributeServiceName) + if !found { + return "" + } + + return service.StringVal() +} diff --git a/internal/otel_collector/internal/processor/filterspan/filterspan_test.go b/internal/otel_collector/internal/processor/filterspan/filterspan_test.go new file mode 100644 index 00000000000..5c4cd68fd7a --- /dev/null +++ b/internal/otel_collector/internal/processor/filterspan/filterspan_test.go @@ -0,0 +1,261 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package filterspan + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +func createConfig(matchType filterset.MatchType) *filterset.Config { + return &filterset.Config{ + MatchType: matchType, + } +} + +func TestSpan_validateMatchesConfiguration_InvalidConfig(t *testing.T) { + testcases := []struct { + name string + property filterconfig.MatchProperties + errorString string + }{ + { + name: "empty_property", + property: filterconfig.MatchProperties{}, + errorString: "at least one of \"services\", \"span_names\", \"attributes\", \"libraries\" or \"resources\" field must be specified", + }, + { + name: "empty_service_span_names_and_attributes", + property: filterconfig.MatchProperties{ + Services: []string{}, + }, + errorString: "at least one of \"services\", \"span_names\", \"attributes\", \"libraries\" or \"resources\" field must be specified", + }, + { + name: "log_properties", + property: filterconfig.MatchProperties{ + LogNames: []string{"log"}, + }, + errorString: "log_names should not be specified for trace spans", + }, + { + name: "invalid_match_type", + property: filterconfig.MatchProperties{ + Config: *createConfig("wrong_match_type"), + Services: []string{"abc"}, + }, + errorString: "error creating service name filters: unrecognized match_type: 'wrong_match_type', valid types are: [regexp strict]", + }, + { + name: "missing_match_type", + property: filterconfig.MatchProperties{ + Services: []string{"abc"}, + }, + errorString: "error creating service name filters: unrecognized match_type: '', valid types are: [regexp strict]", + }, + { + name: "invalid_regexp_pattern_service", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{"["}, + }, + errorString: "error creating service name filters: error parsing regexp: missing closing ]: `[`", + }, + { + name: "invalid_regexp_pattern_span", + property: filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + SpanNames: []string{"["}, + }, + errorString: "error creating span name filters: error parsing regexp: missing closing ]: `[`", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + output, err := NewMatcher(&tc.property) + assert.Nil(t, output) + assert.EqualError(t, err, tc.errorString) + }) + } +} + +func TestSpan_Matching_False(t *testing.T) { + testcases := []struct { + name string + properties *filterconfig.MatchProperties + }{ + { + name: "service_name_doesnt_match_regexp", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{"svcA"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + + { + name: "service_name_doesnt_match_strict", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"svcA"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + + { + name: "span_name_doesnt_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + SpanNames: []string{"spanNo.*Name"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + + { + 
name: "span_name_doesnt_match_any", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + SpanNames: []string{ + "spanNo.*Name", + "non-matching?pattern", + "regular string", + }, + Attributes: []filterconfig.Attribute{}, + }, + }, + } + + span := pdata.NewSpan() + span.SetName("spanName") + library := pdata.NewInstrumentationLibrary() + resource := pdata.NewResource() + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + matcher, err := NewMatcher(tc.properties) + require.NoError(t, err) + assert.NotNil(t, matcher) + + assert.False(t, matcher.MatchSpan(span, resource, library)) + }) + } +} + +func TestSpan_MissingServiceName(t *testing.T) { + cfg := &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{"svcA"}, + } + + mp, err := NewMatcher(cfg) + assert.Nil(t, err) + assert.NotNil(t, mp) + + emptySpan := pdata.NewSpan() + assert.False(t, mp.MatchSpan(emptySpan, pdata.NewResource(), pdata.NewInstrumentationLibrary())) +} + +func TestSpan_Matching_True(t *testing.T) { + testcases := []struct { + name string + properties *filterconfig.MatchProperties + }{ + { + name: "service_name_match_regexp", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{"svcA"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + { + name: "service_name_match_strict", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"svcA"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + { + name: "span_name_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + SpanNames: []string{"span.*"}, + Attributes: []filterconfig.Attribute{}, + }, + }, + { + name: "span_name_second_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + SpanNames: []string{ + "wrong.*pattern", + "span.*", + "yet another?pattern", + "regularstring", + }, + Attributes: []filterconfig.Attribute{}, + }, + }, + } + + span := pdata.NewSpan() + span.SetName("spanName") + span.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "keyString": pdata.NewAttributeValueString("arithmetic"), + "keyInt": pdata.NewAttributeValueInt(123), + "keyDouble": pdata.NewAttributeValueDouble(3245.6), + "keyBool": pdata.NewAttributeValueBool(true), + "keyExists": pdata.NewAttributeValueString("present"), + }) + assert.NotNil(t, span) + + resource := pdata.NewResource() + resource.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + conventions.AttributeServiceName: pdata.NewAttributeValueString("svcA"), + }) + + library := pdata.NewInstrumentationLibrary() + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + mp, err := NewMatcher(tc.properties) + require.NoError(t, err) + assert.NotNil(t, mp) + + assert.True(t, mp.MatchSpan(span, resource, library)) + }) + } +} + +func TestServiceNameForResource(t *testing.T) { + td := testdata.GenerateTraceDataOneSpanNoResource() + require.Equal(t, serviceNameForResource(td.ResourceSpans().At(0).Resource()), "") + + td = testdata.GenerateTraceDataOneSpan() + resource := td.ResourceSpans().At(0).Resource() + require.Equal(t, serviceNameForResource(resource), "") + + resource.Attributes().InsertString(conventions.AttributeServiceName, "test-service") + require.Equal(t, serviceNameForResource(resource), "test-service") +} diff --git a/internal/otel_collector/internal/testdata/common.go 
b/internal/otel_collector/internal/testdata/common.go new file mode 100644 index 00000000000..ae61c3d2039 --- /dev/null +++ b/internal/otel_collector/internal/testdata/common.go @@ -0,0 +1,179 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +var ( + resourceAttributes1 = map[string]pdata.AttributeValue{"resource-attr": pdata.NewAttributeValueString("resource-attr-val-1")} + resourceAttributes2 = map[string]pdata.AttributeValue{"resource-attr": pdata.NewAttributeValueString("resource-attr-val-2")} + spanEventAttributes = map[string]pdata.AttributeValue{"span-event-attr": pdata.NewAttributeValueString("span-event-attr-val")} + spanLinkAttributes = map[string]pdata.AttributeValue{"span-link-attr": pdata.NewAttributeValueString("span-link-attr-val")} + spanAttributes = map[string]pdata.AttributeValue{"span-attr": pdata.NewAttributeValueString("span-attr-val")} +) + +const ( + TestLabelKey = "label" + TestLabelKey1 = "label-1" + TestLabelValue1 = "label-value-1" + TestLabelKey2 = "label-2" + TestLabelValue2 = "label-value-2" + TestLabelKey3 = "label-3" + TestLabelValue3 = "label-value-3" + TestAttachmentKey = "exemplar-attachment" + TestAttachmentValue = "exemplar-attachment-value" +) + +func initResourceAttributes1(dest pdata.AttributeMap) { + dest.InitFromMap(resourceAttributes1) +} + +func generateOtlpResourceAttributes1() []otlpcommon.KeyValue { + return []otlpcommon.KeyValue{ + { + Key: "resource-attr", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "resource-attr-val-1"}}, + }, + } +} + +func initResourceAttributes2(dest pdata.AttributeMap) { + dest.InitFromMap(resourceAttributes2) +} + +func generateOtlpResourceAttributes2() []otlpcommon.KeyValue { + return []otlpcommon.KeyValue{ + { + Key: "resource-attr", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "resource-attr-val-2"}}, + }, + } +} + +func initSpanAttributes(dest pdata.AttributeMap) { + dest.InitFromMap(spanAttributes) +} + +func generateOtlpSpanAttributes() []otlpcommon.KeyValue { + return []otlpcommon.KeyValue{ + { + Key: "span-attr", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "span-attr-val"}}, + }, + } +} + +func initSpanEventAttributes(dest pdata.AttributeMap) { + dest.InitFromMap(spanEventAttributes) +} + +func generateOtlpSpanEventAttributes() []otlpcommon.KeyValue { + return []otlpcommon.KeyValue{ + { + Key: "span-event-attr", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "span-event-attr-val"}}, + }, + } +} + +func initSpanLinkAttributes(dest pdata.AttributeMap) { + dest.InitFromMap(spanLinkAttributes) +} + +func generateOtlpSpanLinkAttributes() []otlpcommon.KeyValue { + return []otlpcommon.KeyValue{ + { + Key: "span-link-attr", + Value: 
otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "span-link-attr-val"}}, + }, + } +} + +func initMetricLabels1(dest pdata.StringMap) { + dest.InitFromMap(map[string]string{TestLabelKey1: TestLabelValue1}) +} + +func generateOtlpMetricLabels1() []otlpcommon.StringKeyValue { + return []otlpcommon.StringKeyValue{ + { + Key: TestLabelKey1, + Value: TestLabelValue1, + }, + } +} + +func initMetricLabels12(dest pdata.StringMap) { + dest.InitFromMap(map[string]string{TestLabelKey1: TestLabelValue1, TestLabelKey2: TestLabelValue2}).Sort() +} + +func generateOtlpMetricLabels12() []otlpcommon.StringKeyValue { + return []otlpcommon.StringKeyValue{ + { + Key: TestLabelKey1, + Value: TestLabelValue1, + }, + { + Key: TestLabelKey2, + Value: TestLabelValue2, + }, + } +} + +func initMetricLabels13(dest pdata.StringMap) { + dest.InitFromMap(map[string]string{TestLabelKey1: TestLabelValue1, TestLabelKey3: TestLabelValue3}).Sort() +} + +func generateOtlpMetricLabels13() []otlpcommon.StringKeyValue { + return []otlpcommon.StringKeyValue{ + { + Key: TestLabelKey1, + Value: TestLabelValue1, + }, + { + Key: TestLabelKey3, + Value: TestLabelValue3, + }, + } +} + +func initMetricLabels2(dest pdata.StringMap) { + dest.InitFromMap(map[string]string{TestLabelKey2: TestLabelValue2}) +} + +func generateOtlpMetricLabels2() []otlpcommon.StringKeyValue { + return []otlpcommon.StringKeyValue{ + { + Key: TestLabelKey2, + Value: TestLabelValue2, + }, + } +} + +func initMetricAttachment(dest pdata.StringMap) { + dest.InitFromMap(map[string]string{TestAttachmentKey: TestAttachmentValue}) +} + +func generateOtlpMetricAttachment() []otlpcommon.StringKeyValue { + return []otlpcommon.StringKeyValue{ + { + Key: TestAttachmentKey, + Value: TestAttachmentValue, + }, + } +} diff --git a/internal/otel_collector/internal/testdata/log.go b/internal/otel_collector/internal/testdata/log.go new file mode 100644 index 00000000000..232ef728ee4 --- /dev/null +++ b/internal/otel_collector/internal/testdata/log.go @@ -0,0 +1,318 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
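The common.go helpers above keep every attribute set in two parallel forms: a pdata map initialized through InitFromMap, and the raw OTLP KeyValue slice used on the wire, so the translation tests can assert both directions without re-deriving expected values. A minimal sketch of the pdata side, assuming the v0.17.0 pdata API vendored by this patch (Get and StringVal are the usual accessor counterparts of the constructors used above):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	// Same pattern as initResourceAttributes1: build the map in place.
	attrs := pdata.NewAttributeMap()
	attrs.InitFromMap(map[string]pdata.AttributeValue{
		"resource-attr": pdata.NewAttributeValueString("resource-attr-val-1"),
	})

	// Read the value back through the typed accessors.
	if v, ok := attrs.Get("resource-attr"); ok {
		fmt.Println(v.StringVal()) // resource-attr-val-1
	}
}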
+ +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" +) + +var ( + TestLogTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) + TestLogTimestamp = pdata.TimestampUnixNano(TestLogTime.UnixNano()) +) + +func GenerateLogDataEmpty() pdata.Logs { + ld := pdata.NewLogs() + return ld +} + +func generateLogOtlpEmpty() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs(nil) +} + +func GenerateLogDataOneEmptyResourceLogs() pdata.Logs { + ld := GenerateLogDataEmpty() + ld.ResourceLogs().Resize(1) + return ld +} + +func generateLogOtlpOneEmptyResourceLogs() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + {}, + } +} + +func GenerateLogDataNoLogRecords() pdata.Logs { + ld := GenerateLogDataOneEmptyResourceLogs() + rs0 := ld.ResourceLogs().At(0) + initResource1(rs0.Resource()) + return ld +} + +func generateLogOtlpNoLogRecords() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + { + Resource: generateOtlpResource1(), + }, + } +} + +func GenerateLogDataOneEmptyLogs() pdata.Logs { + ld := GenerateLogDataNoLogRecords() + rs0 := ld.ResourceLogs().At(0) + rs0.InstrumentationLibraryLogs().Resize(1) + rs0.InstrumentationLibraryLogs().At(0).Logs().Resize(1) + return ld +} + +func generateLogOtlpOneEmptyLogs() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{ + {}, + }, + }, + }, + }, + } +} + +func GenerateLogDataOneLogNoResource() pdata.Logs { + ld := GenerateLogDataOneEmptyResourceLogs() + rs0 := ld.ResourceLogs().At(0) + rs0.InstrumentationLibraryLogs().Resize(1) + rs0.InstrumentationLibraryLogs().At(0).Logs().Resize(1) + rs0lr0 := rs0.InstrumentationLibraryLogs().At(0).Logs().At(0) + fillLogOne(rs0lr0) + return ld +} + +func generateLogOtlpOneLogNoResource() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + { + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{ + generateOtlpLogOne(), + }, + }, + }, + }, + } +} + +func GenerateLogDataOneLog() pdata.Logs { + ld := GenerateLogDataOneEmptyLogs() + rs0 := ld.ResourceLogs().At(0) + rs0.InstrumentationLibraryLogs().Resize(1) + rs0.InstrumentationLibraryLogs().At(0).Logs().Resize(1) + rs0lr0 := rs0.InstrumentationLibraryLogs().At(0).Logs().At(0) + fillLogOne(rs0lr0) + return ld +} + +func generateLogOtlpOneLog() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{ + generateOtlpLogOne(), + }, + }, + }, + }, + } +} + +func GenerateLogDataTwoLogsSameResource() pdata.Logs { + ld := GenerateLogDataOneEmptyLogs() + rs0 := ld.ResourceLogs().At(0) + rs0.InstrumentationLibraryLogs().Resize(1) + rs0.InstrumentationLibraryLogs().At(0).Logs().Resize(2) + fillLogOne(rs0.InstrumentationLibraryLogs().At(0).Logs().At(0)) + fillLogTwo(rs0.InstrumentationLibraryLogs().At(0).Logs().At(1)) + return ld +} + +// GenerateLogOtlpSameResourceTwologs returns the OTLP representation of the GenerateLogOtlpSameResourceTwologs. 
+func GenerateLogOtlpSameResourceTwoLogs() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{ + generateOtlpLogOne(), + generateOtlpLogTwo(), + }, + }, + }, + }, + } +} + +func GenerateLogDataTwoLogsSameResourceOneDifferent() pdata.Logs { + ld := pdata.NewLogs() + ld.ResourceLogs().Resize(2) + rl0 := ld.ResourceLogs().At(0) + initResource1(rl0.Resource()) + rl0.InstrumentationLibraryLogs().Resize(1) + rl0.InstrumentationLibraryLogs().At(0).Logs().Resize(2) + fillLogOne(rl0.InstrumentationLibraryLogs().At(0).Logs().At(0)) + fillLogTwo(rl0.InstrumentationLibraryLogs().At(0).Logs().At(1)) + rl1 := ld.ResourceLogs().At(1) + initResource2(rl1.Resource()) + rl1.InstrumentationLibraryLogs().Resize(1) + rl1.InstrumentationLibraryLogs().At(0).Logs().Resize(1) + fillLogThree(rl1.InstrumentationLibraryLogs().At(0).Logs().At(0)) + return ld +} + +func generateLogOtlpTwoLogsSameResourceOneDifferent() []*otlplogs.ResourceLogs { + return []*otlplogs.ResourceLogs{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{ + generateOtlpLogOne(), + generateOtlpLogTwo(), + }, + }, + }, + }, + { + Resource: generateOtlpResource2(), + InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ + { + Logs: []*otlplogs.LogRecord{ + generateOtlpLogThree(), + }, + }, + }, + }, + } +} + +func fillLogOne(log pdata.LogRecord) { + log.SetName("logA") + log.SetTimestamp(TestLogTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityText("Info") + log.SetSpanID(pdata.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08})) + log.SetTraceID(pdata.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) + + attrs := log.Attributes() + attrs.InsertString("app", "server") + attrs.InsertInt("instance_num", 1) + + log.Body().SetStringVal("This is a log message") +} + +func generateOtlpLogOne() *otlplogs.LogRecord { + return &otlplogs.LogRecord{ + Name: "logA", + TimeUnixNano: uint64(TestLogTimestamp), + DroppedAttributesCount: 1, + SeverityNumber: otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO, + SeverityText: "Info", + Body: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "This is a log message"}}, + SpanId: data.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08}), + TraceId: data.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01}), + Attributes: []otlpcommon.KeyValue{ + { + Key: "app", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "server"}}, + }, + { + Key: "instance_num", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: 1}}, + }, + }, + } +} + +func fillLogTwo(log pdata.LogRecord) { + log.SetName("logB") + log.SetTimestamp(TestLogTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityText("Info") + + attrs := log.Attributes() + attrs.InsertString("customer", "acme") + attrs.InsertString("env", "dev") + + log.Body().SetStringVal("something happened") +} + +func generateOtlpLogTwo() *otlplogs.LogRecord { + return &otlplogs.LogRecord{ + Name: "logB", + TimeUnixNano: uint64(TestLogTimestamp), + DroppedAttributesCount: 1, + SeverityNumber: otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO, + SeverityText: "Info", + Body: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "something happened"}}, + 
Attributes: []otlpcommon.KeyValue{ + { + Key: "customer", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "acme"}}, + }, + { + Key: "env", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "dev"}}, + }, + }, + } +} + +func fillLogThree(log pdata.LogRecord) { + log.SetName("logC") + log.SetTimestamp(TestLogTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(pdata.SeverityNumberWARN) + log.SetSeverityText("Warning") + + log.Body().SetStringVal("something else happened") +} + +func generateOtlpLogThree() *otlplogs.LogRecord { + return &otlplogs.LogRecord{ + Name: "logC", + TimeUnixNano: uint64(TestLogTimestamp), + DroppedAttributesCount: 1, + SeverityNumber: otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN, + SeverityText: "Warning", + Body: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "something else happened"}}, + } +} + +func GenerateLogDataManyLogsSameResource(count int) pdata.Logs { + ld := GenerateLogDataOneEmptyLogs() + rs0 := ld.ResourceLogs().At(0) + rs0.InstrumentationLibraryLogs().Resize(1) + rs0.InstrumentationLibraryLogs().At(0).Logs().Resize(count) + for i := 0; i < count; i++ { + l := rs0.InstrumentationLibraryLogs().At(0).Logs().At(i) + if i%2 == 0 { + fillLogOne(l) + } else { + fillLogTwo(l) + } + } + return ld +} diff --git a/internal/otel_collector/internal/testdata/log_test.go b/internal/otel_collector/internal/testdata/log_test.go new file mode 100644 index 00000000000..2ceb87ede33 --- /dev/null +++ b/internal/otel_collector/internal/testdata/log_test.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
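The log generators above all grow slices in place with Resize and then mutate elements through At; the v0.17.0 pdata API exposes no append-style constructors, so this index-based idiom is the standard way to build test payloads. A short, self-contained sketch of the same pattern:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	ld := pdata.NewLogs()
	ld.ResourceLogs().Resize(1)

	rl := ld.ResourceLogs().At(0)
	rl.InstrumentationLibraryLogs().Resize(1)

	logs := rl.InstrumentationLibraryLogs().At(0).Logs()
	logs.Resize(1)
	logs.At(0).SetName("logA") // mirrors fillLogOne above

	fmt.Println(ld.LogRecordCount()) // 1
}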
+ +package testdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal" + otlplogs "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" +) + +type logTestCase struct { + name string + ld pdata.Logs + otlp []*otlplogs.ResourceLogs +} + +func generateAllLogTestCases() []logTestCase { + return []logTestCase{ + { + name: "empty", + ld: GenerateLogDataEmpty(), + otlp: generateLogOtlpEmpty(), + }, + { + name: "one-empty-resource-logs", + ld: GenerateLogDataOneEmptyResourceLogs(), + otlp: generateLogOtlpOneEmptyResourceLogs(), + }, + { + name: "no-log-records", + ld: GenerateLogDataNoLogRecords(), + otlp: generateLogOtlpNoLogRecords(), + }, + { + name: "one-empty-log-record", + ld: GenerateLogDataOneEmptyLogs(), + otlp: generateLogOtlpOneEmptyLogs(), + }, + { + name: "one-log-record-no-resource", + ld: GenerateLogDataOneLogNoResource(), + otlp: generateLogOtlpOneLogNoResource(), + }, + { + name: "one-log-record", + ld: GenerateLogDataOneLog(), + otlp: generateLogOtlpOneLog(), + }, + { + name: "two-records-same-resource", + ld: GenerateLogDataTwoLogsSameResource(), + otlp: GenerateLogOtlpSameResourceTwoLogs(), + }, + { + name: "two-records-same-resource-one-different", + ld: GenerateLogDataTwoLogsSameResourceOneDifferent(), + otlp: generateLogOtlpTwoLogsSameResourceOneDifferent(), + }, + } +} + +func TestToFromOtlpLog(t *testing.T) { + allTestCases := generateAllLogTestCases() + // Ensure NumLogTests gets updated. + for i := range allTestCases { + test := allTestCases[i] + t.Run(test.name, func(t *testing.T) { + ld := pdata.LogsFromInternalRep(internal.LogsFromOtlp(test.otlp)) + assert.EqualValues(t, test.ld, ld) + otlp := internal.LogsToOtlp(ld.InternalRep()) + assert.EqualValues(t, test.otlp, otlp) + }) + } +} diff --git a/internal/otel_collector/internal/testdata/metric.go b/internal/otel_collector/internal/testdata/metric.go new file mode 100644 index 00000000000..4395a5bc312 --- /dev/null +++ b/internal/otel_collector/internal/testdata/metric.go @@ -0,0 +1,635 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
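TestToFromOtlpLog above pins each Generate* function to its generate*Otlp twin by converting in both directions and asserting deep equality, so any drift between the two representations fails the suite immediately. Code elsewhere in the module consumes only the pdata side; a hypothetical consumer test might look like the following (internal/testdata is an internal package, so this only compiles from within the collector module):

package sometest

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"go.opentelemetry.io/collector/internal/testdata"
)

func TestConsumeGeneratedLogs(t *testing.T) {
	td := testdata.GenerateLogDataTwoLogsSameResource()
	assert.Equal(t, 2, td.LogRecordCount())
}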
+ +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/consumer/pdata" + otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" +) + +var ( + TestMetricStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) + TestMetricStartTimestamp = pdata.TimestampUnixNano(TestMetricStartTime.UnixNano()) + + TestMetricExemplarTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) + TestMetricExemplarTimestamp = pdata.TimestampUnixNano(TestMetricExemplarTime.UnixNano()) + + TestMetricTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) + TestMetricTimestamp = pdata.TimestampUnixNano(TestMetricTime.UnixNano()) +) + +const ( + TestGaugeDoubleMetricName = "gauge-double" + TestGaugeIntMetricName = "gauge-int" + TestCounterDoubleMetricName = "counter-double" + TestCounterIntMetricName = "counter-int" + TestDoubleHistogramMetricName = "double-histogram" + TestIntHistogramMetricName = "int-histogram" + TestDoubleSummaryMetricName = "double-summary" +) + +func GenerateMetricsEmpty() pdata.Metrics { + md := pdata.NewMetrics() + return md +} + +func generateMetricsOtlpEmpty() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics(nil) +} + +func GenerateMetricsOneEmptyResourceMetrics() pdata.Metrics { + md := GenerateMetricsEmpty() + md.ResourceMetrics().Resize(1) + return md +} + +func generateMetricsOtlpOneEmptyResourceMetrics() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + {}, + } +} + +func GenerateMetricsNoLibraries() pdata.Metrics { + md := GenerateMetricsOneEmptyResourceMetrics() + ms0 := md.ResourceMetrics().At(0) + initResource1(ms0.Resource()) + return md +} + +func generateMetricsOtlpNoLibraries() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + Resource: generateOtlpResource1(), + }, + } +} + +func GenerateMetricsOneEmptyInstrumentationLibrary() pdata.Metrics { + md := GenerateMetricsNoLibraries() + md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().Resize(1) + return md +} + +// generateMetricsOtlpOneEmptyInstrumentationLibrary returns the OTLP representation of the GenerateMetricsOneEmptyInstrumentationLibrary. 
+func generateMetricsOtlpOneEmptyInstrumentationLibrary() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + {}, + }, + }, + } +} + +func GenerateMetricsOneMetricNoResource() pdata.Metrics { + md := GenerateMetricsOneEmptyResourceMetrics() + rm0 := md.ResourceMetrics().At(0) + rm0.InstrumentationLibraryMetrics().Resize(1) + rm0ils0 := rm0.InstrumentationLibraryMetrics().At(0) + rm0ils0.Metrics().Resize(1) + initCounterIntMetric(rm0ils0.Metrics().At(0)) + return md +} + +func generateMetricsOtlpOneMetricNoResource() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + generateOtlpCounterIntMetric(), + }, + }, + }, + }, + } +} + +func GenerateMetricsOneMetric() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + rm0ils0.Metrics().Resize(1) + initCounterIntMetric(rm0ils0.Metrics().At(0)) + return md +} + +func generateMetricsOtlpOneMetric() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + generateOtlpCounterIntMetric(), + }, + }, + }, + }, + } +} + +func GenerateMetricsOneMetricOneDataPoint() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + rm0ils0.Metrics().Resize(1) + initGaugeIntMetricOneDataPoint(rm0ils0.Metrics().At(0)) + return md +} + +func GenerateMetricsTwoMetrics() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + rm0ils0.Metrics().Resize(2) + initCounterIntMetric(rm0ils0.Metrics().At(0)) + initCounterIntMetric(rm0ils0.Metrics().At(1)) + return md +} + +func GenerateMetricsOneCounterOneSummaryMetrics() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + rm0ils0.Metrics().Resize(2) + initCounterIntMetric(rm0ils0.Metrics().At(0)) + initDoubleSummaryMetric(rm0ils0.Metrics().At(1)) + return md +} + +func GenerateMetricsOtlpTwoMetrics() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + generateOtlpCounterIntMetric(), + generateOtlpCounterIntMetric(), + }, + }, + }, + }, + } +} + +func GenerateMetricsOneMetricNoLabels() pdata.Metrics { + md := GenerateMetricsOneMetric() + dps := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).IntSum().DataPoints() + dps.At(0).LabelsMap().InitFromMap(map[string]string{}) + dps.At(1).LabelsMap().InitFromMap(map[string]string{}) + return md +} + +func generateMetricsOtlpOneMetricNoLabels() []*otlpmetrics.ResourceMetrics { + md := generateMetricsOtlpOneMetric() + mis := md[0].InstrumentationLibraryMetrics[0].Metrics[0].Data.(*otlpmetrics.Metric_IntSum).IntSum + mis.DataPoints[0].Labels = nil + mis.DataPoints[1].Labels = nil + return md +} + +func GenerateMetricsAllTypesNoDataPoints() 
pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + ms := ilm0.Metrics() + ms.Resize(7) + initMetric(ms.At(0), TestGaugeDoubleMetricName, pdata.MetricDataTypeDoubleGauge) + initMetric(ms.At(1), TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge) + initMetric(ms.At(2), TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) + initMetric(ms.At(3), TestCounterIntMetricName, pdata.MetricDataTypeIntSum) + initMetric(ms.At(4), TestDoubleHistogramMetricName, pdata.MetricDataTypeDoubleHistogram) + initMetric(ms.At(5), TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) + initMetric(ms.At(6), TestDoubleSummaryMetricName, pdata.MetricDataTypeDoubleSummary) + return md +} + +func GenerateMetricsAllTypesEmptyDataPoint() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + ms := ilm0.Metrics() + ms.Resize(7) + + initMetric(ms.At(0), TestGaugeDoubleMetricName, pdata.MetricDataTypeDoubleGauge) + ms.At(0).DoubleGauge().DataPoints().Resize(1) + initMetric(ms.At(1), TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge) + ms.At(1).IntGauge().DataPoints().Resize(1) + initMetric(ms.At(2), TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) + ms.At(2).DoubleSum().DataPoints().Resize(1) + initMetric(ms.At(3), TestCounterIntMetricName, pdata.MetricDataTypeIntSum) + ms.At(3).IntSum().DataPoints().Resize(1) + initMetric(ms.At(4), TestDoubleHistogramMetricName, pdata.MetricDataTypeDoubleHistogram) + ms.At(4).DoubleHistogram().DataPoints().Resize(1) + initMetric(ms.At(5), TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) + ms.At(5).IntHistogram().DataPoints().Resize(1) + initMetric(ms.At(6), TestDoubleSummaryMetricName, pdata.MetricDataTypeDoubleSummary) + ms.At(6).DoubleSummary().DataPoints().Resize(1) + return md +} + +func GenerateMetricsMetricTypeInvalid() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + ms := ilm0.Metrics() + ms.Resize(1) + + initMetric(ms.At(0), TestCounterIntMetricName, pdata.MetricDataTypeNone) + return md +} + +func generateMetricsOtlpAllTypesNoDataPoints() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + generateOtlpMetric(TestGaugeDoubleMetricName, pdata.MetricDataTypeDoubleGauge), + generateOtlpMetric(TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge), + generateOtlpMetric(TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum), + generateOtlpMetric(TestCounterIntMetricName, pdata.MetricDataTypeIntSum), + generateOtlpMetric(TestDoubleHistogramMetricName, pdata.MetricDataTypeDoubleHistogram), + generateOtlpMetric(TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram), + generateOtlpMetric(TestDoubleSummaryMetricName, pdata.MetricDataTypeDoubleSummary), + }, + }, + }, + }, + } +} + +func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { + metricData := pdata.NewMetrics() + metricData.ResourceMetrics().Resize(1) + + rms := metricData.ResourceMetrics() + initResource1(rms.At(0).Resource()) + rms.At(0).InstrumentationLibraryMetrics().Resize(1) + + ilms := rms.At(0).InstrumentationLibraryMetrics() + ilms.At(0).Metrics().Resize(5) + ms := ilms.At(0).Metrics() + 
initCounterIntMetric(ms.At(0)) + initSumDoubleMetric(ms.At(1)) + initDoubleHistogramMetric(ms.At(2)) + initIntHistogramMetric(ms.At(3)) + initDoubleSummaryMetric(ms.At(4)) + + return metricData +} + +func generateMetricsOtlpAllTypesWithSampleDatapoints() []*otlpmetrics.ResourceMetrics { + return []*otlpmetrics.ResourceMetrics{ + { + Resource: generateOtlpResource1(), + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + generateOtlpCounterIntMetric(), + generateOtlpSumDoubleMetric(), + generateOtlpDoubleHistogramMetric(), + generateOtlpIntHistogramMetric(), + generateOTLPDoubleSummaryMetric(), + }, + }, + }, + }, + } +} + +func initCounterIntMetric(im pdata.Metric) { + initMetric(im, TestCounterIntMetricName, pdata.MetricDataTypeIntSum) + + idps := im.IntSum().DataPoints() + idps.Resize(2) + idp0 := idps.At(0) + initMetricLabels1(idp0.LabelsMap()) + idp0.SetStartTime(TestMetricStartTimestamp) + idp0.SetTimestamp(TestMetricTimestamp) + idp0.SetValue(123) + idp1 := idps.At(1) + initMetricLabels2(idp1.LabelsMap()) + idp1.SetStartTime(TestMetricStartTimestamp) + idp1.SetTimestamp(TestMetricTimestamp) + idp1.SetValue(456) +} + +func initGaugeIntMetricOneDataPoint(im pdata.Metric) { + initMetric(im, TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge) + + idps := im.IntGauge().DataPoints() + idps.Resize(1) + idp0 := idps.At(0) + initMetricLabels1(idp0.LabelsMap()) + idp0.SetStartTime(TestMetricStartTimestamp) + idp0.SetTimestamp(TestMetricTimestamp) + idp0.SetValue(123) +} + +func generateOtlpCounterIntMetric() *otlpmetrics.Metric { + m := generateOtlpMetric(TestCounterIntMetricName, pdata.MetricDataTypeIntSum) + m.Data.(*otlpmetrics.Metric_IntSum).IntSum.DataPoints = + []*otlpmetrics.IntDataPoint{ + { + Labels: generateOtlpMetricLabels1(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Value: 123, + }, + { + Labels: generateOtlpMetricLabels2(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Value: 456, + }, + } + return m +} + +func initSumDoubleMetric(dm pdata.Metric) { + initMetric(dm, TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) + + ddps := dm.DoubleSum().DataPoints() + ddps.Resize(2) + ddp0 := ddps.At(0) + initMetricLabels12(ddp0.LabelsMap()) + ddp0.SetStartTime(TestMetricStartTimestamp) + ddp0.SetTimestamp(TestMetricTimestamp) + ddp0.SetValue(1.23) + + ddp1 := ddps.At(1) + initMetricLabels13(ddp1.LabelsMap()) + ddp1.SetStartTime(TestMetricStartTimestamp) + ddp1.SetTimestamp(TestMetricTimestamp) + ddp1.SetValue(4.56) +} + +func generateOtlpSumDoubleMetric() *otlpmetrics.Metric { + m := generateOtlpMetric(TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) + m.Data.(*otlpmetrics.Metric_DoubleSum).DoubleSum.DataPoints = + []*otlpmetrics.DoubleDataPoint{ + { + Labels: generateOtlpMetricLabels12(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Value: 1.23, + }, + { + Labels: generateOtlpMetricLabels13(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Value: 4.56, + }, + } + return m +} + +func initDoubleHistogramMetric(hm pdata.Metric) { + initMetric(hm, TestDoubleHistogramMetricName, pdata.MetricDataTypeDoubleHistogram) + + hdps := hm.DoubleHistogram().DataPoints() + hdps.Resize(2) + hdp0 := hdps.At(0) + initMetricLabels13(hdp0.LabelsMap()) + hdp0.SetStartTime(TestMetricStartTimestamp) + 
hdp0.SetTimestamp(TestMetricTimestamp) + hdp0.SetCount(1) + hdp0.SetSum(15) + hdp1 := hdps.At(1) + initMetricLabels2(hdp1.LabelsMap()) + hdp1.SetStartTime(TestMetricStartTimestamp) + hdp1.SetTimestamp(TestMetricTimestamp) + hdp1.SetCount(1) + hdp1.SetSum(15) + hdp1.SetBucketCounts([]uint64{0, 1}) + exemplars := hdp1.Exemplars() + exemplars.Resize(1) + exemplar := exemplars.At(0) + exemplar.SetTimestamp(TestMetricExemplarTimestamp) + exemplar.SetValue(15) + initMetricAttachment(exemplar.FilteredLabels()) + hdp1.SetExplicitBounds([]float64{1}) +} + +func generateOtlpDoubleHistogramMetric() *otlpmetrics.Metric { + m := generateOtlpMetric(TestDoubleHistogramMetricName, pdata.MetricDataTypeDoubleHistogram) + m.Data.(*otlpmetrics.Metric_DoubleHistogram).DoubleHistogram.DataPoints = + []*otlpmetrics.DoubleHistogramDataPoint{ + { + Labels: generateOtlpMetricLabels13(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Count: 1, + Sum: 15, + }, + { + Labels: generateOtlpMetricLabels2(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Count: 1, + Sum: 15, + BucketCounts: []uint64{0, 1}, + ExplicitBounds: []float64{1}, + Exemplars: []*otlpmetrics.DoubleExemplar{ + { + FilteredLabels: generateOtlpMetricAttachment(), + TimeUnixNano: uint64(TestMetricExemplarTimestamp), + Value: 15, + }, + }, + }, + } + return m +} + +func initIntHistogramMetric(hm pdata.Metric) { + initMetric(hm, TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) + + hdps := hm.IntHistogram().DataPoints() + hdps.Resize(2) + hdp0 := hdps.At(0) + initMetricLabels13(hdp0.LabelsMap()) + hdp0.SetStartTime(TestMetricStartTimestamp) + hdp0.SetTimestamp(TestMetricTimestamp) + hdp0.SetCount(1) + hdp0.SetSum(15) + hdp1 := hdps.At(1) + initMetricLabels2(hdp1.LabelsMap()) + hdp1.SetStartTime(TestMetricStartTimestamp) + hdp1.SetTimestamp(TestMetricTimestamp) + hdp1.SetCount(1) + hdp1.SetSum(15) + hdp1.SetBucketCounts([]uint64{0, 1}) + exemplars := hdp1.Exemplars() + exemplars.Resize(1) + exemplar := exemplars.At(0) + exemplar.SetTimestamp(TestMetricExemplarTimestamp) + exemplar.SetValue(15) + initMetricAttachment(exemplar.FilteredLabels()) + hdp1.SetExplicitBounds([]float64{1}) +} + +func generateOtlpIntHistogramMetric() *otlpmetrics.Metric { + m := generateOtlpMetric(TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) + m.Data.(*otlpmetrics.Metric_IntHistogram).IntHistogram.DataPoints = + []*otlpmetrics.IntHistogramDataPoint{ + { + Labels: generateOtlpMetricLabels13(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Count: 1, + Sum: 15, + }, + { + Labels: generateOtlpMetricLabels2(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Count: 1, + Sum: 15, + BucketCounts: []uint64{0, 1}, + ExplicitBounds: []float64{1}, + Exemplars: []*otlpmetrics.IntExemplar{ + { + FilteredLabels: generateOtlpMetricAttachment(), + TimeUnixNano: uint64(TestMetricExemplarTimestamp), + Value: 15, + }, + }, + }, + } + return m +} + +func initDoubleSummaryMetric(sm pdata.Metric) { + initMetric(sm, TestDoubleSummaryMetricName, pdata.MetricDataTypeDoubleSummary) + + sdps := sm.DoubleSummary().DataPoints() + sdps.Resize(2) + sdp0 := sdps.At(0) + initMetricLabels13(sdp0.LabelsMap()) + sdp0.SetStartTime(TestMetricStartTimestamp) + sdp0.SetTimestamp(TestMetricTimestamp) + sdp0.SetCount(1) + sdp0.SetSum(15) + sdp1 := sdps.At(1) + 
initMetricLabels2(sdp1.LabelsMap()) + sdp1.SetStartTime(TestMetricStartTimestamp) + sdp1.SetTimestamp(TestMetricTimestamp) + sdp1.SetCount(1) + sdp1.SetSum(15) + + quantiles := pdata.NewValueAtQuantileSlice() + quantiles.Resize(1) + quantiles.At(0).SetQuantile(0.01) + quantiles.At(0).SetValue(15) + + quantiles.CopyTo(sdp1.QuantileValues()) +} + +func generateOTLPDoubleSummaryMetric() *otlpmetrics.Metric { + m := generateOtlpMetric(TestDoubleSummaryMetricName, pdata.MetricDataTypeDoubleSummary) + m.Data.(*otlpmetrics.Metric_DoubleSummary).DoubleSummary.DataPoints = + []*otlpmetrics.DoubleSummaryDataPoint{ + { + Labels: generateOtlpMetricLabels13(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Count: 1, + Sum: 15, + }, + { + Labels: generateOtlpMetricLabels2(), + StartTimeUnixNano: uint64(TestMetricStartTimestamp), + TimeUnixNano: uint64(TestMetricTimestamp), + Count: 1, + Sum: 15, + QuantileValues: []*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile{ + { + Quantile: 0.01, + Value: 15, + }, + }, + }, + } + return m +} + +func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { + m.SetName(name) + m.SetDescription("") + m.SetUnit("1") + m.SetDataType(ty) + switch ty { + case pdata.MetricDataTypeIntSum: + sum := m.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + case pdata.MetricDataTypeDoubleSum: + sum := m.DoubleSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + case pdata.MetricDataTypeIntHistogram: + histo := m.IntHistogram() + histo.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + case pdata.MetricDataTypeDoubleHistogram: + histo := m.DoubleHistogram() + histo.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + } +} + +func generateOtlpMetric(name string, ty pdata.MetricDataType) *otlpmetrics.Metric { + m := &otlpmetrics.Metric{ + Name: name, + Description: "", + Unit: "1", + } + switch ty { + case pdata.MetricDataTypeIntGauge: + m.Data = &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}} + case pdata.MetricDataTypeDoubleGauge: + m.Data = &otlpmetrics.Metric_DoubleGauge{DoubleGauge: &otlpmetrics.DoubleGauge{}} + case pdata.MetricDataTypeIntSum: + m.Data = &otlpmetrics.Metric_IntSum{IntSum: &otlpmetrics.IntSum{ + IsMonotonic: true, + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }} + case pdata.MetricDataTypeDoubleSum: + m.Data = &otlpmetrics.Metric_DoubleSum{DoubleSum: &otlpmetrics.DoubleSum{ + IsMonotonic: true, + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }} + case pdata.MetricDataTypeIntHistogram: + m.Data = &otlpmetrics.Metric_IntHistogram{IntHistogram: &otlpmetrics.IntHistogram{ + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }} + case pdata.MetricDataTypeDoubleHistogram: + m.Data = &otlpmetrics.Metric_DoubleHistogram{DoubleHistogram: &otlpmetrics.DoubleHistogram{ + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + }} + case pdata.MetricDataTypeDoubleSummary: + m.Data = &otlpmetrics.Metric_DoubleSummary{DoubleSummary: &otlpmetrics.DoubleSummary{}} + } + return m +} + +func GenerateMetricsManyMetricsSameResource(metricsCount int) pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rs0ilm0 := 
md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + rs0ilm0.Metrics().Resize(metricsCount) + for i := 0; i < metricsCount; i++ { + initCounterIntMetric(rs0ilm0.Metrics().At(i)) + } + return md +} diff --git a/internal/otel_collector/internal/testdata/metric_test.go b/internal/otel_collector/internal/testdata/metric_test.go new file mode 100644 index 00000000000..b686cb9f98e --- /dev/null +++ b/internal/otel_collector/internal/testdata/metric_test.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" +) + +type traceMetricsCase struct { + name string + td pdata.Metrics + otlp []*otlpmetrics.ResourceMetrics +} + +func generateAllMetricsTestCases() []traceMetricsCase { + return []traceMetricsCase{ + { + name: "empty", + td: GenerateMetricsEmpty(), + otlp: generateMetricsOtlpEmpty(), + }, + { + name: "one-empty-resource-metrics", + td: GenerateMetricsOneEmptyResourceMetrics(), + otlp: generateMetricsOtlpOneEmptyResourceMetrics(), + }, + { + name: "no-libraries", + td: GenerateMetricsNoLibraries(), + otlp: generateMetricsOtlpNoLibraries(), + }, + { + name: "one-empty-instrumentation-library", + td: GenerateMetricsOneEmptyInstrumentationLibrary(), + otlp: generateMetricsOtlpOneEmptyInstrumentationLibrary(), + }, + { + name: "one-metric-no-resource", + td: GenerateMetricsOneMetricNoResource(), + otlp: generateMetricsOtlpOneMetricNoResource(), + }, + { + name: "one-metric", + td: GenerateMetricsOneMetric(), + otlp: generateMetricsOtlpOneMetric(), + }, + { + name: "two-metrics", + td: GenerateMetricsTwoMetrics(), + otlp: GenerateMetricsOtlpTwoMetrics(), + }, + { + name: "one-metric-no-labels", + td: GenerateMetricsOneMetricNoLabels(), + otlp: generateMetricsOtlpOneMetricNoLabels(), + }, + { + name: "all-types-no-data-points", + td: GenerateMetricsAllTypesNoDataPoints(), + otlp: generateMetricsOtlpAllTypesNoDataPoints(), + }, + { + name: "all-metric-types", + td: GeneratMetricsAllTypesWithSampleDatapoints(), + otlp: generateMetricsOtlpAllTypesWithSampleDatapoints(), + }, + } +} + +func TestToFromOtlpMetrics(t *testing.T) { + allTestCases := generateAllMetricsTestCases() + // Ensure NumMetricTests gets updated. 
+ for i := range allTestCases { + test := allTestCases[i] + t.Run(test.name, func(t *testing.T) { + td := pdata.MetricsFromOtlp(test.otlp) + assert.EqualValues(t, test.td, td) + otlp := pdata.MetricsToOtlp(td) + assert.EqualValues(t, test.otlp, otlp) + }) + } +} + +func TestGenerateMetricsManyMetricsSameResource(t *testing.T) { + md := GenerateMetricsManyMetricsSameResource(100) + assert.EqualValues(t, 1, md.ResourceMetrics().Len()) + assert.EqualValues(t, 100, md.MetricCount()) +} diff --git a/internal/otel_collector/internal/testdata/resource.go b/internal/otel_collector/internal/testdata/resource.go new file mode 100644 index 00000000000..f4870274180 --- /dev/null +++ b/internal/otel_collector/internal/testdata/resource.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func initResource1(r pdata.Resource) { + initResourceAttributes1(r.Attributes()) +} + +func generateOtlpResource1() otlpresource.Resource { + return otlpresource.Resource{ + Attributes: generateOtlpResourceAttributes1(), + } +} + +func initResource2(r pdata.Resource) { + initResourceAttributes2(r.Attributes()) +} + +func generateOtlpResource2() otlpresource.Resource { + return otlpresource.Resource{ + Attributes: generateOtlpResourceAttributes2(), + } +} diff --git a/internal/otel_collector/internal/testdata/trace.go b/internal/otel_collector/internal/testdata/trace.go new file mode 100644 index 00000000000..94a73f637e6 --- /dev/null +++ b/internal/otel_collector/internal/testdata/trace.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
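metric.go above applies the same two-sided scheme to every metric data type, with initMetric as the single place where type-specific defaults (monotonic sums, cumulative aggregation temporality) are set. A sketch of the shape a consumer can rely on, assuming the getter counterparts of the setters used above (Name, DataType, IsMonotonic):

package sometest

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/internal/testdata"
)

func TestCounterIntShape(t *testing.T) {
	md := testdata.GenerateMetricsOneMetric()
	m := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0)

	assert.Equal(t, "counter-int", m.Name())
	assert.Equal(t, pdata.MetricDataTypeIntSum, m.DataType())
	assert.True(t, m.IntSum().IsMonotonic())
	assert.Equal(t, 2, m.IntSum().DataPoints().Len())
}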
+ +package testdata + +import ( + "time" + + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +var ( + TestSpanStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) + TestSpanStartTimestamp = pdata.TimestampUnixNano(TestSpanStartTime.UnixNano()) + + TestSpanEventTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) + TestSpanEventTimestamp = pdata.TimestampUnixNano(TestSpanEventTime.UnixNano()) + + TestSpanEndTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) + TestSpanEndTimestamp = pdata.TimestampUnixNano(TestSpanEndTime.UnixNano()) +) + +func GenerateTraceDataEmpty() pdata.Traces { + td := pdata.NewTraces() + return td +} + +func generateTraceOtlpEmpty() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans(nil) +} + +func GenerateTraceDataOneEmptyResourceSpans() pdata.Traces { + td := GenerateTraceDataEmpty() + td.ResourceSpans().Resize(1) + return td +} + +func generateTraceOtlpOneEmptyResourceSpans() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + {}, + } +} + +func GenerateTraceDataNoLibraries() pdata.Traces { + td := GenerateTraceDataOneEmptyResourceSpans() + rs0 := td.ResourceSpans().At(0) + initResource1(rs0.Resource()) + return td +} + +func generateTraceOtlpNoLibraries() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + { + Resource: generateOtlpResource1(), + }, + } +} + +func GenerateTraceDataOneEmptyInstrumentationLibrary() pdata.Traces { + td := GenerateTraceDataNoLibraries() + rs0 := td.ResourceSpans().At(0) + rs0.InstrumentationLibrarySpans().Resize(1) + return td +} + +func generateTraceOtlpOneEmptyInstrumentationLibrary() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + { + Resource: generateOtlpResource1(), + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + {}, + }, + }, + } +} + +func GenerateTraceDataOneSpanNoResource() pdata.Traces { + td := GenerateTraceDataOneEmptyResourceSpans() + rs0 := td.ResourceSpans().At(0) + rs0.InstrumentationLibrarySpans().Resize(1) + rs0ils0 := rs0.InstrumentationLibrarySpans().At(0) + rs0ils0.Spans().Resize(1) + fillSpanOne(rs0ils0.Spans().At(0)) + return td +} + +func generateTraceOtlpOneSpanNoResource() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + { + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + generateOtlpSpanOne(), + }, + }, + }, + }, + } +} + +func GenerateTraceDataOneSpan() pdata.Traces { + td := GenerateTraceDataOneEmptyInstrumentationLibrary() + rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + rs0ils0.Spans().Resize(1) + fillSpanOne(rs0ils0.Spans().At(0)) + return td +} + +func generateTraceOtlpOneSpan() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + { + Resource: generateOtlpResource1(), + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + generateOtlpSpanOne(), + }, + }, + }, + }, + } +} + +func GenerateTraceDataTwoSpansSameResource() pdata.Traces { + td := GenerateTraceDataOneEmptyInstrumentationLibrary() + rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + rs0ils0.Spans().Resize(2) + fillSpanOne(rs0ils0.Spans().At(0)) + fillSpanTwo(rs0ils0.Spans().At(1)) + return td +} + +// GenerateTraceOtlpSameResourceTwoSpans returns the OTLP representation of the GenerateTraceOtlpSameResourceTwoSpans. 
+func GenerateTraceOtlpSameResourceTwoSpans() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + { + Resource: generateOtlpResource1(), + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + generateOtlpSpanOne(), + generateOtlpSpanTwo(), + }, + }, + }, + }, + } +} + +func GenerateTraceDataTwoSpansSameResourceOneDifferent() pdata.Traces { + td := pdata.NewTraces() + td.ResourceSpans().Resize(2) + rs0 := td.ResourceSpans().At(0) + initResource1(rs0.Resource()) + rs0.InstrumentationLibrarySpans().Resize(1) + rs0ils0 := rs0.InstrumentationLibrarySpans().At(0) + rs0ils0.Spans().Resize(2) + fillSpanOne(rs0ils0.Spans().At(0)) + fillSpanTwo(rs0ils0.Spans().At(1)) + rs1 := td.ResourceSpans().At(1) + initResource2(rs1.Resource()) + rs1.InstrumentationLibrarySpans().Resize(1) + rs1ils0 := rs1.InstrumentationLibrarySpans().At(0) + rs1ils0.Spans().Resize(1) + fillSpanThree(rs1ils0.Spans().At(0)) + return td +} + +func GenerateTraceDataManySpansSameResource(spansCount int) pdata.Traces { + td := GenerateTraceDataOneEmptyInstrumentationLibrary() + rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + rs0ils0.Spans().Resize(spansCount) + for i := 0; i < spansCount; i++ { + fillSpanOne(rs0ils0.Spans().At(i)) + } + return td +} + +func generateTraceOtlpTwoSpansSameResourceOneDifferent() []*otlptrace.ResourceSpans { + return []*otlptrace.ResourceSpans{ + { + Resource: generateOtlpResource1(), + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + generateOtlpSpanOne(), + generateOtlpSpanTwo(), + }, + }, + }, + }, + { + Resource: generateOtlpResource2(), + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + generateOtlpSpanThree(), + }, + }, + }, + }, + } +} + +func fillSpanOne(span pdata.Span) { + span.SetName("operationA") + span.SetStartTime(TestSpanStartTimestamp) + span.SetEndTime(TestSpanEndTimestamp) + span.SetDroppedAttributesCount(1) + evs := span.Events() + evs.Resize(2) + ev0 := evs.At(0) + ev0.SetTimestamp(TestSpanEventTimestamp) + ev0.SetName("event-with-attr") + initSpanEventAttributes(ev0.Attributes()) + ev0.SetDroppedAttributesCount(2) + ev1 := evs.At(1) + ev1.SetTimestamp(TestSpanEventTimestamp) + ev1.SetName("event") + ev1.SetDroppedAttributesCount(2) + span.SetDroppedEventsCount(1) + status := span.Status() + status.SetCode(pdata.StatusCodeError) + status.SetMessage("status-cancelled") +} + +func generateOtlpSpanOne() *otlptrace.Span { + return &otlptrace.Span{ + Name: "operationA", + StartTimeUnixNano: uint64(TestSpanStartTimestamp), + EndTimeUnixNano: uint64(TestSpanEndTimestamp), + DroppedAttributesCount: 1, + Events: []*otlptrace.Span_Event{ + { + Name: "event-with-attr", + TimeUnixNano: uint64(TestSpanEventTimestamp), + Attributes: generateOtlpSpanEventAttributes(), + DroppedAttributesCount: 2, + }, + { + Name: "event", + TimeUnixNano: uint64(TestSpanEventTimestamp), + DroppedAttributesCount: 2, + }, + }, + DroppedEventsCount: 1, + Status: otlptrace.Status{ + Code: otlptrace.Status_STATUS_CODE_ERROR, + DeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR, + Message: "status-cancelled", + }, + } +} + +func fillSpanTwo(span pdata.Span) { + span.SetName("operationB") + span.SetStartTime(TestSpanStartTimestamp) + span.SetEndTime(TestSpanEndTimestamp) + span.Links().Resize(2) + initSpanLinkAttributes(span.Links().At(0).Attributes()) + span.Links().At(0).SetDroppedAttributesCount(4) 
+ span.Links().At(1).SetDroppedAttributesCount(4) + span.SetDroppedLinksCount(3) +} + +func generateOtlpSpanTwo() *otlptrace.Span { + return &otlptrace.Span{ + Name: "operationB", + StartTimeUnixNano: uint64(TestSpanStartTimestamp), + EndTimeUnixNano: uint64(TestSpanEndTimestamp), + Links: []*otlptrace.Span_Link{ + { + Attributes: generateOtlpSpanLinkAttributes(), + DroppedAttributesCount: 4, + }, + { + DroppedAttributesCount: 4, + }, + }, + DroppedLinksCount: 3, + } +} + +func fillSpanThree(span pdata.Span) { + span.SetName("operationC") + span.SetStartTime(TestSpanStartTimestamp) + span.SetEndTime(TestSpanEndTimestamp) + initSpanAttributes(span.Attributes()) + span.SetDroppedAttributesCount(5) +} + +func generateOtlpSpanThree() *otlptrace.Span { + return &otlptrace.Span{ + Name: "operationC", + StartTimeUnixNano: uint64(TestSpanStartTimestamp), + EndTimeUnixNano: uint64(TestSpanEndTimestamp), + Attributes: generateOtlpSpanAttributes(), + DroppedAttributesCount: 5, + } +} diff --git a/internal/otel_collector/internal/testdata/trace_test.go b/internal/otel_collector/internal/testdata/trace_test.go new file mode 100644 index 00000000000..aebddf6d0dd --- /dev/null +++ b/internal/otel_collector/internal/testdata/trace_test.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +type traceTestCase struct { + name string + td pdata.Traces + otlp []*otlptrace.ResourceSpans +} + +func generateAllTraceTestCases() []traceTestCase { + return []traceTestCase{ + { + name: "empty", + td: GenerateTraceDataEmpty(), + otlp: generateTraceOtlpEmpty(), + }, + { + name: "one-empty-resource-spans", + td: GenerateTraceDataOneEmptyResourceSpans(), + otlp: generateTraceOtlpOneEmptyResourceSpans(), + }, + { + name: "no-libraries", + td: GenerateTraceDataNoLibraries(), + otlp: generateTraceOtlpNoLibraries(), + }, + { + name: "one-empty-instrumentation-library", + td: GenerateTraceDataOneEmptyInstrumentationLibrary(), + otlp: generateTraceOtlpOneEmptyInstrumentationLibrary(), + }, + { + name: "one-span-no-resource", + td: GenerateTraceDataOneSpanNoResource(), + otlp: generateTraceOtlpOneSpanNoResource(), + }, + { + name: "one-span", + td: GenerateTraceDataOneSpan(), + otlp: generateTraceOtlpOneSpan(), + }, + { + name: "two-spans-same-resource", + td: GenerateTraceDataTwoSpansSameResource(), + otlp: GenerateTraceOtlpSameResourceTwoSpans(), + }, + { + name: "two-spans-same-resource-one-different", + td: GenerateTraceDataTwoSpansSameResourceOneDifferent(), + otlp: generateTraceOtlpTwoSpansSameResourceOneDifferent(), + }, + } +} + +func TestToFromOtlpTrace(t *testing.T) { + allTestCases := generateAllTraceTestCases() + // Ensure NumTraceTests gets updated. 
+ for i := range allTestCases { + test := allTestCases[i] + t.Run(test.name, func(t *testing.T) { + td := pdata.TracesFromOtlp(test.otlp) + assert.EqualValues(t, test.td, td) + otlp := pdata.TracesToOtlp(td) + assert.EqualValues(t, test.otlp, otlp) + }) + } +} diff --git a/internal/otel_collector/internal/version/version.go b/internal/otel_collector/internal/version/version.go new file mode 100644 index 00000000000..9e797c896c0 --- /dev/null +++ b/internal/otel_collector/internal/version/version.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "bytes" + "fmt" + "runtime" +) + +const ( + buildDev = "dev" + buildRelease = "release" +) + +// Version variable will be replaced at link time after `make` has been run. +var Version = "latest" + +// GitHash variable will be replaced at link time after `make` has been run. +var GitHash = "" + +// BuildType should be one of (dev, release). +var BuildType = buildDev + +// IsDevBuild returns true if this is a development (local) build. +func IsDevBuild() bool { + return BuildType == buildDev +} + +// IsReleaseBuild returns true if this is a release build. +func IsReleaseBuild() bool { + return BuildType == buildRelease +} + +// InfoVar is a singleton instance of the Info struct. +var InfoVar = Info([][2]string{ + {"Version", Version}, + {"GitHash", GitHash}, + {"BuildType", BuildType}, + {"Goversion", runtime.Version()}, + {"OS", runtime.GOOS}, + {"Architecture", runtime.GOARCH}, + // Add other valuable build-time information here. +}) + +// Info has properties about the build and runtime. +type Info [][2]string + +// String returns a formatted string, with linebreaks, intended to be displayed +// on stdout. +func (i Info) String() string { + buf := new(bytes.Buffer) + maxRow1Alignment := 0 + for _, prop := range i { + if cl0 := len(prop[0]); cl0 > maxRow1Alignment { + maxRow1Alignment = cl0 + } + } + + for _, prop := range i { + // Then finally print them with left alignment + fmt.Fprintf(buf, "%*s %s\n", -maxRow1Alignment, prop[0], prop[1]) + } + return buf.String() +} diff --git a/internal/otel_collector/internal/version/version_test.go b/internal/otel_collector/internal/version/version_test.go new file mode 100644 index 00000000000..0cf8886ec9b --- /dev/null +++ b/internal/otel_collector/internal/version/version_test.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
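version.go above deliberately holds nothing but plain string variables so that the Go linker can overwrite them with -X at build time. The Makefile wiring is outside this patch, but the invocation would look roughly like the following (version and build-type values are illustrative):

go build -ldflags "\
  -X go.opentelemetry.io/collector/internal/version.Version=0.17.0 \
  -X go.opentelemetry.io/collector/internal/version.GitHash=$(git rev-parse HEAD) \
  -X go.opentelemetry.io/collector/internal/version.BuildType=release" .

Because -X rewrites the variables' initial values at link time, InfoVar and IsReleaseBuild() observe the injected values from process start without any runtime plumbing. Note that the replace directive in go.mod points the go.opentelemetry.io/collector module path at ./internal/otel_collector, so the import path used with -X stays the same.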
+
+package version
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInfoString(t *testing.T) {
+	infoString := InfoVar.String()
+	for _, el := range InfoVar {
+		assert.True(t, strings.Contains(infoString, el[0]))
+		assert.True(t, strings.Contains(infoString, el[1]))
+	}
+}
diff --git a/internal/otel_collector/obsreport/doc.go b/internal/otel_collector/obsreport/doc.go
new file mode 100644
index 00000000000..e8124eedcdf
--- /dev/null
+++ b/internal/otel_collector/obsreport/doc.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package obsreport provides unified and consistent observability signals
+// (metrics, tracing, etc.) for components of the OpenTelemetry collector.
+//
+// The function Configure is used to control which signals are going to be
+// generated. The package provides functions for the typical operations of
+// receivers, processors, and exporters.
+//
+// Receivers should use the respective start and end functions according to the
+// data type being received, i.e.:
+//
+// * TraceData receive operations should use the pair:
+//   StartTraceDataReceiveOp/EndTraceDataReceiveOp
+//
+// * Metrics receive operations should use the pair:
+//   StartMetricsReceiveOp/EndMetricsReceiveOp
+//
+// Similarly for exporters:
+//
+// * TraceData export operations should use the pair:
+//   StartTraceDataExportOp/EndTraceDataExportOp
+//
+// * Metrics export operations should use the pair:
+//   StartMetricsExportOp/EndMetricsExportOp
+//
+// The package is capable of generating legacy metrics by using the
+// observability package, allowing a controlled transition from the legacy to
+// the new metrics. The goal is to eventually remove the legacy metrics and use
+// only the new ones.
+//
+// The main differences regarding the legacy metrics are:
+//
+// 1. "Amount of metric data" is measured as metric points (i.e.: a single value
+// in time), in contrast with the number of time series used by the legacy
+// metrics. The number of metric data points is a more general concept across
+// the various metric formats.
+//
+// 2. Exporters measure the number of items, i.e.: the number of spans or metric
+// points, that were sent and the ones for which the attempt to send failed.
+// For more information about this see the Notes below about reporting data loss.
+//
+// 3. All measurements of "amount of data" used in the new metrics for receivers
+// and exporters should reflect their native formats, not the internal format
+// used in the Collector. This is to facilitate reconciliation between Collector,
+// client and backend. For instance: certain metric formats do not provide
+// direct support for histograms and have predefined conventions to represent
+// those; this conversion may end with a different number of time series and
+// data points than the internal Collector format.
+//
+// Notes:
+//
+// * Data loss should be recorded only when the component itself removes the data
+// from the pipeline. Legacy metrics for receivers used "dropped" in their names,
+// but these could be non-zero under normal operations and reflected no actual
+// data loss when components like the "queued_retry" are used. The new metrics
+// were renamed to avoid this misunderstanding. Here are the general
+// recommendations to report data loss:
+//
+// * Receivers reporting errors to clients typically result in the client
+// re-sending the same data, so it is more correct to report "receive errors",
+// not actual data loss.
+//
+// * Exporters need to report individual failures to send data, but on
+// typical production pipelines processors usually take care of retries,
+// so these should be reported as "send errors".
+//
+// * Data "filtered out" should have its own metrics and not be confused
+// with dropped data.
+//
+// Naming Convention for New Metrics
+//
+// Common Metrics:
+// Metrics shared by different components should follow the convention below:
+//
+// `<component>/<metric_name>`
+//
+// As a label the metric should have at least `{<component>="<name>"}` where
+// `<name>` is the name used in the configuration for the instance of the
+// component, e.g.:
+//
+// `receiver/accepted_spans{receiver="opencensus",...}`
+// `exporter/sent_spans{exporter="jaeger/prod",...}`
+//
+// Component Specific Metrics:
+// These metrics are implemented by specific components, e.g.: batch processor.
+// The general pattern is the same as the common metrics but with the addition
+// of the component type (as it appears in the configuration) before the actual
+// metric:
+//
+// `<component>/<type>/<metric_name>`
+//
+// Even metrics exclusive to a single type should follow the conventions above
+// and also include the type (as written in the configuration) as part of the
+// metric name, since there could be multiple instances of the same type in
+// different pipelines, e.g.:
+//
+// `processor/batch/batch_size_trigger_send{processor="batch/dev",...}`
+//
+package obsreport
diff --git a/internal/otel_collector/obsreport/observability.go b/internal/otel_collector/obsreport/observability.go
new file mode 100644
index 00000000000..8f04bce2c5d
--- /dev/null
+++ b/internal/otel_collector/obsreport/observability.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreport
+
+// This file contains helpers that are useful to add observability
+// with metrics and tracing using OpenCensus to the various pieces
+// of the service.
+
+import (
+	"go.opencensus.io/plugin/ocgrpc"
+	"google.golang.org/grpc"
+)
+
+// GRPCServerWithObservabilityEnabled creates a gRPC server that at a bare minimum has
+// the OpenCensus ocgrpc server stats handler enabled for tracing and stats.
+// Use it instead of invoking grpc.NewServer directly.
+func GRPCServerWithObservabilityEnabled(extraOpts ...grpc.ServerOption) *grpc.Server {
+	opts := append(extraOpts, grpc.StatsHandler(&ocgrpc.ServerHandler{}))
+	return grpc.NewServer(opts...)
+}
diff --git a/internal/otel_collector/obsreport/obsreport.go b/internal/otel_collector/obsreport/obsreport.go
new file mode 100644
index 00000000000..552c11e34a5
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreport.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreport
+
+import (
+	"context"
+	"strings"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+	"go.opencensus.io/trace"
+
+	"go.opentelemetry.io/collector/config/configtelemetry"
+)
+
+const (
+	nameSep = "/"
+)
+
+var (
+	gLevel = configtelemetry.LevelBasic
+
+	okStatus = trace.Status{Code: trace.StatusCodeOK}
+)
+
+// setParentLink tries to retrieve a span from parentCtx and, if one exists,
+// sets its SpanID, TraceID as a link to the given child Span.
+// It returns true only if it retrieved a parent span from the context.
+//
+// This is typically used when the parentCtx may already have a trace and is
+// long lived (e.g.: a gRPC stream or TCP connection) and one desires distinct
+// traces for individual operations under the long lived trace associated with
+// the parentCtx. This function is a helper that encapsulates the work of
+// linking the short lived trace/span to the longer one.
+func setParentLink(parentCtx context.Context, childSpan *trace.Span) bool {
+	parentSpanFromRPC := trace.FromContext(parentCtx)
+	if parentSpanFromRPC == nil {
+		return false
+	}
+
+	psc := parentSpanFromRPC.SpanContext()
+	childSpan.AddLink(trace.Link{
+		SpanID:  psc.SpanID,
+		TraceID: psc.TraceID,
+		Type:    trace.LinkTypeParent,
+	})
+	return true
+}
+
+// Configure is used to control the settings that will be used by the obsreport
+// package.
+func Configure(level configtelemetry.Level) (views []*view.View) {
+	gLevel = level
+
+	if gLevel != configtelemetry.LevelNone {
+		gProcessorObsReport.level = level
+		views = append(views, AllViews()...)
+	}
+
+	return views
+}
+
+func buildComponentPrefix(componentPrefix, configType string) string {
+	if !strings.HasSuffix(componentPrefix, nameSep) {
+		componentPrefix += nameSep
+	}
+	if configType == "" {
+		return componentPrefix
+	}
+	return componentPrefix + configType + nameSep
+}
+
+// AllViews returns the list of all views that need to be configured.
+func AllViews() (views []*view.View) {
+	// Receiver views.
+	measures := []*stats.Int64Measure{
+		mReceiverAcceptedSpans,
+		mReceiverRefusedSpans,
+		mReceiverAcceptedMetricPoints,
+		mReceiverRefusedMetricPoints,
+		mReceiverAcceptedLogRecords,
+		mReceiverRefusedLogRecords,
+	}
+	tagKeys := []tag.Key{
+		tagKeyReceiver, tagKeyTransport,
+	}
+	views = append(views, genViews(measures, tagKeys, view.Sum())...)
+
+	// Scraper views.
+	measures = []*stats.Int64Measure{
+		mScraperScrapedMetricPoints,
+		mScraperErroredMetricPoints,
+	}
+	tagKeys = []tag.Key{tagKeyReceiver, tagKeyScraper}
+	views = append(views, genViews(measures, tagKeys, view.Sum())...)
+
+	// Exporter views.
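+	// (The exporter and processor groups below reuse genViews the same way:
+	// Sum aggregation, with only the measures and tag keys changing.)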
+ measures = []*stats.Int64Measure{ + mExporterSentSpans, + mExporterFailedToSendSpans, + mExporterSentMetricPoints, + mExporterFailedToSendMetricPoints, + mExporterSentLogRecords, + mExporterFailedToSendLogRecords, + } + tagKeys = []tag.Key{tagKeyExporter} + views = append(views, genViews(measures, tagKeys, view.Sum())...) + + // Processor views. + measures = []*stats.Int64Measure{ + mProcessorAcceptedSpans, + mProcessorRefusedSpans, + mProcessorDroppedSpans, + mProcessorAcceptedMetricPoints, + mProcessorRefusedMetricPoints, + mProcessorDroppedMetricPoints, + mProcessorAcceptedLogRecords, + mProcessorRefusedLogRecords, + mProcessorDroppedLogRecords, + } + tagKeys = []tag.Key{tagKeyProcessor} + views = append(views, genViews(measures, tagKeys, view.Sum())...) + + return views +} + +func genViews( + measures []*stats.Int64Measure, + tagKeys []tag.Key, + aggregation *view.Aggregation, +) []*view.View { + views := make([]*view.View, 0, len(measures)) + for _, measure := range measures { + views = append(views, &view.View{ + Name: measure.Name(), + Description: measure.Description(), + TagKeys: tagKeys, + Measure: measure, + Aggregation: aggregation, + }) + } + return views +} + +func errToStatus(err error) trace.Status { + if err != nil { + return trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()} + } + return okStatus +} diff --git a/internal/otel_collector/obsreport/obsreport_exporter.go b/internal/otel_collector/obsreport/obsreport_exporter.go new file mode 100644 index 00000000000..5059b5114b3 --- /dev/null +++ b/internal/otel_collector/obsreport/obsreport_exporter.go @@ -0,0 +1,190 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreport + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/config/configtelemetry" +) + +const ( + // Key used to identify exporters in metrics and traces. + ExporterKey = "exporter" + + // Key used to track spans sent by exporters. + SentSpansKey = "sent_spans" + // Key used to track spans that failed to be sent by exporters. + FailedToSendSpansKey = "send_failed_spans" + + // Key used to track metric points sent by exporters. + SentMetricPointsKey = "sent_metric_points" + // Key used to track metric points that failed to be sent by exporters. + FailedToSendMetricPointsKey = "send_failed_metric_points" + + // Key used to track logs sent by exporters. + SentLogRecordsKey = "sent_log_records" + // Key used to track logs that failed to be sent by exporters. + FailedToSendLogRecordsKey = "send_failed_log_records" +) + +var ( + tagKeyExporter, _ = tag.NewKey(ExporterKey) + + exporterPrefix = ExporterKey + nameSep + exportTraceDataOperationSuffix = nameSep + "traces" + exportMetricsOperationSuffix = nameSep + "metrics" + exportLogsOperationSuffix = nameSep + "logs" + + // Exporter metrics. 
+	// Any count of data items below is in the final format
+	// that they were sent, reasoning: reconciliation is easier if measurements
+	// on backend and exporter are expected to be the same. Translation issues
+	// that result in a different number of elements should be reported in a
+	// separate way.
+	mExporterSentSpans = stats.Int64(
+		exporterPrefix+SentSpansKey,
+		"Number of spans successfully sent to destination.",
+		stats.UnitDimensionless)
+	mExporterFailedToSendSpans = stats.Int64(
+		exporterPrefix+FailedToSendSpansKey,
+		"Number of spans in failed attempts to send to destination.",
+		stats.UnitDimensionless)
+	mExporterSentMetricPoints = stats.Int64(
+		exporterPrefix+SentMetricPointsKey,
+		"Number of metric points successfully sent to destination.",
+		stats.UnitDimensionless)
+	mExporterFailedToSendMetricPoints = stats.Int64(
+		exporterPrefix+FailedToSendMetricPointsKey,
+		"Number of metric points in failed attempts to send to destination.",
+		stats.UnitDimensionless)
+	mExporterSentLogRecords = stats.Int64(
+		exporterPrefix+SentLogRecordsKey,
+		"Number of log records successfully sent to destination.",
+		stats.UnitDimensionless)
+	mExporterFailedToSendLogRecords = stats.Int64(
+		exporterPrefix+FailedToSendLogRecordsKey,
+		"Number of log records in failed attempts to send to destination.",
+		stats.UnitDimensionless)
+)
+
+// ExporterContext adds the keys used when recording observability metrics to
+// the given context, returning the newly created context. This context should
+// be used in related calls to the obsreport functions so metrics are properly
+// recorded.
+func ExporterContext(ctx context.Context, exporterName string) context.Context {
+	ctx, _ = tag.New(ctx, tag.Upsert(tagKeyExporter, exporterName, tag.WithTTL(tag.TTLNoPropagation)))
+	return ctx
+}
+
+type ExporterObsReport struct {
+	level        configtelemetry.Level
+	exporterName string
+	mutators     []tag.Mutator
+}
+
+func NewExporterObsReport(level configtelemetry.Level, exporterName string) *ExporterObsReport {
+	return &ExporterObsReport{
+		level:        level,
+		exporterName: exporterName,
+		mutators:     []tag.Mutator{tag.Upsert(tagKeyExporter, exporterName, tag.WithTTL(tag.TTLNoPropagation))},
+	}
+}
+
+// StartTracesExportOp is called at the start of an Export operation.
+// The returned context should be used in other calls to the ExporterObsReport functions
+// dealing with the same export operation.
+func (eor *ExporterObsReport) StartTracesExportOp(ctx context.Context) context.Context {
+	return eor.startSpan(ctx, exportTraceDataOperationSuffix)
+}
+
+// EndTracesExportOp completes the export operation that was started with StartTracesExportOp.
+func (eor *ExporterObsReport) EndTracesExportOp(ctx context.Context, numSpans int, err error) {
+	numSent, numFailedToSend := toNumItems(numSpans, err)
+	recordMetrics(ctx, numSent, numFailedToSend, mExporterSentSpans, mExporterFailedToSendSpans)
+	endSpan(ctx, err, numSent, numFailedToSend, SentSpansKey, FailedToSendSpansKey)
+}
+
+// StartMetricsExportOp is called at the start of an Export operation.
+// The returned context should be used in other calls to the ExporterObsReport functions
+// dealing with the same export operation.
+func (eor *ExporterObsReport) StartMetricsExportOp(ctx context.Context) context.Context {
+	return eor.startSpan(ctx, exportMetricsOperationSuffix)
+}
+
+// EndMetricsExportOp completes the export operation that was started with
+// StartMetricsExportOp.
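+//
+// A minimal usage sketch (editor's illustration; push, md and numPoints are
+// hypothetical caller-side names, only the obsreport calls are real):
+//
+//   obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, "my-exporter")
+//   ctx = obsrep.StartMetricsExportOp(ctx)
+//   err := push(ctx, md) // attempt to send to the destination
+//   obsrep.EndMetricsExportOp(ctx, numPoints, err)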
+func (eor *ExporterObsReport) EndMetricsExportOp(ctx context.Context, numMetricPoints int, err error) { + numSent, numFailedToSend := toNumItems(numMetricPoints, err) + recordMetrics(ctx, numSent, numFailedToSend, mExporterSentMetricPoints, mExporterFailedToSendMetricPoints) + endSpan(ctx, err, numSent, numFailedToSend, SentMetricPointsKey, FailedToSendMetricPointsKey) +} + +// StartLogsExportOp is called at the start of an Export operation. +// The returned context should be used in other calls to the ExporterObsReport functions +// dealing with the same export operation. +func (eor *ExporterObsReport) StartLogsExportOp(ctx context.Context) context.Context { + return eor.startSpan(ctx, exportLogsOperationSuffix) +} + +// EndLogsExportOp completes the export operation that was started with StartLogsExportOp. +func (eor *ExporterObsReport) EndLogsExportOp(ctx context.Context, numLogRecords int, err error) { + numSent, numFailedToSend := toNumItems(numLogRecords, err) + recordMetrics(ctx, numSent, numFailedToSend, mExporterSentLogRecords, mExporterFailedToSendLogRecords) + endSpan(ctx, err, numSent, numFailedToSend, SentLogRecordsKey, FailedToSendLogRecordsKey) +} + +// startSpan creates the span used to trace the operation. Returning +// the updated context and the created span. +func (eor *ExporterObsReport) startSpan(ctx context.Context, operationSuffix string) context.Context { + spanName := exporterPrefix + eor.exporterName + operationSuffix + ctx, _ = trace.StartSpan(ctx, spanName) + return ctx +} + +func recordMetrics(ctx context.Context, numSent, numFailedToSend int64, sentMeasure, failedToSendMeasure *stats.Int64Measure) { + if gLevel == configtelemetry.LevelNone { + return + } + stats.Record( + ctx, + sentMeasure.M(numSent), + failedToSendMeasure.M(numFailedToSend)) +} + +func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) { + span := trace.FromContext(ctx) + // End span according to errors. + if span.IsRecordingEvents() { + span.AddAttributes( + trace.Int64Attribute( + sentItemsKey, numSent), + trace.Int64Attribute( + failedToSendItemsKey, numFailedToSend), + ) + span.SetStatus(errToStatus(err)) + } + span.End() +} + +func toNumItems(numExportedItems int, err error) (int64, int64) { + if err != nil { + return 0, int64(numExportedItems) + } + return int64(numExportedItems), 0 +} diff --git a/internal/otel_collector/obsreport/obsreport_processor.go b/internal/otel_collector/obsreport/obsreport_processor.go new file mode 100644 index 00000000000..2725e83d72d --- /dev/null +++ b/internal/otel_collector/obsreport/obsreport_processor.go @@ -0,0 +1,246 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreport + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "go.opentelemetry.io/collector/config/configtelemetry" +) + +const ( + // Key used to identify processors in metrics and traces. 
+	ProcessorKey = "processor"
+
+	// Key used to identify spans dropped by the Collector.
+	DroppedSpansKey = "dropped_spans"
+
+	// Key used to identify metric points dropped by the Collector.
+	DroppedMetricPointsKey = "dropped_metric_points"
+
+	// Key used to identify log records dropped by the Collector.
+	DroppedLogRecordsKey = "dropped_log_records"
+)
+
+var (
+	tagKeyProcessor, _ = tag.NewKey(ProcessorKey)
+
+	processorPrefix = ProcessorKey + nameSep
+
+	// Processor metrics. Any count of data items below is in the internal format
+	// of the collector since processors only deal with the internal format.
+	mProcessorAcceptedSpans = stats.Int64(
+		processorPrefix+AcceptedSpansKey,
+		"Number of spans successfully pushed into the next component in the pipeline.",
+		stats.UnitDimensionless)
+	mProcessorRefusedSpans = stats.Int64(
+		processorPrefix+RefusedSpansKey,
+		"Number of spans that were rejected by the next component in the pipeline.",
+		stats.UnitDimensionless)
+	mProcessorDroppedSpans = stats.Int64(
+		processorPrefix+DroppedSpansKey,
+		"Number of spans that were dropped.",
+		stats.UnitDimensionless)
+	mProcessorAcceptedMetricPoints = stats.Int64(
+		processorPrefix+AcceptedMetricPointsKey,
+		"Number of metric points successfully pushed into the next component in the pipeline.",
+		stats.UnitDimensionless)
+	mProcessorRefusedMetricPoints = stats.Int64(
+		processorPrefix+RefusedMetricPointsKey,
+		"Number of metric points that were rejected by the next component in the pipeline.",
+		stats.UnitDimensionless)
+	mProcessorDroppedMetricPoints = stats.Int64(
+		processorPrefix+DroppedMetricPointsKey,
+		"Number of metric points that were dropped.",
+		stats.UnitDimensionless)
+	mProcessorAcceptedLogRecords = stats.Int64(
+		processorPrefix+AcceptedLogRecordsKey,
+		"Number of log records successfully pushed into the next component in the pipeline.",
+		stats.UnitDimensionless)
+	mProcessorRefusedLogRecords = stats.Int64(
+		processorPrefix+RefusedLogRecordsKey,
+		"Number of log records that were rejected by the next component in the pipeline.",
+		stats.UnitDimensionless)
+	mProcessorDroppedLogRecords = stats.Int64(
+		processorPrefix+DroppedLogRecordsKey,
+		"Number of log records that were dropped.",
+		stats.UnitDimensionless)
+)
+
+// BuildProcessorCustomMetricName is used to build a metric name following
+// the standards used in the Collector. The configType should be the same
+// value used to identify the type in the config.
+func BuildProcessorCustomMetricName(configType, metric string) string {
+	return buildComponentPrefix(processorPrefix, configType) + metric
+}
+
+// ProcessorMetricViews builds the metric views for custom metrics of processors.
+func ProcessorMetricViews(configType string, legacyViews []*view.View) []*view.View {
+	var allViews []*view.View
+	if gLevel != configtelemetry.LevelNone {
+		for _, legacyView := range legacyViews {
+			// Ignore any nil entry and views without measure or aggregation.
+			// These can't be registered but some code registering legacy views may
+			// ignore the errors.
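+			// Each usable legacy view is shallow-copied and renamed below so it
+			// is exposed under the processor/<type>/ prefix built above.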
+ if legacyView == nil || legacyView.Measure == nil || legacyView.Aggregation == nil { + continue + } + newView := *legacyView + viewName := legacyView.Name + if viewName == "" { + viewName = legacyView.Measure.Name() + } + newView.Name = BuildProcessorCustomMetricName(configType, viewName) + allViews = append(allViews, &newView) + } + } + + return allViews +} + +var gProcessorObsReport = &ProcessorObsReport{level: configtelemetry.LevelNone} + +type ProcessorObsReport struct { + level configtelemetry.Level + mutators []tag.Mutator +} + +func NewProcessorObsReport(level configtelemetry.Level, processorName string) *ProcessorObsReport { + return &ProcessorObsReport{ + level: level, + mutators: []tag.Mutator{tag.Upsert(tagKeyProcessor, processorName, tag.WithTTL(tag.TTLNoPropagation))}, + } +} + +// TracesAccepted reports that the trace data was accepted. +func (por *ProcessorObsReport) TracesAccepted(ctx context.Context, numSpans int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedSpans.M(int64(numSpans)), + mProcessorRefusedSpans.M(0), + mProcessorDroppedSpans.M(0), + ) + } +} + +// TracesRefused reports that the trace data was refused. +func (por *ProcessorObsReport) TracesRefused(ctx context.Context, numSpans int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedSpans.M(0), + mProcessorRefusedSpans.M(int64(numSpans)), + mProcessorDroppedSpans.M(0), + ) + } +} + +// TracesDropped reports that the trace data was dropped. +func (por *ProcessorObsReport) TracesDropped(ctx context.Context, numSpans int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedSpans.M(0), + mProcessorRefusedSpans.M(0), + mProcessorDroppedSpans.M(int64(numSpans)), + ) + } +} + +// MetricsAccepted reports that the metrics were accepted. +func (por *ProcessorObsReport) MetricsAccepted(ctx context.Context, numPoints int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedMetricPoints.M(int64(numPoints)), + mProcessorRefusedMetricPoints.M(0), + mProcessorDroppedMetricPoints.M(0), + ) + } +} + +// MetricsRefused reports that the metrics were refused. +func (por *ProcessorObsReport) MetricsRefused(ctx context.Context, numPoints int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedMetricPoints.M(0), + mProcessorRefusedMetricPoints.M(int64(numPoints)), + mProcessorDroppedMetricPoints.M(0), + ) + } +} + +// MetricsDropped reports that the metrics were dropped. +func (por *ProcessorObsReport) MetricsDropped(ctx context.Context, numPoints int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedMetricPoints.M(0), + mProcessorRefusedMetricPoints.M(0), + mProcessorDroppedMetricPoints.M(int64(numPoints)), + ) + } +} + +// LogsAccepted reports that the logs were accepted. +func (por *ProcessorObsReport) LogsAccepted(ctx context.Context, numRecords int) { + if por.level != configtelemetry.LevelNone { + stats.RecordWithTags( + ctx, + por.mutators, + mProcessorAcceptedLogRecords.M(int64(numRecords)), + mProcessorRefusedLogRecords.M(0), + mProcessorDroppedLogRecords.M(0), + ) + } +} + +// LogsRefused reports that the logs were refused. 
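+//
+// Editor's sketch of the accepted/refused pairing (next and ld are
+// hypothetical caller-side names):
+//
+//   if err := next.ConsumeLogs(ctx, ld); err != nil {
+//       por.LogsRefused(ctx, ld.LogRecordCount())
+//   } else {
+//       por.LogsAccepted(ctx, ld.LogRecordCount())
+//   }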
+func (por *ProcessorObsReport) LogsRefused(ctx context.Context, numRecords int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			mProcessorAcceptedLogRecords.M(0),
+			mProcessorRefusedLogRecords.M(int64(numRecords)),
+			mProcessorDroppedLogRecords.M(0),
+		)
+	}
+}
+
+// LogsDropped reports that the logs were dropped.
+func (por *ProcessorObsReport) LogsDropped(ctx context.Context, numRecords int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			mProcessorAcceptedLogRecords.M(0),
+			mProcessorRefusedLogRecords.M(0),
+			mProcessorDroppedLogRecords.M(int64(numRecords)),
+		)
+	}
+}
diff --git a/internal/otel_collector/obsreport/obsreport_receiver.go b/internal/otel_collector/obsreport/obsreport_receiver.go
new file mode 100644
index 00000000000..33148a03372
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreport_receiver.go
@@ -0,0 +1,364 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreport
+
+import (
+	"context"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+	"go.opencensus.io/trace"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+)
+
+const (
+	// Key used to identify receivers in metrics and traces.
+	ReceiverKey = "receiver"
+	// Key used to identify the transport used to receive the data.
+	TransportKey = "transport"
+	// Key used to identify the format of the data received.
+	FormatKey = "format"
+
+	// Key used to identify spans accepted by the Collector.
+	AcceptedSpansKey = "accepted_spans"
+	// Key used to identify spans refused (i.e.: not ingested) by the Collector.
+	RefusedSpansKey = "refused_spans"
+
+	// Key used to identify metric points accepted by the Collector.
+	AcceptedMetricPointsKey = "accepted_metric_points"
+	// Key used to identify metric points refused (i.e.: not ingested) by the
+	// Collector.
+	RefusedMetricPointsKey = "refused_metric_points"
+
+	// Key used to identify log records accepted by the Collector.
+	AcceptedLogRecordsKey = "accepted_log_records"
+	// Key used to identify log records refused (i.e.: not ingested) by the
+	// Collector.
+	RefusedLogRecordsKey = "refused_log_records"
+)
+
+var (
+	tagKeyReceiver, _  = tag.NewKey(ReceiverKey)
+	tagKeyTransport, _ = tag.NewKey(TransportKey)
+
+	receiverPrefix                  = ReceiverKey + nameSep
+	receiveTraceDataOperationSuffix = nameSep + "TraceDataReceived"
+	receiverMetricsOperationSuffix  = nameSep + "MetricsReceived"
+	receiverLogsOperationSuffix    = nameSep + "LogsReceived"
+
+	// Receiver metrics. Any count of data items below is in the original format
+	// that they were received, reasoning: reconciliation is easier if measurements
+	// on clients and receiver are expected to be the same. Translation issues
+	// that result in a different number of elements should be reported in a
+	// separate way.
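+	// With receiverPrefix applied, the names below render as, e.g.,
+	// "receiver/accepted_spans", matching the convention described in doc.go.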
+	mReceiverAcceptedSpans = stats.Int64(
+		receiverPrefix+AcceptedSpansKey,
+		"Number of spans successfully pushed into the pipeline.",
+		stats.UnitDimensionless)
+	mReceiverRefusedSpans = stats.Int64(
+		receiverPrefix+RefusedSpansKey,
+		"Number of spans that could not be pushed into the pipeline.",
+		stats.UnitDimensionless)
+	mReceiverAcceptedMetricPoints = stats.Int64(
+		receiverPrefix+AcceptedMetricPointsKey,
+		"Number of metric points successfully pushed into the pipeline.",
+		stats.UnitDimensionless)
+	mReceiverRefusedMetricPoints = stats.Int64(
+		receiverPrefix+RefusedMetricPointsKey,
+		"Number of metric points that could not be pushed into the pipeline.",
+		stats.UnitDimensionless)
+	mReceiverAcceptedLogRecords = stats.Int64(
+		receiverPrefix+AcceptedLogRecordsKey,
+		"Number of log records successfully pushed into the pipeline.",
+		stats.UnitDimensionless)
+	mReceiverRefusedLogRecords = stats.Int64(
+		receiverPrefix+RefusedLogRecordsKey,
+		"Number of log records that could not be pushed into the pipeline.",
+		stats.UnitDimensionless)
+)
+
+// StartReceiveOptions has the options related to starting a receive operation.
+type StartReceiveOptions struct {
+	// LongLivedCtx when true indicates that the context passed in the call
+	// outlives the individual receive operation. See WithLongLivedCtx() for
+	// more information.
+	LongLivedCtx bool
+}
+
+// StartReceiveOption applies changes to StartReceiveOptions.
+type StartReceiveOption func(*StartReceiveOptions)
+
+// WithLongLivedCtx indicates that the context passed in the call outlives the
+// receive operation at hand. Typically the long lived context is associated
+// with a connection, e.g.: a gRPC stream or a TCP connection, for which many
+// batches of data are received in individual operations without a corresponding
+// new context per operation.
+//
+// Example:
+//
+// func (r *receiver) ClientConnect(ctx context.Context, rcvChan <-chan consumerdata.TraceData) {
+//     longLivedCtx := obsreport.ReceiverContext(ctx, r.config.Name(), r.transport)
+//     for {
+//         // Since the context outlives the individual receive operations call obsreport using
+//         // WithLongLivedCtx().
+//         ctx := obsreport.StartTraceDataReceiveOp(
+//             longLivedCtx,
+//             r.config.Name(),
+//             r.transport,
+//             obsreport.WithLongLivedCtx())
+//
+//         td, ok := <-rcvChan
+//         var err error
+//         if ok {
+//             err = r.nextConsumer.ConsumeTraceData(ctx, td)
+//         }
+//         obsreport.EndTraceDataReceiveOp(
+//             ctx,
+//             r.format,
+//             len(td.Spans),
+//             err)
+//         if !ok {
+//             break
+//         }
+//     }
+// }
+//
+func WithLongLivedCtx() StartReceiveOption {
+	return func(opts *StartReceiveOptions) {
+		opts.LongLivedCtx = true
+	}
+}
+
+// StartTraceDataReceiveOp is called when a request is received from a client.
+// The returned context should be used in other calls to the obsreport functions
+// dealing with the same receive operation.
+func StartTraceDataReceiveOp(
+	operationCtx context.Context,
+	receiver string,
+	transport string,
+	opt ...StartReceiveOption,
+) context.Context {
+	return traceReceiveOp(
+		operationCtx,
+		receiver,
+		transport,
+		receiveTraceDataOperationSuffix,
+		opt...)
+}
+
+// EndTraceDataReceiveOp completes the receive operation that was started with
+// StartTraceDataReceiveOp.
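+//
+// A per-request sketch, in contrast with the long lived example on
+// WithLongLivedCtx above (nextConsumer, td and format are illustrative):
+//
+//   ctx := obsreport.StartTraceDataReceiveOp(receiverCtx, receiverName, transport)
+//   err := nextConsumer.ConsumeTraces(ctx, td)
+//   obsreport.EndTraceDataReceiveOp(ctx, format, td.SpanCount(), err)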
+func EndTraceDataReceiveOp( + receiverCtx context.Context, + format string, + numReceivedSpans int, + err error, +) { + endReceiveOp( + receiverCtx, + format, + numReceivedSpans, + err, + configmodels.TracesDataType, + ) +} + +// StartLogsReceiveOp is called when a request is received from a client. +// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. +func StartLogsReceiveOp( + operationCtx context.Context, + receiver string, + transport string, + opt ...StartReceiveOption, +) context.Context { + return traceReceiveOp( + operationCtx, + receiver, + transport, + receiverLogsOperationSuffix, + opt...) +} + +// EndLogsReceiveOp completes the receive operation that was started with +// StartLogsReceiveOp. +func EndLogsReceiveOp( + receiverCtx context.Context, + format string, + numReceivedLogRecords int, + err error, +) { + endReceiveOp( + receiverCtx, + format, + numReceivedLogRecords, + err, + configmodels.LogsDataType, + ) +} + +// StartMetricsReceiveOp is called when a request is received from a client. +// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. +func StartMetricsReceiveOp( + operationCtx context.Context, + receiver string, + transport string, + opt ...StartReceiveOption, +) context.Context { + return traceReceiveOp( + operationCtx, + receiver, + transport, + receiverMetricsOperationSuffix, + opt...) +} + +// EndMetricsReceiveOp completes the receive operation that was started with +// StartMetricsReceiveOp. +func EndMetricsReceiveOp( + receiverCtx context.Context, + format string, + numReceivedPoints int, + err error, +) { + endReceiveOp( + receiverCtx, + format, + numReceivedPoints, + err, + configmodels.MetricsDataType, + ) +} + +// ReceiverContext adds the keys used when recording observability metrics to +// the given context returning the newly created context. This context should +// be used in related calls to the obsreport functions so metrics are properly +// recorded. +func ReceiverContext( + ctx context.Context, + receiver string, + transport string, +) context.Context { + ctx, _ = tag.New(ctx, + tag.Upsert(tagKeyReceiver, receiver, tag.WithTTL(tag.TTLNoPropagation)), + tag.Upsert(tagKeyTransport, transport, tag.WithTTL(tag.TTLNoPropagation))) + + return ctx +} + +// traceReceiveOp creates the span used to trace the operation. Returning +// the updated context with the created span. +func traceReceiveOp( + receiverCtx context.Context, + receiverName string, + transport string, + operationSuffix string, + opt ...StartReceiveOption, +) context.Context { + var opts StartReceiveOptions + for _, o := range opt { + o(&opts) + } + + var ctx context.Context + var span *trace.Span + spanName := receiverPrefix + receiverName + operationSuffix + if !opts.LongLivedCtx { + ctx, span = trace.StartSpan(receiverCtx, spanName) + } else { + // Since the receiverCtx is long lived do not use it to start the span. + // This way this trace ends when the EndTraceDataReceiveOp is called. + // Here is safe to ignore the returned context since it is not used below. + _, span = trace.StartSpan(context.Background(), spanName) + + // If the long lived context has a parent span, then add it as a parent link. 
+ setParentLink(receiverCtx, span) + + ctx = trace.NewContext(receiverCtx, span) + } + + if transport != "" { + span.AddAttributes(trace.StringAttribute(TransportKey, transport)) + } + return ctx +} + +// endReceiveOp records the observability signals at the end of an operation. +func endReceiveOp( + receiverCtx context.Context, + format string, + numReceivedItems int, + err error, + dataType configmodels.DataType, +) { + numAccepted := numReceivedItems + numRefused := 0 + if err != nil { + numAccepted = 0 + numRefused = numReceivedItems + } + + span := trace.FromContext(receiverCtx) + + if gLevel != configtelemetry.LevelNone { + var acceptedMeasure, refusedMeasure *stats.Int64Measure + switch dataType { + case configmodels.TracesDataType: + acceptedMeasure = mReceiverAcceptedSpans + refusedMeasure = mReceiverRefusedSpans + case configmodels.MetricsDataType: + acceptedMeasure = mReceiverAcceptedMetricPoints + refusedMeasure = mReceiverRefusedMetricPoints + case configmodels.LogsDataType: + acceptedMeasure = mReceiverAcceptedLogRecords + refusedMeasure = mReceiverRefusedLogRecords + } + + stats.Record( + receiverCtx, + acceptedMeasure.M(int64(numAccepted)), + refusedMeasure.M(int64(numRefused))) + } + + // end span according to errors + if span.IsRecordingEvents() { + var acceptedItemsKey, refusedItemsKey string + switch dataType { + case configmodels.TracesDataType: + acceptedItemsKey = AcceptedSpansKey + refusedItemsKey = RefusedSpansKey + case configmodels.MetricsDataType: + acceptedItemsKey = AcceptedMetricPointsKey + refusedItemsKey = RefusedMetricPointsKey + case configmodels.LogsDataType: + acceptedItemsKey = AcceptedLogRecordsKey + refusedItemsKey = RefusedLogRecordsKey + } + + span.AddAttributes( + trace.StringAttribute( + FormatKey, format), + trace.Int64Attribute( + acceptedItemsKey, int64(numAccepted)), + trace.Int64Attribute( + refusedItemsKey, int64(numRefused)), + ) + span.SetStatus(errToStatus(err)) + } + span.End() +} diff --git a/internal/otel_collector/obsreport/obsreport_scraper.go b/internal/otel_collector/obsreport/obsreport_scraper.go new file mode 100644 index 00000000000..ee02acb78a8 --- /dev/null +++ b/internal/otel_collector/obsreport/obsreport_scraper.go @@ -0,0 +1,132 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreport + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +const ( + // ScraperKey used to identify scrapers in metrics and traces. + ScraperKey = "scraper" + + // ScrapedMetricPointsKey used to identify metric points scraped by the + // Collector. + ScrapedMetricPointsKey = "scraped_metric_points" + // ErroredMetricPointsKey used to identify metric points errored (i.e. + // unable to be scraped) by the Collector. 
+ ErroredMetricPointsKey = "errored_metric_points" +) + +const ( + scraperPrefix = ScraperKey + nameSep + scraperMetricsOperationSuffix = nameSep + "MetricsScraped" +) + +var ( + tagKeyScraper, _ = tag.NewKey(ScraperKey) + + mScraperScrapedMetricPoints = stats.Int64( + scraperPrefix+ScrapedMetricPointsKey, + "Number of metric points successfully scraped.", + stats.UnitDimensionless) + mScraperErroredMetricPoints = stats.Int64( + scraperPrefix+ErroredMetricPointsKey, + "Number of metric points that were unable to be scraped.", + stats.UnitDimensionless) +) + +// ScraperContext adds the keys used when recording observability metrics to +// the given context returning the newly created context. This context should +// be used in related calls to the obsreport functions so metrics are properly +// recorded. +func ScraperContext( + ctx context.Context, + receiver string, + scraper string, +) context.Context { + ctx, _ = tag.New(ctx, tag.Upsert(tagKeyReceiver, receiver, tag.WithTTL(tag.TTLNoPropagation))) + if scraper != "" { + ctx, _ = tag.New(ctx, tag.Upsert(tagKeyScraper, scraper, tag.WithTTL(tag.TTLNoPropagation))) + } + + return ctx +} + +// StartMetricsScrapeOp is called when a scrape operation is started. The +// returned context should be used in other calls to the obsreport functions +// dealing with the same scrape operation. +func StartMetricsScrapeOp( + scraperCtx context.Context, + receiver string, + scraper string, +) context.Context { + scraperName := receiver + if scraper != "" { + scraperName += "/" + scraper + } + + spanName := scraperPrefix + scraperName + scraperMetricsOperationSuffix + ctx, _ := trace.StartSpan(scraperCtx, spanName) + return ctx +} + +// EndMetricsScrapeOp completes the scrape operation that was started with +// StartMetricsScrapeOp. +func EndMetricsScrapeOp( + scraperCtx context.Context, + numScrapedMetrics int, + err error, +) { + numErroredMetrics := 0 + if err != nil { + if partialErr, isPartial := err.(consumererror.PartialScrapeError); isPartial { + numErroredMetrics = partialErr.Failed + } else { + numErroredMetrics = numScrapedMetrics + numScrapedMetrics = 0 + } + } + + span := trace.FromContext(scraperCtx) + + if gLevel != configtelemetry.LevelNone { + stats.Record( + scraperCtx, + mScraperScrapedMetricPoints.M(int64(numScrapedMetrics)), + mScraperErroredMetricPoints.M(int64(numErroredMetrics))) + } + + // end span according to errors + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute(FormatKey, string(configmodels.MetricsDataType)), + trace.Int64Attribute(ScrapedMetricPointsKey, int64(numScrapedMetrics)), + trace.Int64Attribute(ErroredMetricPointsKey, int64(numErroredMetrics)), + ) + + span.SetStatus(errToStatus(err)) + } + + span.End() +} diff --git a/internal/otel_collector/obsreport/obsreport_test.go b/internal/otel_collector/obsreport/obsreport_test.go new file mode 100644 index 00000000000..59733c72fc5 --- /dev/null +++ b/internal/otel_collector/obsreport/obsreport_test.go @@ -0,0 +1,667 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// obsreport_test instead of just obsreport to avoid dependency cycle between +// obsreport_test and obsreporttest +package obsreport_test + +import ( + "context" + "errors" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +const ( + exporter = "fakeExporter" + processor = "fakeProcessor" + receiver = "fakeReicever" + scraper = "fakeScraper" + transport = "fakeTransport" + format = "fakeFormat" +) + +var ( + errFake = errors.New("errFake") + partialErrFake = consumererror.NewPartialScrapeError(errFake, 1) +) + +type receiveTestParams struct { + transport string + err error +} + +func TestConfigure(t *testing.T) { + tests := []struct { + name string + level configtelemetry.Level + wantViews []*view.View + }{ + { + name: "none", + level: configtelemetry.LevelNone, + }, + { + name: "basic", + level: configtelemetry.LevelBasic, + wantViews: obsreport.AllViews(), + }, + { + name: "normal", + level: configtelemetry.LevelNormal, + wantViews: obsreport.AllViews(), + }, + { + name: "detailed", + level: configtelemetry.LevelDetailed, + wantViews: obsreport.AllViews(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotViews := obsreport.Configure(tt.level) + assert.Equal(t, tt.wantViews, gotViews) + }) + } +} + +func TestReceiveTraceDataOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + receiverCtx := obsreport.ReceiverContext(parentCtx, receiver, transport) + params := []receiveTestParams{ + {transport, errFake}, + {"", nil}, + } + rcvdSpans := []int{13, 42} + for i, param := range params { + ctx := obsreport.StartTraceDataReceiveOp(receiverCtx, receiver, param.transport) + assert.NotNil(t, ctx) + + obsreport.EndTraceDataReceiveOp( + ctx, + format, + rcvdSpans[i], + param.err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(params), len(spans)) + + var acceptedSpans, refusedSpans int + for i, span := range spans { + assert.Equal(t, "receiver/"+receiver+"/TraceDataReceived", span.Name) + switch params[i].err { + case nil: + acceptedSpans += rcvdSpans[i] + assert.Equal(t, int64(rcvdSpans[i]), span.Attributes[obsreport.AcceptedSpansKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.RefusedSpansKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + refusedSpans += rcvdSpans[i] + assert.Equal(t, int64(0), span.Attributes[obsreport.AcceptedSpansKey]) + assert.Equal(t, int64(rcvdSpans[i]), span.Attributes[obsreport.RefusedSpansKey]) + assert.Equal(t, params[i].err.Error(), span.Status.Message) + default: + t.Fatalf("unexpected param: %v", params[i]) + } + switch params[i].transport { + case "": + assert.NotContains(t, span.Attributes, obsreport.TransportKey) + default: + assert.Equal(t, params[i].transport, span.Attributes[obsreport.TransportKey]) + } + 
} + obsreporttest.CheckReceiverTracesViews(t, receiver, transport, int64(acceptedSpans), int64(refusedSpans)) +} + +func TestReceiveLogsOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + receiverCtx := obsreport.ReceiverContext(parentCtx, receiver, transport) + params := []receiveTestParams{ + {transport, errFake}, + {"", nil}, + } + rcvdLogRecords := []int{13, 42} + for i, param := range params { + ctx := obsreport.StartLogsReceiveOp(receiverCtx, receiver, param.transport) + assert.NotNil(t, ctx) + + obsreport.EndLogsReceiveOp( + ctx, + format, + rcvdLogRecords[i], + param.err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(params), len(spans)) + + var acceptedLogRecords, refusedLogRecords int + for i, span := range spans { + assert.Equal(t, "receiver/"+receiver+"/LogsReceived", span.Name) + switch params[i].err { + case nil: + acceptedLogRecords += rcvdLogRecords[i] + assert.Equal(t, int64(rcvdLogRecords[i]), span.Attributes[obsreport.AcceptedLogRecordsKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.RefusedLogRecordsKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + refusedLogRecords += rcvdLogRecords[i] + assert.Equal(t, int64(0), span.Attributes[obsreport.AcceptedLogRecordsKey]) + assert.Equal(t, int64(rcvdLogRecords[i]), span.Attributes[obsreport.RefusedLogRecordsKey]) + assert.Equal(t, params[i].err.Error(), span.Status.Message) + default: + t.Fatalf("unexpected param: %v", params[i]) + } + switch params[i].transport { + case "": + assert.NotContains(t, span.Attributes, obsreport.TransportKey) + default: + assert.Equal(t, params[i].transport, span.Attributes[obsreport.TransportKey]) + } + } + obsreporttest.CheckReceiverLogsViews(t, receiver, transport, int64(acceptedLogRecords), int64(refusedLogRecords)) +} + +func TestReceiveMetricsOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + receiverCtx := obsreport.ReceiverContext(parentCtx, receiver, transport) + params := []receiveTestParams{ + {transport, errFake}, + {"", nil}, + } + rcvdMetricPts := []int{23, 29} + for i, param := range params { + ctx := obsreport.StartMetricsReceiveOp(receiverCtx, receiver, param.transport) + assert.NotNil(t, ctx) + + obsreport.EndMetricsReceiveOp( + ctx, + format, + rcvdMetricPts[i], + param.err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(params), len(spans)) + + var acceptedMetricPoints, refusedMetricPoints int + for i, span := range spans { + assert.Equal(t, "receiver/"+receiver+"/MetricsReceived", span.Name) + switch params[i].err { + case nil: + acceptedMetricPoints += rcvdMetricPts[i] + assert.Equal(t, int64(rcvdMetricPts[i]), span.Attributes[obsreport.AcceptedMetricPointsKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.RefusedMetricPointsKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + refusedMetricPoints += rcvdMetricPts[i] + assert.Equal(t, int64(0), 
span.Attributes[obsreport.AcceptedMetricPointsKey]) + assert.Equal(t, int64(rcvdMetricPts[i]), span.Attributes[obsreport.RefusedMetricPointsKey]) + assert.Equal(t, params[i].err.Error(), span.Status.Message) + default: + t.Fatalf("unexpected param: %v", params[i]) + } + switch params[i].transport { + case "": + assert.NotContains(t, span.Attributes, obsreport.TransportKey) + default: + assert.Equal(t, params[i].transport, span.Attributes[obsreport.TransportKey]) + } + } + + obsreporttest.CheckReceiverMetricsViews(t, receiver, transport, int64(acceptedMetricPoints), int64(refusedMetricPoints)) +} + +func TestScrapeMetricsDataOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + receiverCtx := obsreport.ScraperContext(parentCtx, receiver, scraper) + errParams := []error{partialErrFake, errFake, nil} + scrapedMetricPts := []int{23, 29, 15} + for i, err := range errParams { + ctx := obsreport.StartMetricsScrapeOp(receiverCtx, receiver, scraper) + assert.NotNil(t, ctx) + + obsreport.EndMetricsScrapeOp( + ctx, + scrapedMetricPts[i], + err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(errParams), len(spans)) + + var scrapedMetricPoints, erroredMetricPoints int + for i, span := range spans { + assert.Equal(t, "scraper/"+receiver+"/"+scraper+"/MetricsScraped", span.Name) + switch errParams[i] { + case nil: + scrapedMetricPoints += scrapedMetricPts[i] + assert.Equal(t, int64(scrapedMetricPts[i]), span.Attributes[obsreport.ScrapedMetricPointsKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.ErroredMetricPointsKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + erroredMetricPoints += scrapedMetricPts[i] + assert.Equal(t, int64(0), span.Attributes[obsreport.ScrapedMetricPointsKey]) + assert.Equal(t, int64(scrapedMetricPts[i]), span.Attributes[obsreport.ErroredMetricPointsKey]) + assert.Equal(t, errParams[i].Error(), span.Status.Message) + case partialErrFake: + scrapedMetricPoints += scrapedMetricPts[i] + erroredMetricPoints++ + assert.Equal(t, int64(scrapedMetricPts[i]), span.Attributes[obsreport.ScrapedMetricPointsKey]) + assert.Equal(t, int64(1), span.Attributes[obsreport.ErroredMetricPointsKey]) + assert.Equal(t, errParams[i].Error(), span.Status.Message) + default: + t.Fatalf("unexpected err param: %v", errParams[i]) + } + } + + obsreporttest.CheckScraperMetricsViews(t, receiver, scraper, int64(scrapedMetricPoints), int64(erroredMetricPoints)) +} + +func TestExportTraceDataOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + exporterCtx := obsreport.ExporterContext(parentCtx, exporter) + obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, exporter) + errs := []error{nil, errFake} + numExportedSpans := []int{22, 14} + for i, err := range errs { + ctx := obsrep.StartTracesExportOp(exporterCtx) + assert.NotNil(t, ctx) + obsrep.EndTracesExportOp(ctx, numExportedSpans[i], err) + } + + spans := ss.PullAllSpans() + 
require.Equal(t, len(errs), len(spans)) + + var sentSpans, failedToSendSpans int + for i, span := range spans { + assert.Equal(t, "exporter/"+exporter+"/traces", span.Name) + switch errs[i] { + case nil: + sentSpans += numExportedSpans[i] + assert.Equal(t, int64(numExportedSpans[i]), span.Attributes[obsreport.SentSpansKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.FailedToSendSpansKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + failedToSendSpans += numExportedSpans[i] + assert.Equal(t, int64(0), span.Attributes[obsreport.SentSpansKey]) + assert.Equal(t, int64(numExportedSpans[i]), span.Attributes[obsreport.FailedToSendSpansKey]) + assert.Equal(t, errs[i].Error(), span.Status.Message) + default: + t.Fatalf("unexpected error: %v", errs[i]) + } + } + + obsreporttest.CheckExporterTracesViews(t, exporter, int64(sentSpans), int64(failedToSendSpans)) +} + +func TestExportMetricsOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + exporterCtx := obsreport.ExporterContext(parentCtx, exporter) + obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, exporter) + + errs := []error{nil, errFake} + toSendMetricPoints := []int{17, 23} + for i, err := range errs { + ctx := obsrep.StartMetricsExportOp(exporterCtx) + assert.NotNil(t, ctx) + + obsrep.EndMetricsExportOp(ctx, toSendMetricPoints[i], err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(errs), len(spans)) + + var sentMetricPoints, failedToSendMetricPoints int + for i, span := range spans { + assert.Equal(t, "exporter/"+exporter+"/metrics", span.Name) + switch errs[i] { + case nil: + sentMetricPoints += toSendMetricPoints[i] + assert.Equal(t, int64(toSendMetricPoints[i]), span.Attributes[obsreport.SentMetricPointsKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.FailedToSendMetricPointsKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + failedToSendMetricPoints += toSendMetricPoints[i] + assert.Equal(t, int64(0), span.Attributes[obsreport.SentMetricPointsKey]) + assert.Equal(t, int64(toSendMetricPoints[i]), span.Attributes[obsreport.FailedToSendMetricPointsKey]) + assert.Equal(t, errs[i].Error(), span.Status.Message) + default: + t.Fatalf("unexpected error: %v", errs[i]) + } + } + + obsreporttest.CheckExporterMetricsViews(t, exporter, int64(sentMetricPoints), int64(failedToSendMetricPoints)) +} + +func TestExportLogsOp(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + parentCtx, parentSpan := trace.StartSpan(context.Background(), + t.Name(), trace.WithSampler(trace.AlwaysSample())) + defer parentSpan.End() + + exporterCtx := obsreport.ExporterContext(parentCtx, exporter) + obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, exporter) + + errs := []error{nil, errFake} + toSendLogRecords := []int{17, 23} + for i, err := range errs { + ctx := obsrep.StartLogsExportOp(exporterCtx) + assert.NotNil(t, ctx) + + obsrep.EndLogsExportOp(ctx, toSendLogRecords[i], err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(errs), len(spans)) + + 
var sentLogRecords, failedToSendLogRecords int + for i, span := range spans { + assert.Equal(t, "exporter/"+exporter+"/logs", span.Name) + switch errs[i] { + case nil: + sentLogRecords += toSendLogRecords[i] + assert.Equal(t, int64(toSendLogRecords[i]), span.Attributes[obsreport.SentLogRecordsKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.FailedToSendLogRecordsKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + failedToSendLogRecords += toSendLogRecords[i] + assert.Equal(t, int64(0), span.Attributes[obsreport.SentLogRecordsKey]) + assert.Equal(t, int64(toSendLogRecords[i]), span.Attributes[obsreport.FailedToSendLogRecordsKey]) + assert.Equal(t, errs[i].Error(), span.Status.Message) + default: + t.Fatalf("unexpected error: %v", errs[i]) + } + } + + obsreporttest.CheckExporterLogsViews(t, exporter, int64(sentLogRecords), int64(failedToSendLogRecords)) +} + +func TestReceiveWithLongLivedCtx(t *testing.T) { + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + trace.ApplyConfig(trace.Config{ + DefaultSampler: trace.AlwaysSample(), + }) + defer func() { + trace.ApplyConfig(trace.Config{ + DefaultSampler: trace.ProbabilitySampler(1e-4), + }) + }() + + parentCtx, parentSpan := trace.StartSpan(context.Background(), t.Name()) + defer parentSpan.End() + + longLivedCtx := obsreport.ReceiverContext(parentCtx, receiver, transport) + ops := []struct { + numSpans int + err error + }{ + {numSpans: 13}, + {numSpans: 42, err: errFake}, + } + for _, op := range ops { + // Use a new context on each operation to simulate distinct operations + // under the same long lived context. + ctx := obsreport.StartTraceDataReceiveOp( + longLivedCtx, + receiver, + transport, + obsreport.WithLongLivedCtx()) + assert.NotNil(t, ctx) + + obsreport.EndTraceDataReceiveOp( + ctx, + format, + op.numSpans, + op.err) + } + + spans := ss.PullAllSpans() + require.Equal(t, len(ops), len(spans)) + + for i, span := range spans { + assert.Equal(t, trace.SpanID{}, span.ParentSpanID) + require.Equal(t, 1, len(span.Links)) + link := span.Links[0] + assert.Equal(t, trace.LinkTypeParent, link.Type) + assert.Equal(t, parentSpan.SpanContext().TraceID, link.TraceID) + assert.Equal(t, parentSpan.SpanContext().SpanID, link.SpanID) + assert.Equal(t, "receiver/"+receiver+"/TraceDataReceived", span.Name) + assert.Equal(t, transport, span.Attributes[obsreport.TransportKey]) + switch ops[i].err { + case nil: + assert.Equal(t, int64(ops[i].numSpans), span.Attributes[obsreport.AcceptedSpansKey]) + assert.Equal(t, int64(0), span.Attributes[obsreport.RefusedSpansKey]) + assert.Equal(t, trace.Status{Code: trace.StatusCodeOK}, span.Status) + case errFake: + assert.Equal(t, int64(0), span.Attributes[obsreport.AcceptedSpansKey]) + assert.Equal(t, int64(ops[i].numSpans), span.Attributes[obsreport.RefusedSpansKey]) + assert.Equal(t, ops[i].err.Error(), span.Status.Message) + default: + t.Fatalf("unexpected error: %v", ops[i].err) + } + } +} + +func TestProcessorTraceData(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + const acceptedSpans = 27 + const refusedSpans = 19 + const droppedSpans = 13 + + obsrep := obsreport.NewProcessorObsReport(configtelemetry.LevelNormal, processor) + obsrep.TracesAccepted(context.Background(), acceptedSpans) + obsrep.TracesRefused(context.Background(), refusedSpans) + obsrep.TracesDropped(context.Background(), droppedSpans) + + obsreporttest.CheckProcessorTracesViews(t, 
processor, acceptedSpans, refusedSpans, droppedSpans) +} + +func TestProcessorMetricsData(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + const acceptedPoints = 29 + const refusedPoints = 11 + const droppedPoints = 17 + + obsrep := obsreport.NewProcessorObsReport(configtelemetry.LevelNormal, processor) + obsrep.MetricsAccepted(context.Background(), acceptedPoints) + obsrep.MetricsRefused(context.Background(), refusedPoints) + obsrep.MetricsDropped(context.Background(), droppedPoints) + + obsreporttest.CheckProcessorMetricsViews(t, processor, acceptedPoints, refusedPoints, droppedPoints) +} + +func TestProcessorMetricViews(t *testing.T) { + measures := []stats.Measure{ + stats.Int64("firstMeasure", "test firstMeasure", stats.UnitDimensionless), + stats.Int64("secondMeasure", "test secondMeasure", stats.UnitBytes), + } + legacyViews := []*view.View{ + { + Name: measures[0].Name(), + Description: measures[0].Description(), + Measure: measures[0], + Aggregation: view.Sum(), + }, + { + Measure: measures[1], + Aggregation: view.Count(), + }, + } + + tests := []struct { + name string + level configtelemetry.Level + want []*view.View + }{ + { + name: "none", + level: configtelemetry.LevelNone, + }, + { + name: "basic", + level: configtelemetry.LevelBasic, + want: []*view.View{ + { + Name: "processor/test_type/" + measures[0].Name(), + Description: measures[0].Description(), + Measure: measures[0], + Aggregation: view.Sum(), + }, + { + Name: "processor/test_type/" + measures[1].Name(), + Measure: measures[1], + Aggregation: view.Count(), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obsreport.Configure(tt.level) + got := obsreport.ProcessorMetricViews("test_type", legacyViews) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestProcessorLogRecords(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + const acceptedRecords = 29 + const refusedRecords = 11 + const droppedRecords = 17 + + obsrep := obsreport.NewProcessorObsReport(configtelemetry.LevelNormal, processor) + obsrep.LogsAccepted(context.Background(), acceptedRecords) + obsrep.LogsRefused(context.Background(), refusedRecords) + obsrep.LogsDropped(context.Background(), droppedRecords) + + obsreporttest.CheckProcessorLogsViews(t, processor, acceptedRecords, refusedRecords, droppedRecords) +} + +type spanStore struct { + sync.Mutex + spans []*trace.SpanData +} + +func (ss *spanStore) ExportSpan(sd *trace.SpanData) { + ss.Lock() + ss.spans = append(ss.spans, sd) + ss.Unlock() +} + +func (ss *spanStore) PullAllSpans() []*trace.SpanData { + ss.Lock() + capturedSpans := ss.spans + ss.spans = nil + ss.Unlock() + return capturedSpans +} diff --git a/internal/otel_collector/obsreport/obsreporttest/obsreporttest.go b/internal/otel_collector/obsreport/obsreporttest/obsreporttest.go new file mode 100644 index 00000000000..b1e1a413ff6 --- /dev/null +++ b/internal/otel_collector/obsreport/obsreporttest/obsreporttest.go @@ -0,0 +1,205 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreporttest + +import ( + "reflect" + "sort" + "testing" + + "github.com/stretchr/testify/require" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/obsreport" +) + +var ( + // Names used by the metrics and labels are hard coded here in order to avoid + // inadvertent changes: at this point changing metric names and labels should + // be treated as a breaking change and requires a good justification. + // Changes to metric names or labels can break alerting, dashboards, etc. + // that are used to monitor the Collector in production deployments. + // DO NOT SWITCH THE VARIABLES BELOW TO SIMILAR ONES DEFINED ON THE PACKAGE. + receiverTag, _ = tag.NewKey("receiver") + scraperTag, _ = tag.NewKey("scraper") + transportTag, _ = tag.NewKey("transport") + exporterTag, _ = tag.NewKey("exporter") + processorTag, _ = tag.NewKey("processor") +) + +// SetupRecordedMetricsTest sets up the testing environment to check the metrics recorded by receivers, producers or exporters. +// The returned function should be deferred. +func SetupRecordedMetricsTest() (func(), error) { + views := obsreport.Configure(configtelemetry.LevelNormal) + err := view.Register(views...) + if err != nil { + return nil, err + } + + return func() { + view.Unregister(views...) + }, err +} + +// CheckExporterTracesViews checks that the current exported values for trace exporter views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckExporterTracesViews(t *testing.T, exporter string, acceptedSpans, droppedSpans int64) { + exporterTags := tagsForExporterView(exporter) + CheckValueForView(t, exporterTags, acceptedSpans, "exporter/sent_spans") + CheckValueForView(t, exporterTags, droppedSpans, "exporter/send_failed_spans") +} + +// CheckExporterMetricsViews checks that the current exported values for metrics exporter views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckExporterMetricsViews(t *testing.T, exporter string, acceptedMetricsPoints, droppedMetricsPoints int64) { + exporterTags := tagsForExporterView(exporter) + CheckValueForView(t, exporterTags, acceptedMetricsPoints, "exporter/sent_metric_points") + CheckValueForView(t, exporterTags, droppedMetricsPoints, "exporter/send_failed_metric_points") +} + +// CheckExporterLogsViews checks that the current exported values for logs exporter views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used.
+func CheckExporterLogsViews(t *testing.T, exporter string, acceptedLogRecords, droppedLogRecords int64) { + exporterTags := tagsForExporterView(exporter) + CheckValueForView(t, exporterTags, acceptedLogRecords, "exporter/sent_log_records") + CheckValueForView(t, exporterTags, droppedLogRecords, "exporter/send_failed_log_records") +} + +// CheckProcessorTracesViews checks that the current exported values for trace processor views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckProcessorTracesViews(t *testing.T, processor string, acceptedSpans, refusedSpans, droppedSpans int64) { + processorTags := tagsForProcessorView(processor) + CheckValueForView(t, processorTags, acceptedSpans, "processor/accepted_spans") + CheckValueForView(t, processorTags, refusedSpans, "processor/refused_spans") + CheckValueForView(t, processorTags, droppedSpans, "processor/dropped_spans") +} + +// CheckProcessorMetricsViews checks that the current exported values for metrics processor views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckProcessorMetricsViews(t *testing.T, processor string, acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints int64) { + processorTags := tagsForProcessorView(processor) + CheckValueForView(t, processorTags, acceptedMetricPoints, "processor/accepted_metric_points") + CheckValueForView(t, processorTags, refusedMetricPoints, "processor/refused_metric_points") + CheckValueForView(t, processorTags, droppedMetricPoints, "processor/dropped_metric_points") +} + +// CheckProcessorLogsViews checks that the current exported values for logs processor views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckProcessorLogsViews(t *testing.T, processor string, acceptedLogRecords, refusedLogRecords, droppedLogRecords int64) { + processorTags := tagsForProcessorView(processor) + CheckValueForView(t, processorTags, acceptedLogRecords, "processor/accepted_log_records") + CheckValueForView(t, processorTags, refusedLogRecords, "processor/refused_log_records") + CheckValueForView(t, processorTags, droppedLogRecords, "processor/dropped_log_records") +} + +// CheckReceiverTracesViews checks that the current exported values for trace receiver views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckReceiverTracesViews(t *testing.T, receiver, protocol string, acceptedSpans, droppedSpans int64) { + receiverTags := tagsForReceiverView(receiver, protocol) + CheckValueForView(t, receiverTags, acceptedSpans, "receiver/accepted_spans") + CheckValueForView(t, receiverTags, droppedSpans, "receiver/refused_spans") +} + +// CheckReceiverLogsViews checks that the current exported values for logs receiver views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used.
+func CheckReceiverLogsViews(t *testing.T, receiver, protocol string, acceptedLogRecords, droppedLogRecords int64) { + receiverTags := tagsForReceiverView(receiver, protocol) + CheckValueForView(t, receiverTags, acceptedLogRecords, "receiver/accepted_log_records") + CheckValueForView(t, receiverTags, droppedLogRecords, "receiver/refused_log_records") +} + +// CheckReceiverMetricsViews checks that the current exported values for metrics receiver views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckReceiverMetricsViews(t *testing.T, receiver, protocol string, acceptedMetricPoints, droppedMetricPoints int64) { + receiverTags := tagsForReceiverView(receiver, protocol) + CheckValueForView(t, receiverTags, acceptedMetricPoints, "receiver/accepted_metric_points") + CheckValueForView(t, receiverTags, droppedMetricPoints, "receiver/refused_metric_points") +} + +// CheckScraperMetricsViews checks that the current exported values for metrics scraper views match the given values. +// SetupRecordedMetricsTest must be called first when this function is used. +func CheckScraperMetricsViews(t *testing.T, receiver, scraper string, scrapedMetricPoints, erroredMetricPoints int64) { + scraperTags := tagsForScraperView(receiver, scraper) + CheckValueForView(t, scraperTags, scrapedMetricPoints, "scraper/scraped_metric_points") + CheckValueForView(t, scraperTags, erroredMetricPoints, "scraper/errored_metric_points") +} + +// CheckValueForView checks that the current exported value in the view with the given name, +// recorded with the given tags, is equal to "value". +func CheckValueForView(t *testing.T, wantTags []tag.Tag, value int64, vName string) { + // Make sure the tags slice is sorted by tag keys. + sortTags(wantTags) + + rows, err := view.RetrieveData(vName) + require.NoError(t, err) + + for _, row := range rows { + // Make sure the tags slice is sorted by tag keys. + sortTags(row.Tags) + if reflect.DeepEqual(wantTags, row.Tags) { + sum := row.Data.(*view.SumData) + require.Equal(t, float64(value), sum.Value) + return + } + } + + require.Failf(t, "could not find tags", "wantTags: %s in rows %v", wantTags, rows) +} + +// tagsForReceiverView returns the tags that are needed for the receiver views. +func tagsForReceiverView(receiver, transport string) []tag.Tag { + tags := make([]tag.Tag, 0, 2) + + tags = append(tags, tag.Tag{Key: receiverTag, Value: receiver}) + if transport != "" { + tags = append(tags, tag.Tag{Key: transportTag, Value: transport}) + } + + return tags +} + +// tagsForScraperView returns the tags that are needed for the scraper views. +func tagsForScraperView(receiver, scraper string) []tag.Tag { + tags := make([]tag.Tag, 0, 2) + + tags = append(tags, tag.Tag{Key: receiverTag, Value: receiver}) + if scraper != "" { + tags = append(tags, tag.Tag{Key: scraperTag, Value: scraper}) + } + + return tags +} + +// tagsForProcessorView returns the tags that are needed for the processor views. +func tagsForProcessorView(processor string) []tag.Tag { + return []tag.Tag{ + {Key: processorTag, Value: processor}, + } +} + +// tagsForExporterView returns the tags that are needed for the exporter views.
+func tagsForExporterView(exporter string) []tag.Tag { + return []tag.Tag{ + {Key: exporterTag, Value: exporter}, + } +} + +func sortTags(tags []tag.Tag) { + sort.SliceStable(tags, func(i, j int) bool { + return tags[i].Key.Name() < tags[j].Key.Name() + }) +} diff --git a/internal/otel_collector/obsreport/obsreporttest/obsreporttest_test.go b/internal/otel_collector/obsreport/obsreporttest/obsreporttest_test.go new file mode 100644 index 00000000000..cb8262e7b31 --- /dev/null +++ b/internal/otel_collector/obsreport/obsreporttest/obsreporttest_test.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreporttest_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +const ( + exporter = "fakeExporter" + receiver = "fakeReceiver" + transport = "fakeTransport" + format = "fakeFormat" +) + +func TestCheckReceiverTracesViews(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + receiverCtx := obsreport.ReceiverContext(context.Background(), receiver, transport) + ctx := obsreport.StartTraceDataReceiveOp(receiverCtx, receiver, transport) + assert.NotNil(t, ctx) + obsreport.EndTraceDataReceiveOp( + ctx, + format, + 7, + nil) + + obsreporttest.CheckReceiverTracesViews(t, receiver, transport, 7, 0) +} + +func TestCheckReceiverMetricsViews(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + receiverCtx := obsreport.ReceiverContext(context.Background(), receiver, transport) + ctx := obsreport.StartMetricsReceiveOp(receiverCtx, receiver, transport) + assert.NotNil(t, ctx) + obsreport.EndMetricsReceiveOp(ctx, format, 7, nil) + + obsreporttest.CheckReceiverMetricsViews(t, receiver, transport, 7, 0) +} + +func TestCheckReceiverLogsViews(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + receiverCtx := obsreport.ReceiverContext(context.Background(), receiver, transport) + ctx := obsreport.StartLogsReceiveOp(receiverCtx, receiver, transport) + assert.NotNil(t, ctx) + obsreport.EndLogsReceiveOp(ctx, format, 7, nil) + + obsreporttest.CheckReceiverLogsViews(t, receiver, transport, 7, 0) +} + +func TestCheckExporterTracesViews(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, exporter) + exporterCtx := obsreport.ExporterContext(context.Background(), exporter) + ctx := obsrep.StartTracesExportOp(exporterCtx) + assert.NotNil(t, ctx) + + obsrep.EndTracesExportOp(ctx, 7, nil) + + obsreporttest.CheckExporterTracesViews(t, exporter, 7, 0) +} + +func
TestCheckExporterMetricsViews(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, exporter) + exporterCtx := obsreport.ExporterContext(context.Background(), exporter) + ctx := obsrep.StartMetricsExportOp(exporterCtx) + assert.NotNil(t, ctx) + + obsrep.EndMetricsExportOp(ctx, 7, nil) + + obsreporttest.CheckExporterMetricsViews(t, exporter, 7, 0) +} + +func TestCheckExporterLogsViews(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + obsrep := obsreport.NewExporterObsReport(configtelemetry.LevelNormal, exporter) + exporterCtx := obsreport.ExporterContext(context.Background(), exporter) + ctx := obsrep.StartLogsExportOp(exporterCtx) + assert.NotNil(t, ctx) + obsrep.EndLogsExportOp(ctx, 7, nil) + + obsreporttest.CheckExporterLogsViews(t, exporter, 7, 0) +} diff --git a/internal/otel_collector/processor/README.md b/internal/otel_collector/processor/README.md new file mode 100644 index 00000000000..081f35a98e0 --- /dev/null +++ b/internal/otel_collector/processor/README.md @@ -0,0 +1,234 @@ +# General Information + +Processors are used at various stages of a pipeline. Generally, a processor +pre-processes data before it is exported (e.g. modify attributes or sample) or +helps ensure that data makes it through a pipeline successfully (e.g. +batch/retry). + +Some important aspects of pipelines and processors to be aware of: +- [Recommended Processors](#recommended-processors) +- [Data Ownership](#data-ownership) +- [Exclusive Ownership](#exclusive-ownership) +- [Shared Ownership](#shared-ownership) +- [Ordering Processors](#ordering-processors) + +Supported processors (sorted alphabetically): +- [Attributes Processor](attributesprocessor/README.md) +- [Batch Processor](batchprocessor/README.md) +- [Filter Processor](filterprocessor/README.md) +- [Memory Limiter Processor](memorylimiter/README.md) +- [Queued Retry Processor](queuedprocessor/README.md) +- [Resource Processor](resourceprocessor/README.md) +- [Probabilistic Sampling Processor](samplingprocessor/probabilisticsamplerprocessor/README.md) +- [Span Processor](spanprocessor/README.md) + +The [contributors repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) + has more processors that can be added to custom builds of the Collector. + +## Recommended Processors + +No processors are enabled by default; however, multiple processors are +recommended, depending on the data source. Processors must be +enabled for every data source, and not all processors support all data sources. +In addition, it is important to note that the order of processors matters. The +order in each section below is the best practice. Refer to the individual +processor documentation for more information. + +### Traces + +1. [memory_limiter](memorylimiter/README.md) +2. *any sampling processors* +3. [batch](batchprocessor/README.md) +4. *any other processors* + +### Metrics + +1. [memory_limiter](memorylimiter/README.md) +2. [batch](batchprocessor/README.md) +3. *any other processors* + +## Data Ownership + +The ownership of the `TraceData` and `MetricsData` in a pipeline is passed along as the data travels +through the pipeline. The data is created by the receiver, and ownership is then passed +to the first processor when the `ConsumeTraceData`/`ConsumeMetricsData` function is called, as sketched below.
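+
+A minimal sketch of this hand-off. The `passThrough` processor and its package below are hypothetical, for illustration only; the sketch assumes the pdata-based `consumer.TracesConsumer` interface used elsewhere in this tree:
+
+```go
+package example
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// passThrough is a hypothetical processor that forwards data unchanged.
+type passThrough struct {
+	next consumer.TracesConsumer // next stage in the pipeline
+}
+
+// ConsumeTraces receives ownership of td from the previous pipeline stage.
+func (p *passThrough) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	// This processor owns td from here until the call below hands it to the
+	// next consumer; after that call it must not read or write td again.
+	return p.next.ConsumeTraces(ctx, td)
+}
+```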
+ +Note: the receiver may be attached to multiple pipelines, in which case the same data +will be passed to all attached pipelines via a data fan-out connector. + +From a data ownership perspective, pipelines can work in 2 modes: +* Exclusive data ownership +* Shared data ownership + +The mode is defined during startup based on the data modification intent reported by the +processors. The intent is reported by each processor via the `MutatesConsumedData` field of +the struct returned by the `GetCapabilities` function. If any processor in the pipeline +declares an intent to modify the data then that pipeline will work in exclusive ownership +mode. In addition, any other pipeline that receives data from a receiver that is attached +to a pipeline with exclusive ownership mode will also operate in exclusive ownership +mode. + +### Exclusive Ownership + +In exclusive ownership mode the data is owned exclusively by a particular processor at a +given moment of time and the processor is free to modify the data it owns. + +Exclusive ownership mode is only applicable for pipelines that receive data from the +same receiver. If a pipeline is marked to be in exclusive ownership mode then any data +received from a shared receiver will be cloned at the fan-out connector before being passed +further to each pipeline. This ensures that each pipeline has its own exclusive copy of +the data, and the data can be safely modified in the pipeline. + +The exclusive ownership of data allows processors to freely modify the data while +they own it (e.g. see `attributesprocessor`). A processor's ownership of the data +lasts from the beginning of the `ConsumeTraceData`/`ConsumeMetricsData` call +until the processor calls the next processor's `ConsumeTraceData`/`ConsumeMetricsData` +function, which passes the ownership to the next processor. After that the processor +must no longer read or write the data since it may be concurrently modified by the +new owner. + +Exclusive ownership mode makes it easy to implement processors that need to modify +the data: they simply declare that intent. + +### Shared Ownership + +In shared ownership mode no particular processor owns the data, and no processor is +allowed to modify the shared data. + +In this mode no cloning is performed at the fan-out connector of receivers that +are attached to multiple pipelines. In this case all such pipelines will see +the same single shared copy of the data. Processors in pipelines operating in shared +ownership mode are prohibited from modifying the original data that they receive +via the `ConsumeTraceData`/`ConsumeMetricsData` call. Processors may only read the data but +must not modify it. + +If the processor needs to modify the data while performing the processing, but +does not want to incur the cost of data cloning that exclusive mode brings, then +the processor can declare that it does not modify the data and use some +other technique that ensures the original data is not modified. For example, +the processor can implement a copy-on-write approach for individual sub-parts of the +`TraceData`/`MetricsData` argument. Any approach that does not mutate the +original `TraceData`/`MetricsData` argument (including referenced data, such as +`Node`, `Resource`, `Spans`, etc.) is allowed.
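+
+A processor that relies on such a technique declares it through its capabilities. A minimal sketch, extending the hypothetical `passThrough` processor above and assuming the `GetCapabilities`/`component.ProcessorCapabilities` shape used by processors at this collector version (import `go.opentelemetry.io/collector/component`):
+
+```go
+// GetCapabilities reports that this processor never mutates the data it
+// consumes, so pipelines containing it can stay in shared ownership mode
+// and skip the cloning that exclusive ownership would require.
+func (p *passThrough) GetCapabilities() component.ProcessorCapabilities {
+	return component.ProcessorCapabilities{MutatesConsumedData: false}
+}
+```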
+ +Concretely, if the processor uses such a technique, it should declare that it does not intend +to modify the original data by setting `MutatesConsumedData=false` in its capabilities, as in the sketch above, +to avoid marking the pipeline for exclusive ownership and to avoid the cost of +data cloning described in the Exclusive Ownership section. + +## Ordering Processors + +The order in which processors are specified in a pipeline is important, as this is the +order in which each processor is applied to traces and metrics. + +### Include/Exclude Metrics + +The [filter processor](filterprocessor/README.md) exposes the option to provide a set of +metric names to match against to determine if the metric should be +included or excluded from the processor. To configure this option, under +`include` and/or `exclude` both `match_type` and `metric_names` are required. + +Note: If both `include` and `exclude` are specified, the `include` properties +are checked before the `exclude` properties. + +```yaml +filter: + # metrics indicates this processor applies to metrics + metrics: + # include and/or exclude can be specified. However, the include properties + # are always checked before the exclude properties. + {include, exclude}: + # match_type controls how item matching is done. + # Possible values are "regexp" or "strict". + # This is a required field. + match_type: {strict, regexp} + + # regexp is an optional configuration section for match_type regexp. + regexp: + # < see "Match Configuration" below > + + # metric_names specify an array of items to match the metric name against. + # This is a required field. + metric_names: [<item1>, ..., <itemN>] +``` + +#### Match Configuration + +Some `match_type` values have additional configuration options that can be +specified. The `match_type` value is the name of the configuration section. +These sections are optional. + +```yaml +# regexp is an optional configuration section for match_type regexp. +regexp: + # cacheenabled determines whether match results are LRU cached to make subsequent matches faster. + # Cache size is unlimited unless cachemaxnumentries is also specified. + cacheenabled: <bool> + # cachemaxnumentries is the max number of entries of the LRU cache; ignored if cacheenabled is false. + cachemaxnumentries: <int> +``` + +### Include/Exclude Spans + +The [attribute processor](attributesprocessor/README.md) and the [span processor](spanprocessor/README.md) expose +the option to provide a set of properties of a span to match against to determine +if the span should be included or excluded from the processor. To configure +this option, under `include` and/or `exclude` at least `match_type` and one of +`services`, `span_names` or `attributes` is required. + +Note: If both `include` and `exclude` are specified, the `include` properties +are checked before the `exclude` properties. + +```yaml +{span, attributes}: + # include and/or exclude can be specified. However, the include properties + # are always checked before the exclude properties. + {include, exclude}: + # At least one of services, span_names or attributes must be specified. + # It is supported to have more than one specified, but all of the specified + # conditions must evaluate to true for a match to occur. + + # match_type controls how items in "services" and "span_names" arrays are + # interpreted. Possible values are "regexp" or "strict". + # This is a required field. + match_type: {strict, regexp} + + # regexp is an optional configuration section for match_type regexp.
+ regexp: + # < see "Match Configuration" below > + + # services specify an array of items to match the service name against. + # A match occurs if the span service name matches at least one of the items. + # This is an optional field. + services: [<item1>, ..., <itemN>] + + # The span name must match at least one of the items. + # This is an optional field. + span_names: [<item1>, ..., <itemN>] + + # Attributes specifies the list of attributes to match against. + # All of these attributes must match exactly for a match to occur. + # This is an optional field. + attributes: + # Key specifies the attribute to match against. + - key: <key> + # Value specifies the exact value to match against. + # If not specified, a match occurs if the key is present in the attributes. + value: {value} +``` + +#### Match Configuration + +Some `match_type` values have additional configuration options that can be +specified. The `match_type` value is the name of the configuration section. +These sections are optional. + +```yaml +# regexp is an optional configuration section for match_type regexp. +regexp: + # cacheenabled determines whether match results are LRU cached to make subsequent matches faster. + # Cache size is unlimited unless cachemaxnumentries is also specified. + cacheenabled: <bool> + # cachemaxnumentries is the max number of entries of the LRU cache; ignored if cacheenabled is false. + cachemaxnumentries: <int> +``` diff --git a/internal/otel_collector/processor/attributesprocessor/README.md b/internal/otel_collector/processor/attributesprocessor/README.md new file mode 100644 index 00000000000..36b612c4acc --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/README.md @@ -0,0 +1,106 @@ +# Attributes Processor + +Supported pipeline types: traces + +The attributes processor modifies attributes of a span. Please refer to +[config.go](./config.go) for the config spec. + +It optionally supports the ability to [include/exclude spans](../README.md#includeexclude-spans). + +It takes a list of actions which are performed in the order specified in the config. +The supported actions are: +- `insert`: Inserts a new attribute in spans where the key does not already exist. +- `update`: Updates an attribute in spans where the key does exist. +- `upsert`: Performs insert or update. Inserts a new attribute in spans where the + key does not already exist and updates an attribute in spans where the key + does exist. +- `delete`: Deletes an attribute from a span. +- `hash`: Hashes (SHA1) an existing attribute value. +- `extract`: Extracts values using a regular expression rule from the input key + to target keys specified in the rule. If a target key already exists, it will + be overridden. Note: It behaves similarly to the Span Processor `to_attributes` + setting with the existing attribute as the source. + +For the actions `insert`, `update` and `upsert`, + - `key` is required + - one of `value` or `from_attribute` is required + - `action` is required. +```yaml + # Key specifies the attribute to act upon. +- key: <key> + action: {insert, update, upsert} + # Value specifies the value to populate for the key. + # The type is inferred from the configuration. + value: <value> + + # Key specifies the attribute to act upon. +- key: <key> + action: {insert, update, upsert} + # FromAttribute specifies the attribute from the span to use to populate + # the value. If the attribute doesn't exist, no action is performed. + from_attribute: <other key> +``` + +For the `delete` action, + - `key` is required + - `action: delete` is required.
+```yaml +# Key specifies the attribute to act upon. +- key: <key> + action: delete +``` + + +For the `hash` action, + - `key` is required + - `action: hash` is required. +```yaml +# Key specifies the attribute to act upon. +- key: <key> + action: hash +``` + + +For the `extract` action, + - `key` is required + - `pattern` is required. + ```yaml + # Key specifies the attribute to extract values from. + # The value of `key` is NOT altered. +- key: <key> + # Rule specifies the regex pattern used to extract attributes from the value + # of `key`. + # The submatchers must be named. + # If attributes already exist, they will be overwritten. + pattern: <regex pattern> + action: extract + + ``` + +The list of actions can be composed to create rich scenarios, such as +back-filling attributes, copying values to a new key, or redacting sensitive information. +The following is a sample configuration. + +```yaml +processors: + attributes/example: + actions: + - key: db.table + action: delete + - key: redacted_span + value: true + action: upsert + - key: copy_key + from_attribute: key_original + action: update + - key: account_id + value: 2245 + - key: account_password + action: delete + - key: account_email + action: hash + +``` + +Refer to [config.yaml](./testdata/config.yaml) for detailed +examples on using the processor. diff --git a/internal/otel_collector/processor/attributesprocessor/attributes_log.go b/internal/otel_collector/processor/attributesprocessor/attributes_log.go new file mode 100644 index 00000000000..83b5174ab0d --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/attributes_log.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attributesprocessor + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterlog" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +type logAttributesProcessor struct { + attrProc *processorhelper.AttrProc + include filterlog.Matcher + exclude filterlog.Matcher +} + +// newLogAttributesProcessor returns a processor that modifies attributes of a +// log record. To construct the attributes processors, the use of the factory +// methods is required in order to validate the inputs.
+func newLogAttributesProcessor(attrProc *processorhelper.AttrProc, include, exclude filterlog.Matcher) *logAttributesProcessor { + return &logAttributesProcessor{ + attrProc: attrProc, + include: include, + exclude: exclude, + } +} + +// ProcessLogs implements the LogsProcessor +func (a *logAttributesProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { + rls := ld.ResourceLogs() + for i := 0; i < rls.Len(); i++ { + rs := rls.At(i) + ilss := rs.InstrumentationLibraryLogs() + resource := rs.Resource() + for j := 0; j < ilss.Len(); j++ { + ils := ilss.At(j) + logs := ils.Logs() + library := ils.InstrumentationLibrary() + for k := 0; k < logs.Len(); k++ { + lr := logs.At(k) + if a.skipLog(lr, resource, library) { + continue + } + + a.attrProc.Process(lr.Attributes()) + } + } + } + return ld, nil +} + +// skipLog determines if a log should be processed. +// True is returned when a log should be skipped. +// False is returned when a log should not be skipped. +// The logic determining if a log should be processed is set +// in the attribute configuration with the include and exclude settings. +// Include properties are checked before exclude settings are checked. +func (a *logAttributesProcessor) skipLog(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationLibrary) bool { + if a.include != nil { + // A false returned in this case means the log should not be processed. + if include := a.include.MatchLogRecord(lr, resource, library); !include { + return true + } + } + + if a.exclude != nil { + // A true returned in this case means the log should not be processed. + if exclude := a.exclude.MatchLogRecord(lr, resource, library); exclude { + return true + } + } + + return false +} diff --git a/internal/otel_collector/processor/attributesprocessor/attributes_log_test.go b/internal/otel_collector/processor/attributesprocessor/attributes_log_test.go new file mode 100644 index 00000000000..2bf0afe50a7 --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/attributes_log_test.go @@ -0,0 +1,423 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package attributesprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +// Common structure for all the Tests +type logTestCase struct { + name string + inputAttributes map[string]pdata.AttributeValue + expectedAttributes map[string]pdata.AttributeValue +} + +// runIndividualLogTestCase is the common logic of passing log data through a configured attributes processor. +func runIndividualLogTestCase(t *testing.T, tt logTestCase, tp component.LogsProcessor) { + t.Run(tt.name, func(t *testing.T) { + ld := generateLogData(tt.name, tt.inputAttributes) + assert.NoError(t, tp.ConsumeLogs(context.Background(), ld)) + // Ensure that the modified `ld` has the attributes sorted: + sortLogAttributes(ld) + require.Equal(t, generateLogData(tt.name, tt.expectedAttributes), ld) + }) +} + +func generateLogData(logName string, attrs map[string]pdata.AttributeValue) pdata.Logs { + td := pdata.NewLogs() + td.ResourceLogs().Resize(1) + rs := td.ResourceLogs().At(0) + rs.InstrumentationLibraryLogs().Resize(1) + ils := rs.InstrumentationLibraryLogs().At(0) + lrs := ils.Logs() + lrs.Resize(1) + lrs.At(0).SetName(logName) + lrs.At(0).Attributes().InitFromMap(attrs).Sort() + return td +} + +func sortLogAttributes(ld pdata.Logs) { + rss := ld.ResourceLogs() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + rs.Resource().Attributes().Sort() + ilss := rs.InstrumentationLibraryLogs() + for j := 0; j < ilss.Len(); j++ { + logs := ilss.At(j).Logs() + for k := 0; k < logs.Len(); k++ { + s := logs.At(k) + s.Attributes().Sort() + } + } + } +} + +// TestLogProcessor_NilEmptyData tests that nil and empty log payloads pass through the processor unchanged.
+func TestLogProcessor_NilEmptyData(t *testing.T) { + type nilEmptyTestCase struct { + name string + input pdata.Logs + output pdata.Logs + } + testCases := []nilEmptyTestCase{ + { + name: "empty", + input: testdata.GenerateLogDataEmpty(), + output: testdata.GenerateLogDataEmpty(), + }, + { + name: "one-empty-resource-logs", + input: testdata.GenerateLogDataOneEmptyResourceLogs(), + output: testdata.GenerateLogDataOneEmptyResourceLogs(), + }, + { + name: "no-libraries", + input: testdata.GenerateLogDataOneEmptyResourceLogs(), + output: testdata.GenerateLogDataOneEmptyResourceLogs(), + }, + } + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Settings.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + {Key: "attribute1", Action: processorhelper.DELETE}, + } + + tp, err := factory.CreateLogsProcessor( + context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewLogsNop()) + require.Nil(t, err) + require.NotNil(t, tp) + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, tp.ConsumeLogs(context.Background(), tt.input)) + assert.EqualValues(t, tt.output, tt.input) + }) + } +} + +func TestAttributes_FilterLogs(t *testing.T) { + testCases := []logTestCase{ + { + name: "apply processor", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply processor with different value for exclude property", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "incorrect name for include property", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "attribute match for exclude property", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + LogNames: []string{"^[^i].*"}, + Config: *createConfig(filterset.Regexp), + } + oCfg.Exclude = &filterconfig.MatchProperties{ + Attributes: []filterconfig.Attribute{ + {Key: "NoModification", Value: true}, + }, + Config: *createConfig(filterset.Strict), + } + tp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualLogTestCase(t, tt, tp) + } +} + +func TestAttributes_FilterLogsByNameStrict(t *testing.T) { + testCases := []logTestCase{ + { + name: "apply", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": 
pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "incorrect_log_name", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "dont_apply", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "incorrect_log_name_with_attr", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + LogNames: []string{"apply", "dont_apply"}, + Config: *createConfig(filterset.Strict), + } + oCfg.Exclude = &filterconfig.MatchProperties{ + LogNames: []string{"dont_apply"}, + Config: *createConfig(filterset.Strict), + } + tp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualLogTestCase(t, tt, tp) + } +} + +func TestAttributes_FilterLogsByNameRegexp(t *testing.T) { + testCases := []logTestCase{ + { + name: "apply_to_log_with_no_attrs", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply_to_log_with_attr", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "incorrect_log_name", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "apply_dont_apply", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "incorrect_log_name_with_attr", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + LogNames: []string{"^apply.*"}, + Config: *createConfig(filterset.Regexp), + } + oCfg.Exclude = &filterconfig.MatchProperties{ + LogNames: []string{".*dont_apply$"}, + Config: *createConfig(filterset.Regexp), + } + tp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualLogTestCase(t, tt, tp) + } +} + +func TestLogAttributes_Hash(t *testing.T) { + testCases := []logTestCase{ + { + name: "String", 
+ inputAttributes: map[string]pdata.AttributeValue{ + "user.email": pdata.NewAttributeValueString("john.doe@example.com"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.email": pdata.NewAttributeValueString("73ec53c4ba1747d485ae2a0d7bfafa6cda80a5a9"), + }, + }, + { + name: "Int", + inputAttributes: map[string]pdata.AttributeValue{ + "user.id": pdata.NewAttributeValueInt(10), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.id": pdata.NewAttributeValueString("71aa908aff1548c8c6cdecf63545261584738a25"), + }, + }, + { + name: "Double", + inputAttributes: map[string]pdata.AttributeValue{ + "user.balance": pdata.NewAttributeValueDouble(99.1), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.balance": pdata.NewAttributeValueString("76429edab4855b03073f9429fd5d10313c28655e"), + }, + }, + { + name: "Bool", + inputAttributes: map[string]pdata.AttributeValue{ + "user.authenticated": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.authenticated": pdata.NewAttributeValueString("bf8b4530d8d246dd74ac53a13471bba17941dff7"), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "user.email", Action: processorhelper.HASH}, + {Key: "user.id", Action: processorhelper.HASH}, + {Key: "user.balance", Action: processorhelper.HASH}, + {Key: "user.authenticated", Action: processorhelper.HASH}, + } + + tp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualLogTestCase(t, tt, tp) + } +} + +func BenchmarkAttributes_FilterLogsByName(b *testing.B) { + testCases := []logTestCase{ + { + name: "apply_to_log_with_no_attrs", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply_to_log_with_attr", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "dont_apply", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + LogNames: []string{"^apply.*"}, + } + tp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop()) + require.Nil(b, err) + require.NotNil(b, tp) + + for _, tt := range testCases { + td := generateLogData(tt.name, tt.inputAttributes) + + b.Run(tt.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + assert.NoError(b, tp.ConsumeLogs(context.Background(), td)) + } + }) + + // Ensure that the modified `td` has the attributes sorted: + sortLogAttributes(td) + require.Equal(b, generateLogData(tt.name, tt.expectedAttributes), td) + } +} diff --git a/internal/otel_collector/processor/attributesprocessor/attributes_trace.go 
b/internal/otel_collector/processor/attributesprocessor/attributes_trace.go new file mode 100644 index 00000000000..0e708fe55be --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/attributes_trace.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attributesprocessor + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterspan" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +type spanAttributesProcessor struct { + attrProc *processorhelper.AttrProc + include filterspan.Matcher + exclude filterspan.Matcher +} + +// newSpanAttributesProcessor returns a processor that modifies attributes of a span. +// To construct the attributes processors, the use of the factory methods is required +// in order to validate the inputs. +func newSpanAttributesProcessor(attrProc *processorhelper.AttrProc, include, exclude filterspan.Matcher) *spanAttributesProcessor { + return &spanAttributesProcessor{ + attrProc: attrProc, + include: include, + exclude: exclude, + } +} + +// ProcessTraces implements the TProcessor +func (a *spanAttributesProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + resource := rs.Resource() + ilss := rs.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + ils := ilss.At(j) + spans := ils.Spans() + library := ils.InstrumentationLibrary() + for k := 0; k < spans.Len(); k++ { + span := spans.At(k) + if filterspan.SkipSpan(a.include, a.exclude, span, resource, library) { + continue + } + + a.attrProc.Process(span.Attributes()) + } + } + } + return td, nil +} diff --git a/internal/otel_collector/processor/attributesprocessor/attributes_trace_test.go b/internal/otel_collector/processor/attributesprocessor/attributes_trace_test.go new file mode 100644 index 00000000000..a6eaf7f7506 --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/attributes_trace_test.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package attributesprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/processor/processorhelper" + "go.opentelemetry.io/collector/translator/conventions" +) + +// Common structure for all the Tests +type testCase struct { + name string + serviceName string + inputAttributes map[string]pdata.AttributeValue + expectedAttributes map[string]pdata.AttributeValue +} + +// runIndividualTestCase is the common logic of passing trace data through a configured attributes processor. +func runIndividualTestCase(t *testing.T, tt testCase, tp component.TracesProcessor) { + t.Run(tt.name, func(t *testing.T) { + td := generateTraceData(tt.serviceName, tt.name, tt.inputAttributes) + assert.NoError(t, tp.ConsumeTraces(context.Background(), td)) + // Ensure that the modified `td` has the attributes sorted: + sortAttributes(td) + require.Equal(t, generateTraceData(tt.serviceName, tt.name, tt.expectedAttributes), td) + }) +} + +func generateTraceData(serviceName, spanName string, attrs map[string]pdata.AttributeValue) pdata.Traces { + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + rs := td.ResourceSpans().At(0) + if serviceName != "" { + rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, serviceName) + } + rs.InstrumentationLibrarySpans().Resize(1) + ils := rs.InstrumentationLibrarySpans().At(0) + spans := ils.Spans() + spans.Resize(1) + spans.At(0).SetName(spanName) + spans.At(0).Attributes().InitFromMap(attrs).Sort() + return td +} + +func sortAttributes(td pdata.Traces) { + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + rs.Resource().Attributes().Sort() + ilss := rs.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + spans := ilss.At(j).Spans() + for k := 0; k < spans.Len(); k++ { + spans.At(k).Attributes().Sort() + } + } + } +} + +// TestSpanProcessor_NilEmptyData tests that nil and empty trace payloads pass through the processor unchanged. +func TestSpanProcessor_NilEmptyData(t *testing.T) { + type nilEmptyTestCase struct { + name string + input pdata.Traces + output pdata.Traces + } + // TODO: Add test for "nil" Span/Attributes. This needs support from data slices to allow constructing that.
+ testCases := []nilEmptyTestCase{ + { + name: "empty", + input: testdata.GenerateTraceDataEmpty(), + output: testdata.GenerateTraceDataEmpty(), + }, + { + name: "one-empty-resource-spans", + input: testdata.GenerateTraceDataOneEmptyResourceSpans(), + output: testdata.GenerateTraceDataOneEmptyResourceSpans(), + }, + { + name: "no-libraries", + input: testdata.GenerateTraceDataNoLibraries(), + output: testdata.GenerateTraceDataNoLibraries(), + }, + { + name: "one-empty-instrumentation-library", + input: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + output: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + }, + } + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Settings.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + {Key: "attribute1", Action: processorhelper.DELETE}, + } + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, tp.ConsumeTraces(context.Background(), tt.input)) + assert.EqualValues(t, tt.output, tt.input) + }) + } +} + +func TestAttributes_FilterSpans(t *testing.T) { + testCases := []testCase{ + { + name: "apply processor", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply processor with different value for exclude property", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "incorrect name for include property", + serviceName: "noname", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "attribute match for exclude property", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + Services: []string{"svcA", "svcB.*"}, + Config: *createConfig(filterset.Regexp), + } + oCfg.Exclude = &filterconfig.MatchProperties{ + Attributes: []filterconfig.Attribute{ + {Key: "NoModification", Value: true}, + }, + Config: *createConfig(filterset.Strict), + } + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, tp) + } +} + +func TestAttributes_FilterSpansByNameStrict(t *testing.T) { + testCases := []testCase{ + { + name: "apply", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + 
"attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "incorrect_span_name", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "dont_apply", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "incorrect_span_name_with_attr", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + SpanNames: []string{"apply", "dont_apply"}, + Config: *createConfig(filterset.Strict), + } + oCfg.Exclude = &filterconfig.MatchProperties{ + SpanNames: []string{"dont_apply"}, + Config: *createConfig(filterset.Strict), + } + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, tp) + } +} + +func TestAttributes_FilterSpansByNameRegexp(t *testing.T) { + testCases := []testCase{ + { + name: "apply_to_span_with_no_attrs", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply_to_span_with_attr", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "incorrect_span_name", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "apply_dont_apply", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + { + name: "incorrect_span_name_with_attr", + serviceName: "svcB", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(true), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + SpanNames: []string{"^apply.*"}, + Config: *createConfig(filterset.Regexp), + } + oCfg.Exclude = &filterconfig.MatchProperties{ + SpanNames: []string{".*dont_apply$"}, + Config: 
*createConfig(filterset.Regexp), + } + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, tp) + } +} + +func TestAttributes_Hash(t *testing.T) { + testCases := []testCase{ + { + name: "String", + inputAttributes: map[string]pdata.AttributeValue{ + "user.email": pdata.NewAttributeValueString("john.doe@example.com"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.email": pdata.NewAttributeValueString("73ec53c4ba1747d485ae2a0d7bfafa6cda80a5a9"), + }, + }, + { + name: "Int", + inputAttributes: map[string]pdata.AttributeValue{ + "user.id": pdata.NewAttributeValueInt(10), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.id": pdata.NewAttributeValueString("71aa908aff1548c8c6cdecf63545261584738a25"), + }, + }, + { + name: "Double", + inputAttributes: map[string]pdata.AttributeValue{ + "user.balance": pdata.NewAttributeValueDouble(99.1), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.balance": pdata.NewAttributeValueString("76429edab4855b03073f9429fd5d10313c28655e"), + }, + }, + { + name: "Bool", + inputAttributes: map[string]pdata.AttributeValue{ + "user.authenticated": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user.authenticated": pdata.NewAttributeValueString("bf8b4530d8d246dd74ac53a13471bba17941dff7"), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "user.email", Action: processorhelper.HASH}, + {Key: "user.id", Action: processorhelper.HASH}, + {Key: "user.balance", Action: processorhelper.HASH}, + {Key: "user.authenticated", Action: processorhelper.HASH}, + } + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, tp) + } +} + +func BenchmarkAttributes_FilterSpansByName(b *testing.B) { + testCases := []testCase{ + { + name: "apply_to_span_with_no_attrs", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + }, + }, + { + name: "apply_to_span_with_attr", + inputAttributes: map[string]pdata.AttributeValue{ + "NoModification": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "attribute1": pdata.NewAttributeValueInt(123), + "NoModification": pdata.NewAttributeValueBool(false), + }, + }, + { + name: "dont_apply", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "attribute1", Action: processorhelper.INSERT, Value: 123}, + } + oCfg.Include = &filterconfig.MatchProperties{ + SpanNames: []string{"^apply.*"}, + } + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + require.Nil(b, err) + require.NotNil(b, tp) + + for _, tt := range testCases { + td := generateTraceData(tt.serviceName, tt.name, tt.inputAttributes) + + b.Run(tt.name, func(b 
*testing.B) {
+            for i := 0; i < b.N; i++ {
+                assert.NoError(b, tp.ConsumeTraces(context.Background(), td))
+            }
+        })
+
+        // Ensure that the modified `td` has the attributes sorted:
+        sortAttributes(td)
+        require.Equal(b, generateTraceData(tt.serviceName, tt.name, tt.expectedAttributes), td)
+    }
+}
+
+func createConfig(matchType filterset.MatchType) *filterset.Config {
+    return &filterset.Config{
+        MatchType: matchType,
+    }
+} diff --git a/internal/otel_collector/processor/attributesprocessor/config.go b/internal/otel_collector/processor/attributesprocessor/config.go new file mode 100644 index 00000000000..6e4b10b3bd4 --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/config.go @@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attributesprocessor
+
+import (
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/internal/processor/filterconfig"
+    "go.opentelemetry.io/collector/processor/processorhelper"
+)
+
+// Config specifies the set of attributes to be inserted, updated, upserted,
+// and deleted, and the properties used to include/exclude a span from being processed.
+// This processor handles all forms of modifications to attributes within a span.
+// Prior to any actions being applied, each span is compared against
+// the include properties and then the exclude properties if they are specified.
+// This determines if a span is to be processed or not.
+// The list of actions is applied in the order specified in the configuration.
+type Config struct {
+    configmodels.ProcessorSettings `mapstructure:",squash"`
+
+    filterconfig.MatchConfig `mapstructure:",squash"`
+
+    // Specifies the list of attributes to act on.
+    // The set of actions is {INSERT, UPDATE, UPSERT, DELETE, HASH, EXTRACT}.
+    // This is a required field.
+    processorhelper.Settings `mapstructure:",squash"`
+} diff --git a/internal/otel_collector/processor/attributesprocessor/config_test.go b/internal/otel_collector/processor/attributesprocessor/config_test.go new file mode 100644 index 00000000000..c4bf7ad9b49 --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/config_test.go @@ -0,0 +1,237 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
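Review note: the expected values in the `TestAttributes_Hash` cases earlier look like hex-encoded SHA-1 digests. A quick standalone check for the string case, under the assumption (not verified here) that the HASH action digests the attribute's raw UTF-8 bytes; the implementation itself lives in `processorhelper`:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	// If the HASH action SHA-1s the raw string bytes, this should print the
	// same value the test expects for "user.email":
	// 73ec53c4ba1747d485ae2a0d7bfafa6cda80a5a9 (assumption, compare when running).
	sum := sha1.Sum([]byte("john.doe@example.com"))
	fmt.Println(hex.EncodeToString(sum[:]))
}
```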
+ +package attributesprocessor + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +func TestLoadingConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Processors[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + assert.NoError(t, err) + require.NotNil(t, cfg) + + p0 := cfg.Processors["attributes/insert"] + assert.Equal(t, p0, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/insert", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "attribute1", Value: 123, Action: processorhelper.INSERT}, + {Key: "string key", FromAttribute: "anotherkey", Action: processorhelper.INSERT}, + }, + }, + }) + + p1 := cfg.Processors["attributes/update"] + assert.Equal(t, p1, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/update", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "boo", FromAttribute: "foo", Action: processorhelper.UPDATE}, + {Key: "db.secret", Value: "redacted", Action: processorhelper.UPDATE}, + }, + }, + }) + + p2 := cfg.Processors["attributes/upsert"] + assert.Equal(t, p2, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/upsert", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "region", Value: "planet-earth", Action: processorhelper.UPSERT}, + {Key: "new_user_key", FromAttribute: "user_key", Action: processorhelper.UPSERT}, + }, + }, + }) + + p3 := cfg.Processors["attributes/delete"] + assert.Equal(t, p3, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/delete", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "credit_card", Action: processorhelper.DELETE}, + {Key: "duplicate_key", Action: processorhelper.DELETE}, + }, + }, + }) + + p4 := cfg.Processors["attributes/hash"] + assert.Equal(t, p4, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/hash", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "user.email", Action: processorhelper.HASH}, + }, + }, + }) + + p5 := cfg.Processors["attributes/excludemulti"] + assert.Equal(t, p5, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/excludemulti", + TypeVal: typeStr, + }, + MatchConfig: filterconfig.MatchConfig{ + Exclude: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"svcA", "svcB"}, + Attributes: []filterconfig.Attribute{ + {Key: "env", Value: "dev"}, + {Key: "test_request"}, + }, + }, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "credit_card", Action: processorhelper.DELETE}, + {Key: "duplicate_key", Action: processorhelper.DELETE}, + 
}, + }, + }) + + p6 := cfg.Processors["attributes/includeservices"] + assert.Equal(t, p6, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/includeservices", + TypeVal: typeStr, + }, + MatchConfig: filterconfig.MatchConfig{ + Include: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{"auth.*", "login.*"}, + }, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "credit_card", Action: processorhelper.DELETE}, + {Key: "duplicate_key", Action: processorhelper.DELETE}, + }, + }, + }) + + p7 := cfg.Processors["attributes/selectiveprocessing"] + assert.Equal(t, p7, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/selectiveprocessing", + TypeVal: typeStr, + }, + MatchConfig: filterconfig.MatchConfig{ + Include: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Services: []string{"svcA", "svcB"}, + }, + Exclude: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Strict), + Attributes: []filterconfig.Attribute{ + {Key: "redact_trace", Value: false}, + }, + }, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "credit_card", Action: processorhelper.DELETE}, + {Key: "duplicate_key", Action: processorhelper.DELETE}, + }, + }, + }) + + p8 := cfg.Processors["attributes/complex"] + assert.Equal(t, p8, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/complex", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "operation", Value: "default", Action: processorhelper.INSERT}, + {Key: "svc.operation", FromAttribute: "operation", Action: processorhelper.UPSERT}, + {Key: "operation", Action: processorhelper.DELETE}, + }, + }, + }) + + p9 := cfg.Processors["attributes/example"] + assert.Equal(t, p9, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/example", + TypeVal: typeStr, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "db.table", Action: processorhelper.DELETE}, + {Key: "redacted_span", Value: true, Action: processorhelper.UPSERT}, + {Key: "copy_key", FromAttribute: "key_original", Action: processorhelper.UPDATE}, + {Key: "account_id", Value: 2245, Action: processorhelper.INSERT}, + {Key: "account_password", Action: processorhelper.DELETE}, + }, + }, + }) + + p10 := cfg.Processors["attributes/regexp"] + assert.Equal(t, p10, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "attributes/regexp", + TypeVal: typeStr, + }, + MatchConfig: filterconfig.MatchConfig{ + Include: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + Services: []string{"auth.*"}, + }, + Exclude: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + SpanNames: []string{"login.*"}, + }, + }, + Settings: processorhelper.Settings{ + Actions: []processorhelper.ActionKeyValue{ + {Key: "password", Action: processorhelper.UPDATE, Value: "obfuscated"}, + {Key: "token", Action: processorhelper.DELETE}, + }, + }, + }) + +} diff --git a/internal/otel_collector/processor/attributesprocessor/doc.go b/internal/otel_collector/processor/attributesprocessor/doc.go new file mode 100644 index 00000000000..6f8455a1722 --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package attributesprocessor contains the logic to modify attributes of a span.
+// It supports insert, update, upsert, delete, hash and extract as actions.
+package attributesprocessor diff --git a/internal/otel_collector/processor/attributesprocessor/factory.go b/internal/otel_collector/processor/attributesprocessor/factory.go new file mode 100644 index 00000000000..c0a30538004 --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/factory.go @@ -0,0 +1,113 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attributesprocessor
+
+import (
+    "context"
+    "fmt"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/consumer"
+    "go.opentelemetry.io/collector/internal/processor/filterlog"
+    "go.opentelemetry.io/collector/internal/processor/filterspan"
+    "go.opentelemetry.io/collector/processor/processorhelper"
+)
+
+const (
+    // typeStr is the value of "type" key in configuration.
+    typeStr = "attributes"
+)
+
+var processorCapabilities = component.ProcessorCapabilities{MutatesConsumedData: true}
+
+// NewFactory returns a new factory for the Attributes processor.
+func NewFactory() component.ProcessorFactory {
+    return processorhelper.NewFactory(
+        typeStr,
+        createDefaultConfig,
+        processorhelper.WithTraces(createTraceProcessor),
+        processorhelper.WithLogs(createLogProcessor))
+}
+
+// Note: This isn't a valid configuration because the processor would do no work.
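+// At least one entry must be supplied via the `actions` setting;
+// createTraceProcessor and createLogProcessor below reject an empty list.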
+func createDefaultConfig() configmodels.Processor { + return &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + } +} + +func createTraceProcessor( + _ context.Context, + _ component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, +) (component.TracesProcessor, error) { + oCfg := cfg.(*Config) + if len(oCfg.Actions) == 0 { + return nil, fmt.Errorf("error creating \"attributes\" processor due to missing required field \"actions\" of processor %q", cfg.Name()) + } + attrProc, err := processorhelper.NewAttrProc(&oCfg.Settings) + if err != nil { + return nil, fmt.Errorf("error creating \"attributes\" processor: %w of processor %q", err, cfg.Name()) + } + include, err := filterspan.NewMatcher(oCfg.Include) + if err != nil { + return nil, err + } + exclude, err := filterspan.NewMatcher(oCfg.Exclude) + if err != nil { + return nil, err + } + + return processorhelper.NewTraceProcessor( + cfg, + nextConsumer, + newSpanAttributesProcessor(attrProc, include, exclude), + processorhelper.WithCapabilities(processorCapabilities)) +} + +func createLogProcessor( + _ context.Context, + _ component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.LogsConsumer, +) (component.LogsProcessor, error) { + oCfg := cfg.(*Config) + if len(oCfg.Actions) == 0 { + return nil, fmt.Errorf("error creating \"attributes\" processor due to missing required field \"actions\" of processor %q", cfg.Name()) + } + attrProc, err := processorhelper.NewAttrProc(&oCfg.Settings) + if err != nil { + return nil, fmt.Errorf("error creating \"attributes\" processor: %w of processor %q", err, cfg.Name()) + } + include, err := filterlog.NewMatcher(oCfg.Include) + if err != nil { + return nil, err + } + exclude, err := filterlog.NewMatcher(oCfg.Exclude) + if err != nil { + return nil, err + } + + return processorhelper.NewLogsProcessor( + cfg, + nextConsumer, + newLogAttributesProcessor(attrProc, include, exclude), + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/internal/otel_collector/processor/attributesprocessor/factory_test.go b/internal/otel_collector/processor/attributesprocessor/factory_test.go new file mode 100644 index 00000000000..a8fa6b9549f --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/factory_test.go @@ -0,0 +1,149 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
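Usage note: because the default config carries no actions, creating a processor from it fails fast, which is what the `EmptyActions` tests below exercise. A tiny sketch of that failure mode, assuming the vendored module's import path; the printed message is abbreviated:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/processor/attributesprocessor"
)

func main() {
	factory := attributesprocessor.NewFactory()
	cfg := factory.CreateDefaultConfig() // no actions set

	// Expected to fail: the factory requires a non-empty "actions" list.
	_, err := factory.CreateTracesProcessor(
		context.Background(),
		component.ProcessorCreateParams{},
		cfg,
		consumertest.NewTracesNop(),
	)
	fmt.Println(err) // error creating "attributes" processor due to missing required field "actions" ...
}
```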
+ +package attributesprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +func TestFactory_Type(t *testing.T) { + factory := NewFactory() + assert.Equal(t, factory.Type(), configmodels.Type(typeStr)) +} + +func TestFactory_CreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.Equal(t, cfg, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: typeStr, + TypeVal: typeStr, + }, + }) + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestFactoryCreateTraceProcessor_EmptyActions(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + ap, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + assert.Error(t, err) + assert.Nil(t, ap) +} + +func TestFactoryCreateTraceProcessor_InvalidActions(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + // Missing key + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "", Value: 123, Action: processorhelper.UPSERT}, + } + ap, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + assert.Error(t, err) + assert.Nil(t, ap) +} + +func TestFactoryCreateTraceProcessor(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "a key", Action: processorhelper.DELETE}, + } + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + assert.NotNil(t, tp) + assert.NoError(t, err) + + tp, err = factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, nil) + assert.Nil(t, tp) + assert.Error(t, err) + + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Action: processorhelper.DELETE}, + } + tp, err = factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + assert.Nil(t, tp) + assert.Error(t, err) +} + +func TestFactory_CreateMetricsProcessor(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + mp, err := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, nil) + require.Nil(t, mp) + assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) +} + +func TestFactoryCreateLogsProcessor_EmptyActions(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + ap, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop()) + assert.Error(t, err) + assert.Nil(t, ap) +} + +func TestFactoryCreateLogsProcessor_InvalidActions(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + // Missing key + oCfg.Actions = []processorhelper.ActionKeyValue{ + {Key: "", Value: 123, Action: processorhelper.UPSERT}, + } + ap, err := factory.CreateLogsProcessor(context.Background(), 
component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop())
+    assert.Error(t, err)
+    assert.Nil(t, ap)
+}
+
+func TestFactoryCreateLogsProcessor(t *testing.T) {
+    factory := NewFactory()
+    cfg := factory.CreateDefaultConfig()
+    oCfg := cfg.(*Config)
+    oCfg.Actions = []processorhelper.ActionKeyValue{
+        {Key: "a key", Action: processorhelper.DELETE},
+    }
+
+    tp, err := factory.CreateLogsProcessor(
+        context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop())
+    assert.NotNil(t, tp)
+    assert.NoError(t, err)
+
+    tp, err = factory.CreateLogsProcessor(
+        context.Background(), component.ProcessorCreateParams{}, cfg, nil)
+    assert.Nil(t, tp)
+    assert.Error(t, err)
+
+    oCfg.Actions = []processorhelper.ActionKeyValue{
+        {Action: processorhelper.DELETE},
+    }
+    tp, err = factory.CreateLogsProcessor(
+        context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewLogsNop())
+    assert.Nil(t, tp)
+    assert.Error(t, err)
+} diff --git a/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml b/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml new file mode 100644 index 00000000000..8525f0be36e --- /dev/null +++ b/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml @@ -0,0 +1,316 @@
+processors:
+  # The following example demonstrates inserting keys/values into spans.
+  attributes/insert:
+    actions:
+      # The following inserts a new attribute {"attribute1": 123} to spans where
+      # the key "attribute1" doesn't exist.
+      # The type of `attribute1` is inferred by the configuration.
+      # `123` is an integer and is stored as an integer in the attributes.
+      # This demonstrates how to backfill spans with an attribute that may
+      # not have been sent by all clients.
+      - key: "attribute1"
+        value: 123
+        action: insert
+      # The following uses the value from attribute "anotherkey" to insert a new
+      # attribute {"string key": <value from attribute "anotherkey">} to spans
+      # where the key "string key" does not exist. If the attribute "anotherkey"
+      # doesn't exist, no new attribute is inserted to spans.
+      - key: "string key"
+        from_attribute: "anotherkey"
+        action: insert
+
+  # The following example demonstrates using regex to create new attributes
+  # based on the value of another attribute.
+  attributes/regex_insert:
+    actions:
+      # The following uses the value from `key:http.url` to upsert attributes
+      # to the target keys specified in the `rule`.
+      # (Insert attributes for target keys that do not exist and update keys
+      # that exist.)
+      # Given http.url = http://example.com/path?queryParam1=value1,queryParam2=value2
+      # then the following attributes will be inserted:
+      # http_protocol: http
+      # http_domain: example.com
+      # http_path: path
+      # http_query_params=queryParam1=value1,queryParam2=value2
+      # http.url value does NOT change.
+      # Note: Similar to the Span Processor, if a target key already exists,
+      # it will be updated.
+      - key: "http.url"
+        pattern: ^(?P<http_protocol>.*):\/\/(?P<http_domain>.*)\/(?P<http_path>.*)(\?|\&)(?P<http_query_params>.*)
+        action: extract
+
+  # The following demonstrates configuring the processor to only update existing
+  # keys in an attribute.
+  # Note: `action: update` must be set.
+  attributes/update:
+    actions:
+      # The following updates the attribute 'boo' using the value from attribute
+      # 'foo'. Spans without the attribute 'boo' will not change.
+      - key: "boo"
+        from_attribute: "foo"
+        action: update
+      # The following updates the attribute to { "db.secret": "redacted"}.
+      # This demonstrates sanitizing spans of sensitive data.
+      - key: db.secret
+        value: redacted
+        action: update
+
+  # The following demonstrates setting an attribute on both spans where the
+  # key does exist and the key doesn't exist.
+  attributes/upsert:
+    actions:
+      # The following demonstrates how to set an attribute on all spans.
+      # Any spans that already had `region` now have value `planet-earth`.
+      # This can be done to set properties for all traces without
+      # requiring an instrumentation change.
+      - key: region
+        value: "planet-earth"
+        action: upsert
+
+      # The following demonstrates copying a value to a new key.
+      # Note: If a span doesn't contain `user_key`, no new attribute `new_user_key`
+      # is created.
+      - key: new_user_key
+        from_attribute: user_key
+        action: upsert
+
+  # The following demonstrates deleting keys from an attribute.
+  attributes/delete:
+    actions:
+      - key: credit_card
+        action: delete
+      - key: duplicate_key
+        action: delete
+
+  # The following demonstrates hashing existing attribute values.
+  attributes/hash:
+    actions:
+      - key: user.email
+        action: hash
+
+
+  # The following demonstrates excluding spans from this attributes processor.
+  # Ex. The following spans match the properties and won't be processed by the
+  # processor.
+  # Span1 Name: 'svcB' Attributes: {env: dev, test_request: 123, credit_card: 1234}
+  # Span2 Name: 'svcA' Attributes: {env: dev, test_request: false}
+  # The following spans do not match the properties and the processor actions
+  # are applied to them.
+  # Span3 Name: 'svcB' Attributes: {env: 1, test_request: dev, credit_card: 1234}
+  # Span4 Name: 'svcC' Attributes: {env: dev, test_request: false}
+  attributes/excludemulti:
+    # Specifies the span properties that exclude a span from being processed.
+    exclude:
+      # match_type defines that "services" is an array of strings that must
+      # match service name strictly.
+      match_type: strict
+      # The Span service name must be equal to "svcA" or "svcB".
+      services: ["svcA", "svcB"]
+      attributes:
+        # This exact attribute ('env', 'dev') must exist in the span for a match.
+        - {key: env, value: "dev"}
+        # As long as there is an attribute with key 'test_request' in the span
+        # there is a match.
+        - {key: test_request}
+    actions:
+      - key: credit_card
+        action: delete
+      - key: duplicate_key
+        action: delete
+
+  # The following demonstrates excluding spans from this attributes processor based on a resource.
+  attributes/excluderesources:
+    # Specifies the span properties that exclude a span from being processed.
+    exclude:
+      # match_type defines that "resources" is a map where values must match strictly.
+      match_type: strict
+      resources:
+        # This exact resource ('host.type', 'n1-standard-1') must exist in the span for a match.
+        - {key: host.type, value: "n1-standard-1"}
+    actions:
+      - key: credit_card
+        action: delete
+      - key: duplicate_key
+        action: delete
+
+  # The following demonstrates excluding spans from this attributes processor based on an instrumentation library.
+  # If no version is provided, any version will match, even no version.
+  # If a blank version is provided, only no version will match.
+  attributes/excludelibrary:
+    # Specifies the span properties that exclude a span from being processed.
+    exclude:
+      # match_type defines that "libraries" is a map where values must match strictly.
+      match_type: strict
+      libraries:
+        # This exact library ('mongo-java-driver', version '3.8.0') must exist in the span for a match.
+        - {name: "mongo-java-driver", version: "3.8.0"}
+    actions:
+      - key: credit_card
+        action: delete
+      - key: duplicate_key
+        action: delete
+
+  # The following demonstrates including spans for this attributes processor.
+  # All other spans that do not match the properties are not processed
+  # by this processor.
+  # Ex. The following spans match the properties and the actions are applied.
+  # Span1 Name: 'svcB' Attributes: {env: dev, test_request: 123, credit_card: 1234}
+  # Span2 Name: 'svcA' Attributes: {env: dev, test_request: false}
+  # Span3 Name: 'svcB' Attributes: {env: 1, test_request: dev, credit_card: 1234}
+  # The following span does not match the include properties and the
+  # processor actions are not applied.
+  # Span4 Name: 'svcC' Attributes: {env: dev, test_request: false}
+  attributes/includeservices:
+    # Specifies the span properties that must exist for the processor to be applied.
+    include:
+      # match_type defines that "services" is an array of regexp-es.
+      match_type: regexp
+      # The Span service name must match "auth.*" or "login.*" regexp.
+      services: ["auth.*", "login.*"]
+    actions:
+      - key: credit_card
+        action: delete
+      - key: duplicate_key
+        action: delete
+
+  # The following demonstrates specifying the set of span properties to
+  # indicate which spans this processor should be applied to. The `include`
+  # properties say which spans should be included and the `exclude` properties
+  # further filter out spans that shouldn't be processed.
+  # Ex. The following spans match the properties and the actions are applied.
+  # Note: Span1 is processed because the value type of redact_trace is a string instead of a boolean.
+  # Span1 Name: 'svcB' Attributes: {env: production, test_request: 123, credit_card: 1234, redact_trace: "false"}
+  # Span2 Name: 'svcA' Attributes: {env: staging, test_request: false, redact_trace: true}
+  # The following span does not match the include properties and the
+  # processor actions are not applied.
+  # Span3 Name: 'svcB' Attributes: {env: production, test_request: true, credit_card: 1234, redact_trace: false}
+  # Span4 Name: 'svcC' Attributes: {env: dev, test_request: false}
+  attributes/selectiveprocessing:
+    # Specifies the span properties that must exist for the processor to be applied.
+    include:
+      # match_type defines that "services" is an array of strings that must
+      # match service name strictly.
+      match_type: strict
+      # The Span service name must be equal to "svcA" or "svcB".
+      services: ["svcA", "svcB"]
+    exclude:
+      # match_type defines that "attributes" values must match strictly.
+      match_type: strict
+      attributes:
+        - { key: redact_trace, value: false}
+    actions:
+      - key: credit_card
+        action: delete
+      - key: duplicate_key
+        action: delete
+
+  # The following demonstrates how to backfill spans missing an attribute,
+  # inserting/updating that value under a new key, and deleting the old key. This guarantees
+  # an attribute `svc.operation` exists in spans and the attribute `operation`
+  # doesn't exist.
+  # Ex: The spans before the processor `attributes/complex`.
+  # Span1 Attributes: {timeout: 10, svc.operation: addition, operation: addition}
+  # Span2 Attributes: {operation: subtract, math_value: 123}
+  # Span3 Attributes: {timeout: 10, math_value: 4}
+  # Span4 Attributes: {svc.operation: division, timeout: 3}
+  attributes/complex:
+    # Note: There are no include and exclude settings so all spans are processed.
+    actions:
+      - key: operation
+        value: default
+        action: insert
+      # The spans after the first action of insert.
+      # Span1 Attributes: {timeout: 10, svc.operation: addition, operation: addition}
+      # Span2 Attributes: {operation: subtract, math_value: 123}
+      # Span3 Attributes: {timeout: 10, math_value: 4, operation: default}
+      # Span4 Attributes: {svc.operation: division, timeout: 3, operation: default}
+
+      - key: svc.operation
+        from_attribute: operation
+        action: upsert
+      # The spans after the second action of upsert.
+      # Span1 Attributes: {timeout: 10, svc.operation: addition, operation: addition}
+      # Span2 Attributes: {svc.operation: subtract, operation: subtract, math_value: 123}
+      # Span3 Attributes: {svc.operation: default, timeout: 10, math_value: 4, operation: default}
+      # Span4 Attributes: {svc.operation: default, timeout: 3, operation: default}
+
+      - key: operation
+        action: delete
+      # The spans after the third/final action of delete.
+      # Span1 Attributes: {timeout: 10, svc.operation: addition}
+      # Span2 Attributes: {svc.operation: subtract, math_value: 123}
+      # Span3 Attributes: {svc.operation: default, timeout: 10, math_value: 4}
+      # Span4 Attributes: {svc.operation: default, timeout: 3}
+
+  # The following is an example of various actions. The actions are applied in
+  # the order specified in the configuration.
+  attributes/example:
+    actions:
+      - key: db.table
+        action: delete
+      - key: redacted_span
+        value: true
+        action: upsert
+      - key: copy_key
+        from_attribute: key_original
+        action: update
+      - key: account_id
+        value: 2245
+        action: insert
+      - key: account_password
+        action: delete
+
+  # The following demonstrates how to process spans that have a service name and span
+  # name that match regexp patterns. This processor will remove the "token" attribute
+  # and will obfuscate the "password" attribute in spans where service name matches "auth.*"
+  # and where span name does not match "login.*".
+  attributes/regexp:
+    # Specifies the span properties that must exist for the processor to be applied.
+    include:
+      # match_type defines that "services" is an array of regexp-es.
+      match_type: regexp
+      # The span service name must match "auth.*" pattern.
+      services: ["auth.*"]
+    exclude:
+      # match_type defines that "span_names" is an array of regexp-es.
+      match_type: regexp
+      # The span name must not match "login.*" pattern.
+      span_names: ["login.*"]
+    actions:
+      - key: password
+        action: update
+        value: "obfuscated"
+      - key: token
+        action: delete
+
+  # The following demonstrates how to process spans that have an attribute that matches a regexp pattern.
+  # This processor will obfuscate the "db.statement" attribute in spans where the "db.statement" attribute
+  # matches a regex pattern.
+  attributes/regexp2:
+    # Specifies the span properties that must exist for the processor to be applied.
+    include:
+      # match_type defines that "attributes" is a map where values are regexp-es.
+      match_type: regexp
+      attributes:
+        # This attribute ('db.statement') must exist in the span and match the regex ('SELECT \* FROM USERS.*') for a match.
+        - {key: db.statement, value: 'SELECT \* FROM USERS.*'}
+    actions:
+      - key: db.statement
+        action: update
+        value: "SELECT * FROM USERS [obfuscated]"
+
+receivers:
+  examplereceiver:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [attributes/insert]
+      exporters: [exampleexporter]
+
+ diff --git a/internal/otel_collector/processor/batchprocessor/README.md b/internal/otel_collector/processor/batchprocessor/README.md new file mode 100644 index 00000000000..3ea4e354545 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/README.md @@ -0,0 +1,38 @@
+# Batch Processor
+
+Supported pipeline types: metrics, traces, logs
+
+The batch processor accepts spans, metrics, or logs and places them into
+batches. Batching helps better compress the data and reduce the number of
+outgoing connections required to transmit the data. This processor supports
+both size and time based batching.
+
+It is highly recommended to configure the batch processor on every collector.
+The batch processor should be defined in the pipeline after the `memory_limiter`
+as well as any sampling processors. This is because batching should happen after
+any data drops such as sampling.
+
+Please refer to [config.go](./config.go) for the config spec.
+
+The following configuration options can be modified:
+- `send_batch_size` (default = 8192): Number of spans or metrics after which a
+batch will be sent.
+- `timeout` (default = 200ms): Time duration after which a batch will be sent
+regardless of size.
+- `send_batch_max_size` (default = 0): The maximum number of items in a batch.
+  This property ensures that larger batches are split into smaller units.
+  By default (`0`), there is no upper limit on the batch size.
+  It is currently supported only for the trace pipeline.
+
+Examples:
+
+```yaml
+processors:
+  batch:
+  batch/2:
+    send_batch_size: 10000
+    timeout: 10s
+```
+
+Refer to [config.yaml](./testdata/config.yaml) for detailed
+examples on using the processor. diff --git a/internal/otel_collector/processor/batchprocessor/batch_processor.go b/internal/otel_collector/processor/batchprocessor/batch_processor.go new file mode 100644 index 00000000000..f43d8fd0c8a --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/batch_processor.go @@ -0,0 +1,348 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+    "context"
+    "runtime"
+    "time"
+
+    "go.opencensus.io/stats"
+    "go.opencensus.io/tag"
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/config/configtelemetry"
+    "go.opentelemetry.io/collector/consumer"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/processor"
+)
+
+// batch_processor is a component that accepts spans, metrics, or logs, places them
+// into batches and sends them downstream.
+//
+// batch_processor implements consumer.TracesConsumer, consumer.MetricsConsumer
+// and consumer.LogsConsumer
+//
+// Batches are sent out with any of the following conditions:
+// - batch size reaches cfg.SendBatchSize
+// - cfg.Timeout has elapsed since the previous batch was sent out.
+type batchProcessor struct {
+    name           string
+    logger         *zap.Logger
+    telemetryLevel configtelemetry.Level
+
+    sendBatchSize    uint32
+    timeout          time.Duration
+    sendBatchMaxSize uint32
+
+    timer   *time.Timer
+    done    chan struct{}
+    newItem chan interface{}
+    batch   batch
+
+    ctx    context.Context
+    cancel context.CancelFunc
+}
+
+type batch interface {
+    // export the current batch
+    export(ctx context.Context) error
+
+    // itemCount returns the size of the current batch
+    itemCount() uint32
+
+    // size returns the size in bytes of the current batch
+    size() int
+
+    // reset the current batch structure with zero/empty values.
+    reset()
+
+    // add item to the current batch
+    add(item interface{})
+}
+
+var _ consumer.TracesConsumer = (*batchProcessor)(nil)
+var _ consumer.MetricsConsumer = (*batchProcessor)(nil)
+var _ consumer.LogsConsumer = (*batchProcessor)(nil)
+
+func newBatchProcessor(params component.ProcessorCreateParams, cfg *Config, batch batch, telemetryLevel configtelemetry.Level) *batchProcessor {
+    ctx, cancel := context.WithCancel(context.Background())
+    return &batchProcessor{
+        name:           cfg.Name(),
+        logger:         params.Logger,
+        telemetryLevel: telemetryLevel,
+
+        sendBatchSize:    cfg.SendBatchSize,
+        sendBatchMaxSize: cfg.SendBatchMaxSize,
+        timeout:          cfg.Timeout,
+        done:             make(chan struct{}, 1),
+        newItem:          make(chan interface{}, runtime.NumCPU()),
+        batch:            batch,
+        ctx:              ctx,
+        cancel:           cancel,
+    }
+}
+
+func (bp *batchProcessor) GetCapabilities() component.ProcessorCapabilities {
+    return component.ProcessorCapabilities{MutatesConsumedData: true}
+}
+
+// Start is invoked during service startup.
+func (bp *batchProcessor) Start(context.Context, component.Host) error {
+    go bp.startProcessingCycle()
+    return nil
+}
+
+// Shutdown is invoked during service shutdown.
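+// It cancels the processing goroutine, which drains any items still queued
+// on newItem, flushes the remaining batch, and closes done before returning.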
+func (bp *batchProcessor) Shutdown(context.Context) error {
+    bp.cancel()
+    <-bp.done
+    return nil
+}
+
+func (bp *batchProcessor) startProcessingCycle() {
+    bp.timer = time.NewTimer(bp.timeout)
+    for {
+        select {
+        case <-bp.ctx.Done():
+        DONE:
+            for {
+                select {
+                case item := <-bp.newItem:
+                    bp.processItem(item)
+                default:
+                    break DONE
+                }
+            }
+            // The newItem channel has been drained; flush any remaining batch.
+            if bp.batch.itemCount() > 0 {
+                // TODO: Set a timeout on sendTraces or
+                // make it cancellable using the context that Shutdown gets as a parameter
+                bp.sendItems(statTimeoutTriggerSend)
+            }
+            close(bp.done)
+            return
+        case item := <-bp.newItem:
+            if item == nil {
+                continue
+            }
+            bp.processItem(item)
+        case <-bp.timer.C:
+            if bp.batch.itemCount() > 0 {
+                bp.sendItems(statTimeoutTriggerSend)
+            }
+            bp.resetTimer()
+        }
+    }
+}
+
+func (bp *batchProcessor) processItem(item interface{}) {
+    if bp.sendBatchMaxSize > 0 {
+        if td, ok := item.(pdata.Traces); ok {
+            itemCount := bp.batch.itemCount()
+            if itemCount+uint32(td.SpanCount()) > bp.sendBatchMaxSize {
+                tdRemainSize := splitTrace(int(bp.sendBatchSize-itemCount), td)
+                item = tdRemainSize
+                go func() {
+                    bp.newItem <- td
+                }()
+            }
+        }
+    }
+
+    bp.batch.add(item)
+    if bp.batch.itemCount() >= bp.sendBatchSize {
+        bp.timer.Stop()
+        bp.sendItems(statBatchSizeTriggerSend)
+        bp.resetTimer()
+    }
+}
+
+func (bp *batchProcessor) resetTimer() {
+    bp.timer.Reset(bp.timeout)
+}
+
+func (bp *batchProcessor) sendItems(measure *stats.Int64Measure) {
+    // TODO: Add that it came from the trace pipeline?
+    statsTags := []tag.Mutator{tag.Insert(processor.TagProcessorNameKey, bp.name)}
+    _ = stats.RecordWithTags(context.Background(), statsTags, measure.M(1), statBatchSendSize.M(int64(bp.batch.itemCount())))
+
+    if bp.telemetryLevel == configtelemetry.LevelDetailed {
+        _ = stats.RecordWithTags(context.Background(), statsTags, statBatchSendSizeBytes.M(int64(bp.batch.size())))
+    }
+
+    if err := bp.batch.export(context.Background()); err != nil {
+        bp.logger.Warn("Sender failed", zap.Error(err))
+    }
+    bp.batch.reset()
+}
+
+// ConsumeTraces implements TracesProcessor
+func (bp *batchProcessor) ConsumeTraces(_ context.Context, td pdata.Traces) error {
+    bp.newItem <- td
+    return nil
+}
+
+// ConsumeMetrics implements MetricsProcessor
+func (bp *batchProcessor) ConsumeMetrics(_ context.Context, md pdata.Metrics) error {
+    // First thing is convert into a different internal format
+    bp.newItem <- md
+    return nil
+}
+
+// ConsumeLogs implements LogsProcessor
+func (bp *batchProcessor) ConsumeLogs(_ context.Context, ld pdata.Logs) error {
+    bp.newItem <- ld
+    return nil
+}
+
+// newBatchTracesProcessor creates a new batch processor that batches traces by size or with timeout
+func newBatchTracesProcessor(params component.ProcessorCreateParams, trace consumer.TracesConsumer, cfg *Config, telemetryLevel configtelemetry.Level) *batchProcessor {
+    return newBatchProcessor(params, cfg, newBatchTraces(trace), telemetryLevel)
+}
+
+// newBatchMetricsProcessor creates a new batch processor that batches metrics by size or with timeout
+func newBatchMetricsProcessor(params component.ProcessorCreateParams, metrics consumer.MetricsConsumer, cfg *Config, telemetryLevel configtelemetry.Level) *batchProcessor {
+    return newBatchProcessor(params, cfg, newBatchMetrics(metrics), telemetryLevel)
+}
+
+// newBatchLogsProcessor creates a new batch processor that batches logs by size or with timeout
+func newBatchLogsProcessor(params component.ProcessorCreateParams, logs consumer.LogsConsumer, cfg *Config,
telemetryLevel configtelemetry.Level) *batchProcessor { + return newBatchProcessor(params, cfg, newBatchLogs(logs), telemetryLevel) +} + +type batchTraces struct { + nextConsumer consumer.TracesConsumer + traceData pdata.Traces + spanCount uint32 +} + +func newBatchTraces(nextConsumer consumer.TracesConsumer) *batchTraces { + b := &batchTraces{nextConsumer: nextConsumer} + b.reset() + return b +} + +// add updates current batchTraces by adding new TraceData object +func (bt *batchTraces) add(item interface{}) { + td := item.(pdata.Traces) + newSpanCount := td.SpanCount() + if newSpanCount == 0 { + return + } + + bt.spanCount += uint32(newSpanCount) + td.ResourceSpans().MoveAndAppendTo(bt.traceData.ResourceSpans()) +} + +func (bt *batchTraces) export(ctx context.Context) error { + return bt.nextConsumer.ConsumeTraces(ctx, bt.traceData) +} + +func (bt *batchTraces) itemCount() uint32 { + return bt.spanCount +} + +func (bt *batchTraces) size() int { + return bt.traceData.Size() +} + +// resets the current batchTraces structure with zero values +func (bt *batchTraces) reset() { + bt.traceData = pdata.NewTraces() + bt.spanCount = 0 +} + +type batchMetrics struct { + nextConsumer consumer.MetricsConsumer + metricData pdata.Metrics + metricCount uint32 +} + +func newBatchMetrics(nextConsumer consumer.MetricsConsumer) *batchMetrics { + b := &batchMetrics{nextConsumer: nextConsumer} + b.reset() + return b +} + +func (bm *batchMetrics) export(ctx context.Context) error { + return bm.nextConsumer.ConsumeMetrics(ctx, bm.metricData) +} + +func (bm *batchMetrics) itemCount() uint32 { + return bm.metricCount +} + +func (bm *batchMetrics) size() int { + return bm.metricData.Size() +} + +// resets the current batchMetrics structure with zero/empty values. +func (bm *batchMetrics) reset() { + bm.metricData = pdata.NewMetrics() + bm.metricCount = 0 +} + +func (bm *batchMetrics) add(item interface{}) { + md := item.(pdata.Metrics) + + newMetricsCount := md.MetricCount() + if newMetricsCount == 0 { + return + } + bm.metricCount += uint32(newMetricsCount) + md.ResourceMetrics().MoveAndAppendTo(bm.metricData.ResourceMetrics()) +} + +type batchLogs struct { + nextConsumer consumer.LogsConsumer + logData pdata.Logs + logCount uint32 +} + +func newBatchLogs(nextConsumer consumer.LogsConsumer) *batchLogs { + b := &batchLogs{nextConsumer: nextConsumer} + b.reset() + return b +} + +func (bm *batchLogs) export(ctx context.Context) error { + return bm.nextConsumer.ConsumeLogs(ctx, bm.logData) +} + +func (bm *batchLogs) itemCount() uint32 { + return bm.logCount +} + +func (bm *batchLogs) size() int { + return bm.logData.SizeBytes() +} + +// resets the current batchLogs structure with zero/empty values. 
+func (bm *batchLogs) reset() {
+    bm.logData = pdata.NewLogs()
+    bm.logCount = 0
+}
+
+func (bm *batchLogs) add(item interface{}) {
+    ld := item.(pdata.Logs)
+
+    newLogsCount := ld.LogRecordCount()
+    if newLogsCount == 0 {
+        return
+    }
+    bm.logCount += uint32(newLogsCount)
+    ld.ResourceLogs().MoveAndAppendTo(bm.logData.ResourceLogs())
+} diff --git a/internal/otel_collector/processor/batchprocessor/batch_processor_test.go b/internal/otel_collector/processor/batchprocessor/batch_processor_test.go new file mode 100644 index 00000000000..b21bbe3dff8 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/batch_processor_test.go @@ -0,0 +1,691 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+    "context"
+    "fmt"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.opencensus.io/stats/view"
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/config/configtelemetry"
+    "go.opentelemetry.io/collector/consumer/consumertest"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/internal/testdata"
+)
+
+func TestBatchProcessorSpansDelivered(t *testing.T) {
+    sink := new(consumertest.TracesSink)
+    cfg := createDefaultConfig().(*Config)
+    cfg.SendBatchSize = 128
+    creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()}
+    batcher := newBatchTracesProcessor(creationParams, sink, cfg, configtelemetry.LevelDetailed)
+    require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost()))
+
+    requestCount := 1000
+    spansPerRequest := 100
+    traceDataSlice := make([]pdata.Traces, 0, requestCount)
+    for requestNum := 0; requestNum < requestCount; requestNum++ {
+        td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest)
+        spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans()
+        for spanIndex := 0; spanIndex < spansPerRequest; spanIndex++ {
+            spans.At(spanIndex).SetName(getTestSpanName(requestNum, spanIndex))
+        }
+        traceDataSlice = append(traceDataSlice, td.Clone())
+        assert.NoError(t, batcher.ConsumeTraces(context.Background(), td))
+    }
+
+    // Added to test logic that checks for empty resources.
+ td := testdata.GenerateTraceDataEmpty() + assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*spansPerRequest, sink.SpansCount()) + receivedTraces := sink.AllTraces() + spansReceivedByName := spansReceivedByName(receivedTraces) + for requestNum := 0; requestNum < requestCount; requestNum++ { + spans := traceDataSlice[requestNum].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + for spanIndex := 0; spanIndex < spansPerRequest; spanIndex++ { + require.EqualValues(t, + spans.At(spanIndex), + spansReceivedByName[getTestSpanName(requestNum, spanIndex)]) + } + } +} + +func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) { + sink := new(consumertest.TracesSink) + cfg := createDefaultConfig().(*Config) + cfg.SendBatchSize = 128 + cfg.SendBatchMaxSize = 128 + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchTracesProcessor(creationParams, sink, cfg, configtelemetry.LevelBasic) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + requestCount := 1000 + spansPerRequest := 150 + for requestNum := 0; requestNum < requestCount; requestNum++ { + td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + for spanIndex := 0; spanIndex < spansPerRequest; spanIndex++ { + spans.At(spanIndex).SetName(getTestSpanName(requestNum, spanIndex)) + } + assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) + } + + // Added to test logic that check for empty resources. + td := testdata.GenerateTraceDataEmpty() + batcher.ConsumeTraces(context.Background(), td) + + // wait for all spans to be reported + for { + if sink.SpansCount() == requestCount*spansPerRequest { + break + } + <-time.After(cfg.Timeout) + } + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*spansPerRequest, sink.SpansCount()) + for i := 0; i < len(sink.AllTraces())-1; i++ { + assert.Equal(t, cfg.SendBatchSize, uint32(sink.AllTraces()[i].SpanCount())) + } + // the last batch has the remaining size + assert.Equal(t, (requestCount*spansPerRequest)%int(cfg.SendBatchSize), sink.AllTraces()[len(sink.AllTraces())-1].SpanCount()) +} + +func TestBatchProcessorSentBySize(t *testing.T) { + views := MetricViews() + require.NoError(t, view.Register(views...)) + defer view.Unregister(views...) 
+ + sink := new(consumertest.TracesSink) + cfg := createDefaultConfig().(*Config) + sendBatchSize := 20 + cfg.SendBatchSize = uint32(sendBatchSize) + cfg.Timeout = 500 * time.Millisecond + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchTracesProcessor(creationParams, sink, cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + requestCount := 100 + spansPerRequest := 5 + + start := time.Now() + sizeSum := 0 + for requestNum := 0; requestNum < requestCount; requestNum++ { + td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + sizeSum += td.Size() + assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) + } + + require.NoError(t, batcher.Shutdown(context.Background())) + + elapsed := time.Since(start) + require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds()) + + expectedBatchesNum := requestCount * spansPerRequest / sendBatchSize + expectedBatchingFactor := sendBatchSize / spansPerRequest + + require.Equal(t, requestCount*spansPerRequest, sink.SpansCount()) + receivedTraces := sink.AllTraces() + require.EqualValues(t, expectedBatchesNum, len(receivedTraces)) + for _, td := range receivedTraces { + rss := td.ResourceSpans() + require.Equal(t, expectedBatchingFactor, rss.Len()) + for i := 0; i < expectedBatchingFactor; i++ { + require.Equal(t, spansPerRequest, rss.At(i).InstrumentationLibrarySpans().At(0).Spans().Len()) + } + } + + viewData, err := view.RetrieveData("processor/batch/" + statBatchSendSize.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData := viewData[0].Data.(*view.DistributionData) + assert.Equal(t, int64(expectedBatchesNum), distData.Count) + assert.Equal(t, sink.SpansCount(), int(distData.Sum())) + assert.Equal(t, sendBatchSize, int(distData.Min)) + assert.Equal(t, sendBatchSize, int(distData.Max)) + + viewData, err = view.RetrieveData("processor/batch/" + statBatchSendSizeBytes.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData = viewData[0].Data.(*view.DistributionData) + assert.Equal(t, int64(expectedBatchesNum), distData.Count) + assert.Equal(t, sizeSum, int(distData.Sum())) +} + +func TestBatchProcessorSentByTimeout(t *testing.T) { + sink := new(consumertest.TracesSink) + cfg := createDefaultConfig().(*Config) + sendBatchSize := 100 + cfg.SendBatchSize = uint32(sendBatchSize) + cfg.Timeout = 100 * time.Millisecond + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + requestCount := 5 + spansPerRequest := 10 + start := time.Now() + + batcher := newBatchTracesProcessor(creationParams, sink, cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + for requestNum := 0; requestNum < requestCount; requestNum++ { + td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) + } + + // Wait for at least one batch to be sent. 
+ for { + if sink.SpansCount() != 0 { + break + } + <-time.After(cfg.Timeout) + } + + elapsed := time.Since(start) + require.LessOrEqual(t, cfg.Timeout.Nanoseconds(), elapsed.Nanoseconds()) + + // This should not change the results in the sink, verified by the expectedBatchesNum + require.NoError(t, batcher.Shutdown(context.Background())) + + expectedBatchesNum := 1 + expectedBatchingFactor := 5 + + require.Equal(t, requestCount*spansPerRequest, sink.SpansCount()) + receivedTraces := sink.AllTraces() + require.EqualValues(t, expectedBatchesNum, len(receivedTraces)) + for _, td := range receivedTraces { + rss := td.ResourceSpans() + require.Equal(t, expectedBatchingFactor, rss.Len()) + for i := 0; i < expectedBatchingFactor; i++ { + require.Equal(t, spansPerRequest, rss.At(i).InstrumentationLibrarySpans().At(0).Spans().Len()) + } + } +} + +func TestBatchProcessorTraceSendWhenClosing(t *testing.T) { + cfg := Config{ + Timeout: 3 * time.Second, + SendBatchSize: 1000, + } + sink := new(consumertest.TracesSink) + + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchTracesProcessor(creationParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + requestCount := 10 + spansPerRequest := 10 + for requestNum := 0; requestNum < requestCount; requestNum++ { + td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) + } + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*spansPerRequest, sink.SpansCount()) + require.Equal(t, 1, len(sink.AllTraces())) +} + +func TestBatchMetricProcessor_ReceivingData(t *testing.T) { + // Instantiate the batch processor with low config values to test data + // gets sent through the processor. + cfg := Config{ + Timeout: 200 * time.Millisecond, + SendBatchSize: 50, + } + + requestCount := 100 + metricsPerRequest := 5 + sink := new(consumertest.MetricsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchMetricsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + metricDataSlice := make([]pdata.Metrics, 0, requestCount) + + for requestNum := 0; requestNum < requestCount; requestNum++ { + md := testdata.GenerateMetricsManyMetricsSameResource(metricsPerRequest) + metrics := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() + for metricIndex := 0; metricIndex < metricsPerRequest; metricIndex++ { + metrics.At(metricIndex).SetName(getTestMetricName(requestNum, metricIndex)) + } + metricDataSlice = append(metricDataSlice, md.Clone()) + assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) + } + + // Added to test case with empty resources sent. 
+ md := testdata.GenerateMetricsEmpty() + assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*metricsPerRequest, sink.MetricsCount()) + receivedMds := sink.AllMetrics() + metricsReceivedByName := metricsReceivedByName(receivedMds) + for requestNum := 0; requestNum < requestCount; requestNum++ { + metrics := metricDataSlice[requestNum].ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() + for metricIndex := 0; metricIndex < metricsPerRequest; metricIndex++ { + require.EqualValues(t, + metrics.At(metricIndex), + metricsReceivedByName[getTestMetricName(requestNum, metricIndex)]) + } + } +} + +func TestBatchMetricProcessor_BatchSize(t *testing.T) { + views := MetricViews() + require.NoError(t, view.Register(views...)) + defer view.Unregister(views...) + + // Instantiate the batch processor with low config values to test data + // gets sent through the processor. + cfg := Config{ + Timeout: 100 * time.Millisecond, + SendBatchSize: 50, + } + + requestCount := 100 + metricsPerRequest := 5 + sink := new(consumertest.MetricsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchMetricsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + start := time.Now() + size := 0 + for requestNum := 0; requestNum < requestCount; requestNum++ { + md := testdata.GenerateMetricsManyMetricsSameResource(metricsPerRequest) + size += md.Size() + assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) + } + require.NoError(t, batcher.Shutdown(context.Background())) + + elapsed := time.Since(start) + require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds()) + + expectedBatchesNum := requestCount * metricsPerRequest / int(cfg.SendBatchSize) + expectedBatchingFactor := int(cfg.SendBatchSize) / metricsPerRequest + + require.Equal(t, requestCount*metricsPerRequest, sink.MetricsCount()) + receivedMds := sink.AllMetrics() + require.Equal(t, expectedBatchesNum, len(receivedMds)) + for _, md := range receivedMds { + require.Equal(t, expectedBatchingFactor, md.ResourceMetrics().Len()) + for i := 0; i < expectedBatchingFactor; i++ { + require.Equal(t, metricsPerRequest, md.ResourceMetrics().At(i).InstrumentationLibraryMetrics().At(0).Metrics().Len()) + } + } + + viewData, err := view.RetrieveData("processor/batch/" + statBatchSendSize.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData := viewData[0].Data.(*view.DistributionData) + assert.Equal(t, int64(expectedBatchesNum), distData.Count) + assert.Equal(t, sink.MetricsCount(), int(distData.Sum())) + assert.Equal(t, cfg.SendBatchSize, uint32(distData.Min)) + assert.Equal(t, cfg.SendBatchSize, uint32(distData.Max)) + + viewData, err = view.RetrieveData("processor/batch/" + statBatchSendSizeBytes.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData = viewData[0].Data.(*view.DistributionData) + assert.Equal(t, int64(expectedBatchesNum), distData.Count) + assert.Equal(t, size, int(distData.Sum())) +} + +func TestBatchMetricsProcessor_Timeout(t *testing.T) { + cfg := Config{ + Timeout: 100 * time.Millisecond, + SendBatchSize: 100, + } + requestCount := 5 + metricsPerRequest := 10 + sink := new(consumertest.MetricsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := 
newBatchMetricsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + start := time.Now() + for requestNum := 0; requestNum < requestCount; requestNum++ { + md := testdata.GenerateMetricsManyMetricsSameResource(metricsPerRequest) + assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) + } + + // Wait for at least one batch to be sent. + for { + if sink.MetricsCount() != 0 { + break + } + <-time.After(cfg.Timeout) + } + + elapsed := time.Since(start) + require.LessOrEqual(t, cfg.Timeout.Nanoseconds(), elapsed.Nanoseconds()) + + // This should not change the results in the sink, verified by the expectedBatchesNum + require.NoError(t, batcher.Shutdown(context.Background())) + + expectedBatchesNum := 1 + expectedBatchingFactor := 5 + + require.Equal(t, requestCount*metricsPerRequest, sink.MetricsCount()) + receivedMds := sink.AllMetrics() + require.Equal(t, expectedBatchesNum, len(receivedMds)) + for _, md := range receivedMds { + require.Equal(t, expectedBatchingFactor, md.ResourceMetrics().Len()) + for i := 0; i < expectedBatchingFactor; i++ { + require.Equal(t, metricsPerRequest, md.ResourceMetrics().At(i).InstrumentationLibraryMetrics().At(0).Metrics().Len()) + } + } +} + +func TestBatchMetricProcessor_Shutdown(t *testing.T) { + cfg := Config{ + Timeout: 3 * time.Second, + SendBatchSize: 1000, + } + requestCount := 5 + metricsPerRequest := 10 + sink := new(consumertest.MetricsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchMetricsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + for requestNum := 0; requestNum < requestCount; requestNum++ { + md := testdata.GenerateMetricsManyMetricsSameResource(metricsPerRequest) + assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) + } + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*metricsPerRequest, sink.MetricsCount()) + require.Equal(t, 1, len(sink.AllMetrics())) +} + +func getTestSpanName(requestNum, index int) string { + return fmt.Sprintf("test-span-%d-%d", requestNum, index) +} + +func spansReceivedByName(tds []pdata.Traces) map[string]pdata.Span { + spansReceivedByName := map[string]pdata.Span{} + for i := range tds { + rss := tds[i].ResourceSpans() + for i := 0; i < rss.Len(); i++ { + ilss := rss.At(i).InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + spans := ilss.At(j).Spans() + for k := 0; k < spans.Len(); k++ { + span := spans.At(k) + spansReceivedByName[spans.At(k).Name()] = span + } + } + } + } + return spansReceivedByName +} + +func metricsReceivedByName(mds []pdata.Metrics) map[string]pdata.Metric { + metricsReceivedByName := map[string]pdata.Metric{} + for _, md := range mds { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + ilms := rms.At(i).InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + metrics := ilms.At(j).Metrics() + for k := 0; k < metrics.Len(); k++ { + metric := metrics.At(k) + metricsReceivedByName[metric.Name()] = metric + } + } + } + } + return metricsReceivedByName +} + +func getTestMetricName(requestNum, index int) string { + return fmt.Sprintf("test-metric-int-%d-%d", requestNum, index) +} + +func BenchmarkTraceSizeBytes(b *testing.B) { + td := testdata.GenerateTraceDataManySpansSameResource(8192) + for n := 0; n < b.N; n++ { + 
fmt.Println(td.Size()) + } +} + +func BenchmarkTraceSizeSpanCount(b *testing.B) { + td := testdata.GenerateTraceDataManySpansSameResource(8192) + for n := 0; n < b.N; n++ { + td.SpanCount() + } +} + +func TestBatchLogProcessor_ReceivingData(t *testing.T) { + // Instantiate the batch processor with low config values to test data + // gets sent through the processor. + cfg := Config{ + Timeout: 200 * time.Millisecond, + SendBatchSize: 50, + } + + requestCount := 100 + logsPerRequest := 5 + sink := new(consumertest.LogsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchLogsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + logDataSlice := make([]pdata.Logs, 0, requestCount) + + for requestNum := 0; requestNum < requestCount; requestNum++ { + ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + for logIndex := 0; logIndex < logsPerRequest; logIndex++ { + logs.At(logIndex).SetName(getTestLogName(requestNum, logIndex)) + } + logDataSlice = append(logDataSlice, ld.Clone()) + assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) + } + + // Added to test case with empty resources sent. + ld := testdata.GenerateLogDataEmpty() + assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*logsPerRequest, sink.LogRecordsCount()) + receivedMds := sink.AllLogs() + logsReceivedByName := logsReceivedByName(receivedMds) + for requestNum := 0; requestNum < requestCount; requestNum++ { + logs := logDataSlice[requestNum].ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + for logIndex := 0; logIndex < logsPerRequest; logIndex++ { + require.EqualValues(t, + logs.At(logIndex), + logsReceivedByName[getTestLogName(requestNum, logIndex)]) + } + } +} + +func TestBatchLogProcessor_BatchSize(t *testing.T) { + views := MetricViews() + require.NoError(t, view.Register(views...)) + defer view.Unregister(views...) + + // Instantiate the batch processor with low config values to test data + // gets sent through the processor. 
+ cfg := Config{ + Timeout: 100 * time.Millisecond, + SendBatchSize: 50, + } + + requestCount := 100 + logsPerRequest := 5 + sink := new(consumertest.LogsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchLogsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + start := time.Now() + size := 0 + for requestNum := 0; requestNum < requestCount; requestNum++ { + ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + size += ld.SizeBytes() + assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) + } + require.NoError(t, batcher.Shutdown(context.Background())) + + elapsed := time.Since(start) + require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds()) + + expectedBatchesNum := requestCount * logsPerRequest / int(cfg.SendBatchSize) + expectedBatchingFactor := int(cfg.SendBatchSize) / logsPerRequest + + require.Equal(t, requestCount*logsPerRequest, sink.LogRecordsCount()) + receivedMds := sink.AllLogs() + require.Equal(t, expectedBatchesNum, len(receivedMds)) + for _, ld := range receivedMds { + require.Equal(t, expectedBatchingFactor, ld.ResourceLogs().Len()) + for i := 0; i < expectedBatchingFactor; i++ { + require.Equal(t, logsPerRequest, ld.ResourceLogs().At(i).InstrumentationLibraryLogs().At(0).Logs().Len()) + } + } + + viewData, err := view.RetrieveData("processor/batch/" + statBatchSendSize.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData := viewData[0].Data.(*view.DistributionData) + assert.Equal(t, int64(expectedBatchesNum), distData.Count) + assert.Equal(t, sink.LogRecordsCount(), int(distData.Sum())) + assert.Equal(t, cfg.SendBatchSize, uint32(distData.Min)) + assert.Equal(t, cfg.SendBatchSize, uint32(distData.Max)) + + viewData, err = view.RetrieveData("processor/batch/" + statBatchSendSizeBytes.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData = viewData[0].Data.(*view.DistributionData) + assert.Equal(t, int64(expectedBatchesNum), distData.Count) + assert.Equal(t, size, int(distData.Sum())) +} + +func TestBatchLogsProcessor_Timeout(t *testing.T) { + cfg := Config{ + Timeout: 100 * time.Millisecond, + SendBatchSize: 100, + } + requestCount := 5 + logsPerRequest := 10 + sink := new(consumertest.LogsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchLogsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + start := time.Now() + for requestNum := 0; requestNum < requestCount; requestNum++ { + ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) + } + + // Wait for at least one batch to be sent. 
+ for { + if sink.LogRecordsCount() != 0 { + break + } + <-time.After(cfg.Timeout) + } + + elapsed := time.Since(start) + require.LessOrEqual(t, cfg.Timeout.Nanoseconds(), elapsed.Nanoseconds()) + + // This should not change the results in the sink, verified by the expectedBatchesNum + require.NoError(t, batcher.Shutdown(context.Background())) + + expectedBatchesNum := 1 + expectedBatchingFactor := 5 + + require.Equal(t, requestCount*logsPerRequest, sink.LogRecordsCount()) + receivedMds := sink.AllLogs() + require.Equal(t, expectedBatchesNum, len(receivedMds)) + for _, ld := range receivedMds { + require.Equal(t, expectedBatchingFactor, ld.ResourceLogs().Len()) + for i := 0; i < expectedBatchingFactor; i++ { + require.Equal(t, logsPerRequest, ld.ResourceLogs().At(i).InstrumentationLibraryLogs().At(0).Logs().Len()) + } + } +} + +func TestBatchLogProcessor_Shutdown(t *testing.T) { + cfg := Config{ + Timeout: 3 * time.Second, + SendBatchSize: 1000, + } + requestCount := 5 + logsPerRequest := 10 + sink := new(consumertest.LogsSink) + + createParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + batcher := newBatchLogsProcessor(createParams, sink, &cfg, configtelemetry.LevelDetailed) + require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) + + for requestNum := 0; requestNum < requestCount; requestNum++ { + ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) + } + + require.NoError(t, batcher.Shutdown(context.Background())) + + require.Equal(t, requestCount*logsPerRequest, sink.LogRecordsCount()) + require.Equal(t, 1, len(sink.AllLogs())) +} + +func getTestLogName(requestNum, index int) string { + return fmt.Sprintf("test-log-int-%d-%d", requestNum, index) +} + +func logsReceivedByName(lds []pdata.Logs) map[string]pdata.LogRecord { + logsReceivedByName := map[string]pdata.LogRecord{} + for i := range lds { + ld := lds[i] + rms := ld.ResourceLogs() + for i := 0; i < rms.Len(); i++ { + ilms := rms.At(i).InstrumentationLibraryLogs() + for j := 0; j < ilms.Len(); j++ { + logs := ilms.At(j).Logs() + for k := 0; k < logs.Len(); k++ { + log := logs.At(k) + logsReceivedByName[log.Name()] = log + } + } + } + } + return logsReceivedByName +} diff --git a/internal/otel_collector/processor/batchprocessor/config.go b/internal/otel_collector/processor/batchprocessor/config.go new file mode 100644 index 00000000000..ed66435ba15 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/config.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchprocessor + +import ( + "time" + + "go.opentelemetry.io/collector/config/configmodels" +) + +// Config defines configuration for batch processor. +type Config struct { + configmodels.ProcessorSettings `mapstructure:",squash"` + + // Timeout sets the time after which a batch will be sent regardless of size. 
+	Timeout time.Duration `mapstructure:"timeout,omitempty"`
+
+	// SendBatchSize is the size of a batch which, once reached, triggers the batch to be sent.
+	SendBatchSize uint32 `mapstructure:"send_batch_size,omitempty"`
+
+	// SendBatchMaxSize is the maximum size of a batch. Larger batches are split into smaller units.
+	// The default value is 0, which means no maximum size.
+	SendBatchMaxSize uint32 `mapstructure:"send_batch_max_size,omitempty"`
+}
diff --git a/internal/otel_collector/processor/batchprocessor/config_test.go b/internal/otel_collector/processor/batchprocessor/config_test.go
new file mode 100644
index 00000000000..301a310ef36
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/config_test.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+	"path"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtest"
+)
+
+func TestLoadConfig(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	factory := NewFactory()
+	factories.Processors[typeStr] = factory
+	cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+
+	require.Nil(t, err)
+	require.NotNil(t, cfg)
+
+	p0 := cfg.Processors["batch"]
+	assert.Equal(t, p0, factory.CreateDefaultConfig())
+
+	p1 := cfg.Processors["batch/2"]
+
+	timeout := time.Second * 10
+	sendBatchSize := uint32(10000)
+	sendBatchMaxSize := uint32(11000)
+
+	assert.Equal(t, p1,
+		&Config{
+			ProcessorSettings: configmodels.ProcessorSettings{
+				TypeVal: "batch",
+				NameVal: "batch/2",
+			},
+			SendBatchSize:    sendBatchSize,
+			SendBatchMaxSize: sendBatchMaxSize,
+			Timeout:          timeout,
+		})
+}
diff --git a/internal/otel_collector/processor/batchprocessor/factory.go b/internal/otel_collector/processor/batchprocessor/factory.go
new file mode 100644
index 00000000000..9e5d4ca441d
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/factory.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package batchprocessor + +import ( + "context" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "batch" + + defaultSendBatchSize = uint32(8192) + defaultTimeout = 200 * time.Millisecond +) + +// NewFactory returns a new factory for the Batch processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithTraces(createTraceProcessor), + processorhelper.WithMetrics(createMetricsProcessor), + processorhelper.WithLogs(createLogsProcessor)) +} + +func createDefaultConfig() configmodels.Processor { + return &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + SendBatchSize: defaultSendBatchSize, + Timeout: defaultTimeout, + } +} + +func createTraceProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, +) (component.TracesProcessor, error) { + oCfg := cfg.(*Config) + level := configtelemetry.GetMetricsLevelFlagValue() + return newBatchTracesProcessor(params, nextConsumer, oCfg, level), nil +} + +func createMetricsProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.MetricsConsumer, +) (component.MetricsProcessor, error) { + oCfg := cfg.(*Config) + level := configtelemetry.GetMetricsLevelFlagValue() + return newBatchMetricsProcessor(params, nextConsumer, oCfg, level), nil +} + +func createLogsProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.LogsConsumer, +) (component.LogsProcessor, error) { + oCfg := cfg.(*Config) + level := configtelemetry.GetMetricsLevelFlagValue() + return newBatchLogsProcessor(params, nextConsumer, oCfg, level), nil +} diff --git a/internal/otel_collector/processor/batchprocessor/factory_test.go b/internal/otel_collector/processor/batchprocessor/factory_test.go new file mode 100644 index 00000000000..28d36d15d21 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/factory_test.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package batchprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateProcessor(t *testing.T) { + factory := NewFactory() + + cfg := factory.CreateDefaultConfig() + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + tp, err := factory.CreateTracesProcessor(context.Background(), creationParams, cfg, nil) + assert.NotNil(t, tp) + assert.NoError(t, err, "cannot create trace processor") + + mp, err := factory.CreateMetricsProcessor(context.Background(), creationParams, cfg, nil) + assert.NotNil(t, mp) + assert.NoError(t, err, "cannot create metric processor") + + lp, err := factory.CreateLogsProcessor(context.Background(), creationParams, cfg, nil) + assert.NotNil(t, lp) + assert.NoError(t, err, "cannot create logs processor") +} diff --git a/internal/otel_collector/processor/batchprocessor/metrics.go b/internal/otel_collector/processor/batchprocessor/metrics.go new file mode 100644 index 00000000000..6852b334053 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/metrics.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package batchprocessor
+
+import (
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+
+	"go.opentelemetry.io/collector/obsreport"
+	"go.opentelemetry.io/collector/processor"
+)
+
+var (
+	statBatchSizeTriggerSend = stats.Int64("batch_size_trigger_send", "Number of times the batch was sent due to a size trigger", stats.UnitDimensionless)
+	statTimeoutTriggerSend   = stats.Int64("timeout_trigger_send", "Number of times the batch was sent due to a timeout trigger", stats.UnitDimensionless)
+	statBatchSendSize        = stats.Int64("batch_send_size", "Number of units in the batch", stats.UnitDimensionless)
+	statBatchSendSizeBytes   = stats.Int64("batch_send_size_bytes", "Number of bytes in batch that was sent", stats.UnitBytes)
+)
+
+// MetricViews returns the metrics views related to batching.
+func MetricViews() []*view.View {
+	processorTagKeys := []tag.Key{processor.TagProcessorNameKey}
+
+	countBatchSizeTriggerSendView := &view.View{
+		Name:        statBatchSizeTriggerSend.Name(),
+		Measure:     statBatchSizeTriggerSend,
+		Description: statBatchSizeTriggerSend.Description(),
+		TagKeys:     processorTagKeys,
+		Aggregation: view.Sum(),
+	}
+
+	countTimeoutTriggerSendView := &view.View{
+		Name:        statTimeoutTriggerSend.Name(),
+		Measure:     statTimeoutTriggerSend,
+		Description: statTimeoutTriggerSend.Description(),
+		TagKeys:     processorTagKeys,
+		Aggregation: view.Sum(),
+	}
+
+	distributionBatchSendSizeView := &view.View{
+		Name:        statBatchSendSize.Name(),
+		Measure:     statBatchSendSize,
+		Description: statBatchSendSize.Description(),
+		TagKeys:     processorTagKeys,
+		Aggregation: view.Distribution(10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, 100000),
+	}
+
+	distributionBatchSendSizeBytesView := &view.View{
+		Name:        statBatchSendSizeBytes.Name(),
+		Measure:     statBatchSendSizeBytes,
+		Description: statBatchSendSizeBytes.Description(),
+		TagKeys:     processorTagKeys,
+		Aggregation: view.Distribution(10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000,
+			100_000, 200_000, 300_000, 400_000, 500_000, 600_000, 700_000, 800_000, 900_000,
+			1000_000, 2000_000, 3000_000, 4000_000, 5000_000, 6000_000, 7000_000, 8000_000, 9000_000),
+	}
+
+	legacyViews := []*view.View{
+		countBatchSizeTriggerSendView,
+		countTimeoutTriggerSendView,
+		distributionBatchSendSizeView,
+		distributionBatchSendSizeBytesView,
+	}
+
+	return obsreport.ProcessorMetricViews(typeStr, legacyViews)
+}
diff --git a/internal/otel_collector/processor/batchprocessor/metrics_test.go b/internal/otel_collector/processor/batchprocessor/metrics_test.go
new file mode 100644
index 00000000000..167186e37b1
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/metrics_test.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package batchprocessor + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBatchProcessorMetrics(t *testing.T) { + viewNames := []string{ + "batch_size_trigger_send", + "timeout_trigger_send", + "batch_send_size", + "batch_send_size_bytes", + } + views := MetricViews() + for i, viewName := range viewNames { + assert.Equal(t, "processor/batch/"+viewName, views[i].Name) + } +} diff --git a/internal/otel_collector/processor/batchprocessor/splittraces.go b/internal/otel_collector/processor/batchprocessor/splittraces.go new file mode 100644 index 00000000000..34c0c179f0d --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/splittraces.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchprocessor + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// splitTrace removes spans from the input trace and returns a new trace of the specified size. +func splitTrace(size int, toSplit pdata.Traces) pdata.Traces { + if toSplit.SpanCount() <= size { + return toSplit + } + copiedSpans := 0 + result := pdata.NewTraces() + rss := toSplit.ResourceSpans() + for i := rss.Len() - 1; i >= 0; i-- { + rs := rss.At(i) + destRs := pdata.NewResourceSpans() + rs.Resource().CopyTo(destRs.Resource()) + result.ResourceSpans().Append(destRs) + + for j := rs.InstrumentationLibrarySpans().Len() - 1; j >= 0; j-- { + instSpans := rs.InstrumentationLibrarySpans().At(j) + destInstSpans := pdata.NewInstrumentationLibrarySpans() + destRs.InstrumentationLibrarySpans().Append(destInstSpans) + instSpans.InstrumentationLibrary().CopyTo(destInstSpans.InstrumentationLibrary()) + + if size-copiedSpans >= instSpans.Spans().Len() { + destInstSpans.Spans().Resize(instSpans.Spans().Len()) + } else { + destInstSpans.Spans().Resize(size - copiedSpans) + } + for k, destIdx := instSpans.Spans().Len()-1, 0; k >= 0 && copiedSpans < size; k, destIdx = k-1, destIdx+1 { + span := instSpans.Spans().At(k) + span.CopyTo(destInstSpans.Spans().At(destIdx)) + copiedSpans++ + // remove span + instSpans.Spans().Resize(instSpans.Spans().Len() - 1) + } + if instSpans.Spans().Len() == 0 { + rs.InstrumentationLibrarySpans().Resize(rs.InstrumentationLibrarySpans().Len() - 1) + } + if copiedSpans == size { + return result + } + } + if rs.InstrumentationLibrarySpans().Len() == 0 { + rss.Resize(rss.Len() - 1) + } + } + return result +} diff --git a/internal/otel_collector/processor/batchprocessor/splittraces_test.go b/internal/otel_collector/processor/batchprocessor/splittraces_test.go new file mode 100644 index 00000000000..6e54cc15f51 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/splittraces_test.go @@ -0,0 +1,113 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchprocessor + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestSplitTraces_noop(t *testing.T) { + td := testdata.GenerateTraceDataManySpansSameResource(20) + splitSize := 40 + split := splitTrace(splitSize, td) + assert.Equal(t, td, split) + + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(5) + assert.EqualValues(t, td, split) +} + +func TestSplitTraces(t *testing.T) { + td := testdata.GenerateTraceDataManySpansSameResource(20) + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + for i := 0; i < spans.Len(); i++ { + spans.At(i).SetName(getTestSpanName(0, i)) + } + cp := pdata.NewTraces() + cp.ResourceSpans().Resize(1) + cp.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1) + cp.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(5) + cpSpans := cp.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + td.ResourceSpans().At(0).Resource().CopyTo( + cp.ResourceSpans().At(0).Resource()) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).InstrumentationLibrary().CopyTo( + cp.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).InstrumentationLibrary()) + spans.At(19).CopyTo(cpSpans.At(0)) + spans.At(18).CopyTo(cpSpans.At(1)) + spans.At(17).CopyTo(cpSpans.At(2)) + spans.At(16).CopyTo(cpSpans.At(3)) + spans.At(15).CopyTo(cpSpans.At(4)) + + splitSize := 5 + split := splitTrace(splitSize, td) + assert.Equal(t, splitSize, split.SpanCount()) + assert.Equal(t, cp, split) + assert.Equal(t, 15, td.SpanCount()) + assert.Equal(t, "test-span-0-19", split.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).Name()) + assert.Equal(t, "test-span-0-15", split.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(4).Name()) +} + +func TestSplitTracesMultipleResourceSpans(t *testing.T) { + td := testdata.GenerateTraceDataManySpansSameResource(20) + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + for i := 0; i < spans.Len(); i++ { + spans.At(i).SetName(getTestSpanName(0, i)) + } + td.ResourceSpans().Resize(2) + // add second index to resource spans + testdata.GenerateTraceDataManySpansSameResource(20). 
+ ResourceSpans().At(0).CopyTo(td.ResourceSpans().At(1)) + spans = td.ResourceSpans().At(1).InstrumentationLibrarySpans().At(0).Spans() + for i := 0; i < spans.Len(); i++ { + spans.At(i).SetName(getTestSpanName(1, i)) + } + + splitSize := 5 + split := splitTrace(splitSize, td) + assert.Equal(t, splitSize, split.SpanCount()) + assert.Equal(t, 35, td.SpanCount()) + assert.Equal(t, "test-span-1-19", split.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).Name()) + assert.Equal(t, "test-span-1-15", split.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(4).Name()) +} + +func TestSplitTracesMultipleResourceSpans_split_size_greater_than_span_size(t *testing.T) { + td := testdata.GenerateTraceDataManySpansSameResource(20) + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + for i := 0; i < spans.Len(); i++ { + spans.At(i).SetName(getTestSpanName(0, i)) + } + td.ResourceSpans().Resize(2) + // add second index to resource spans + testdata.GenerateTraceDataManySpansSameResource(20). + ResourceSpans().At(0).CopyTo(td.ResourceSpans().At(1)) + spans = td.ResourceSpans().At(1).InstrumentationLibrarySpans().At(0).Spans() + for i := 0; i < spans.Len(); i++ { + spans.At(i).SetName(getTestSpanName(1, i)) + } + + splitSize := 25 + split := splitTrace(splitSize, td) + assert.Equal(t, splitSize, split.SpanCount()) + assert.Equal(t, 40-splitSize, td.SpanCount()) + assert.Equal(t, 1, td.ResourceSpans().Len()) + assert.Equal(t, "test-span-1-19", split.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).Name()) + assert.Equal(t, "test-span-1-0", split.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(19).Name()) + assert.Equal(t, "test-span-0-19", split.ResourceSpans().At(1).InstrumentationLibrarySpans().At(0).Spans().At(0).Name()) + assert.Equal(t, "test-span-0-15", split.ResourceSpans().At(1).InstrumentationLibrarySpans().At(0).Spans().At(4).Name()) +} diff --git a/internal/otel_collector/processor/batchprocessor/testdata/config.yaml b/internal/otel_collector/processor/batchprocessor/testdata/config.yaml new file mode 100644 index 00000000000..7cfcda24075 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/testdata/config.yaml @@ -0,0 +1,19 @@ +receivers: + examplereceiver: + +processors: + batch: + batch/2: + timeout: 10s + send_batch_size: 10000 + send_batch_max_size: 11000 + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [batch/2] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/cloningfanoutconnector.go b/internal/otel_collector/processor/cloningfanoutconnector.go new file mode 100644 index 00000000000..a4715de7633 --- /dev/null +++ b/internal/otel_collector/processor/cloningfanoutconnector.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package processor
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// This file contains implementations of cloning Trace/Metrics/Logs connectors
+// that fan out the data to multiple other consumers. Cloning connectors create
+// clones of data before fanning out, which ensures each consumer gets their
+// own copy of data and is free to modify it.
+
+// NewMetricsCloningFanOutConnector wraps multiple metrics consumers in a single one and clones the data
+// before fanning out.
+func NewMetricsCloningFanOutConnector(mcs []consumer.MetricsConsumer) consumer.MetricsConsumer {
+	if len(mcs) == 1 {
+		// Don't wrap if no need to do it.
+		return mcs[0]
+	}
+	return metricsCloningFanOutConnector(mcs)
+}
+
+type metricsCloningFanOutConnector []consumer.MetricsConsumer
+
+var _ consumer.MetricsConsumer = (*metricsCloningFanOutConnector)(nil)
+
+// ConsumeMetrics exports the MetricsData to all consumers wrapped by the current one.
+func (mfc metricsCloningFanOutConnector) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error {
+	var errs []error
+
+	// Fan out to first len-1 consumers.
+	for i := 0; i < len(mfc)-1; i++ {
+		// Create a clone of data. We need to clone because consumers may modify the data.
+		if err := mfc[i].ConsumeMetrics(ctx, md.Clone()); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(mfc) > 0 {
+		// Give the original data to the last consumer.
+		lastTc := mfc[len(mfc)-1]
+		if err := lastTc.ConsumeMetrics(ctx, md); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+// NewTracesCloningFanOutConnector wraps multiple traces consumers in a single one and clones the data
+// before fanning out.
+func NewTracesCloningFanOutConnector(tcs []consumer.TracesConsumer) consumer.TracesConsumer {
+	if len(tcs) == 1 {
+		// Don't wrap if no need to do it.
+		return tcs[0]
+	}
+	return tracesCloningFanOutConnector(tcs)
+}
+
+type tracesCloningFanOutConnector []consumer.TracesConsumer
+
+var _ consumer.TracesConsumer = (*tracesCloningFanOutConnector)(nil)
+
+// ConsumeTraces exports the span data to all trace consumers wrapped by the current one.
+func (tfc tracesCloningFanOutConnector) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	var errs []error
+
+	// Fan out to first len-1 consumers.
+	for i := 0; i < len(tfc)-1; i++ {
+		// Create a clone of data. We need to clone because consumers may modify the data.
+		if err := tfc[i].ConsumeTraces(ctx, td.Clone()); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(tfc) > 0 {
+		// Give the original data to the last consumer.
+		lastTc := tfc[len(tfc)-1]
+		if err := lastTc.ConsumeTraces(ctx, td); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+// NewLogsCloningFanOutConnector wraps multiple log consumers in a single one and clones the data
+// before fanning out.
+func NewLogsCloningFanOutConnector(lcs []consumer.LogsConsumer) consumer.LogsConsumer {
+	if len(lcs) == 1 {
+		// Don't wrap if no need to do it.
+		return lcs[0]
+	}
+	return logsCloningFanOutConnector(lcs)
+}
+
+type logsCloningFanOutConnector []consumer.LogsConsumer
+
+var _ consumer.LogsConsumer = (*logsCloningFanOutConnector)(nil)
+
+// ConsumeLogs exports the log data to all consumers wrapped by the current one.
+func (lfc logsCloningFanOutConnector) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { + var errs []error + + // Fan out to first len-1 consumers. + for i := 0; i < len(lfc)-1; i++ { + // Create a clone of data. We need to clone because consumers may modify the data. + if err := lfc[i].ConsumeLogs(ctx, ld.Clone()); err != nil { + errs = append(errs, err) + } + } + + if len(lfc) > 0 { + // Give the original data to the last consumer. + lastTc := lfc[len(lfc)-1] + if err := lastTc.ConsumeLogs(ctx, ld); err != nil { + errs = append(errs, err) + } + } + + return componenterror.CombineErrors(errs) +} diff --git a/internal/otel_collector/processor/cloningfanoutconnector_test.go b/internal/otel_collector/processor/cloningfanoutconnector_test.go new file mode 100644 index 00000000000..39fdf043a0e --- /dev/null +++ b/internal/otel_collector/processor/cloningfanoutconnector_test.go @@ -0,0 +1,155 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestTraceProcessorCloningNotMultiplexing(t *testing.T) { + nop := consumertest.NewTracesNop() + tfc := NewTracesCloningFanOutConnector([]consumer.TracesConsumer{nop}) + assert.Same(t, nop, tfc) +} + +func TestTraceProcessorCloningMultiplexing(t *testing.T) { + processors := make([]consumer.TracesConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.TracesSink) + } + + tfc := NewTracesCloningFanOutConnector(processors) + td := testdata.GenerateTraceDataTwoSpansSameResource() + + var wantSpansCount = 0 + for i := 0; i < 2; i++ { + wantSpansCount += td.SpanCount() + err := tfc.ConsumeTraces(context.Background(), td) + if err != nil { + t.Errorf("Wanted nil got error") + return + } + } + + for i, p := range processors { + m := p.(*consumertest.TracesSink) + assert.Equal(t, wantSpansCount, m.SpansCount()) + spanOrig := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + allTraces := m.AllTraces() + spanClone := allTraces[0].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + if i < len(processors)-1 { + assert.True(t, td.ResourceSpans().At(0).Resource() != allTraces[0].ResourceSpans().At(0).Resource()) + assert.True(t, spanOrig != spanClone) + } else { + assert.True(t, td.ResourceSpans().At(0).Resource() == allTraces[0].ResourceSpans().At(0).Resource()) + assert.True(t, spanOrig == spanClone) + } + assert.EqualValues(t, td.ResourceSpans().At(0).Resource(), allTraces[0].ResourceSpans().At(0).Resource()) + assert.EqualValues(t, spanOrig, spanClone) + } +} + +func TestMetricsProcessorCloningNotMultiplexing(t *testing.T) { + nop := consumertest.NewMetricsNop() + mfc := NewMetricsFanOutConnector([]consumer.MetricsConsumer{nop}) + assert.Same(t, nop, mfc) +} + +func 
TestMetricsProcessorCloningMultiplexing(t *testing.T) { + processors := make([]consumer.MetricsConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.MetricsSink) + } + + mfc := NewMetricsCloningFanOutConnector(processors) + md := testdata.GeneratMetricsAllTypesWithSampleDatapoints() + + var wantMetricsCount = 0 + for i := 0; i < 2; i++ { + wantMetricsCount += md.MetricCount() + err := mfc.ConsumeMetrics(context.Background(), md) + if err != nil { + t.Errorf("Wanted nil got error") + return + } + } + + for i, p := range processors { + m := p.(*consumertest.MetricsSink) + assert.Equal(t, wantMetricsCount, m.MetricsCount()) + metricOrig := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) + allMetrics := m.AllMetrics() + metricClone := allMetrics[0].ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) + if i < len(processors)-1 { + assert.True(t, md.ResourceMetrics().At(0).Resource() != allMetrics[0].ResourceMetrics().At(0).Resource()) + assert.True(t, metricOrig != metricClone) + } else { + assert.True(t, md.ResourceMetrics().At(0).Resource() == allMetrics[0].ResourceMetrics().At(0).Resource()) + assert.True(t, metricOrig == metricClone) + } + assert.EqualValues(t, md.ResourceMetrics().At(0).Resource(), allMetrics[0].ResourceMetrics().At(0).Resource()) + assert.EqualValues(t, metricOrig, metricClone) + } +} + +func TestLogsProcessorCloningNotMultiplexing(t *testing.T) { + nop := consumertest.NewLogsNop() + lfc := NewLogsCloningFanOutConnector([]consumer.LogsConsumer{nop}) + assert.Same(t, nop, lfc) +} + +func TestLogsProcessorCloningMultiplexing(t *testing.T) { + processors := make([]consumer.LogsConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.LogsSink) + } + + mfc := NewLogsCloningFanOutConnector(processors) + ld := testdata.GenerateLogDataOneLog() + + var wantMetricsCount = 0 + for i := 0; i < 2; i++ { + wantMetricsCount += ld.LogRecordCount() + err := mfc.ConsumeLogs(context.Background(), ld) + if err != nil { + t.Errorf("Wanted nil got error") + return + } + } + + for i, p := range processors { + m := p.(*consumertest.LogsSink) + assert.Equal(t, wantMetricsCount, m.LogRecordsCount()) + metricOrig := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0) + allLogs := m.AllLogs() + metricClone := allLogs[0].ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0) + if i < len(processors)-1 { + assert.True(t, ld.ResourceLogs().At(0).Resource() != allLogs[0].ResourceLogs().At(0).Resource()) + assert.True(t, metricOrig != metricClone) + } else { + assert.True(t, ld.ResourceLogs().At(0).Resource() == allLogs[0].ResourceLogs().At(0).Resource()) + assert.True(t, metricOrig == metricClone) + } + assert.EqualValues(t, ld.ResourceLogs().At(0).Resource(), allLogs[0].ResourceLogs().At(0).Resource()) + assert.EqualValues(t, metricOrig, metricClone) + } +} diff --git a/internal/otel_collector/processor/fanoutconnector.go b/internal/otel_collector/processor/fanoutconnector.go new file mode 100644 index 00000000000..f590c4280b0 --- /dev/null +++ b/internal/otel_collector/processor/fanoutconnector.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processor
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// This file contains implementations of Trace/Metrics/Logs connectors
+// that fan out the data to multiple other consumers.
+
+// NewMetricsFanOutConnector wraps multiple metrics consumers in a single one.
+func NewMetricsFanOutConnector(mcs []consumer.MetricsConsumer) consumer.MetricsConsumer {
+	if len(mcs) == 1 {
+		// Don't wrap if no need to do it.
+		return mcs[0]
+	}
+	return metricsFanOutConnector(mcs)
+}
+
+type metricsFanOutConnector []consumer.MetricsConsumer
+
+var _ consumer.MetricsConsumer = (*metricsFanOutConnector)(nil)
+
+// ConsumeMetrics exports the metrics data to all consumers wrapped by the current one.
+func (mfc metricsFanOutConnector) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error {
+	var errs []error
+	for _, mc := range mfc {
+		if err := mc.ConsumeMetrics(ctx, md); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return componenterror.CombineErrors(errs)
+}
+
+// NewTracesFanOutConnector wraps multiple trace consumers in a single one.
+func NewTracesFanOutConnector(tcs []consumer.TracesConsumer) consumer.TracesConsumer {
+	if len(tcs) == 1 {
+		// Don't wrap if no need to do it.
+		return tcs[0]
+	}
+	return traceFanOutConnector(tcs)
+}
+
+type traceFanOutConnector []consumer.TracesConsumer
+
+var _ consumer.TracesConsumer = (*traceFanOutConnector)(nil)
+
+// ConsumeTraces exports the span data to all trace consumers wrapped by the current one.
+func (tfc traceFanOutConnector) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	var errs []error
+	for _, tc := range tfc {
+		if err := tc.ConsumeTraces(ctx, td); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return componenterror.CombineErrors(errs)
+}
+
+// NewLogsFanOutConnector wraps multiple log consumers in a single one.
+func NewLogsFanOutConnector(lcs []consumer.LogsConsumer) consumer.LogsConsumer {
+	if len(lcs) == 1 {
+		// Don't wrap if no need to do it.
+		return lcs[0]
+	}
+	return logsFanOutConnector(lcs)
+}
+
+type logsFanOutConnector []consumer.LogsConsumer
+
+var _ consumer.LogsConsumer = (*logsFanOutConnector)(nil)
+
+// ConsumeLogs exports the log data to all consumers wrapped by the current one.
+func (fc logsFanOutConnector) ConsumeLogs(ctx context.Context, ld pdata.Logs) error {
+	var errs []error
+	for _, tc := range fc {
+		if err := tc.ConsumeLogs(ctx, ld); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return componenterror.CombineErrors(errs)
+}
diff --git a/internal/otel_collector/processor/fanoutconnector_test.go b/internal/otel_collector/processor/fanoutconnector_test.go
new file mode 100644
index 00000000000..69fb3f21511
--- /dev/null
+++ b/internal/otel_collector/processor/fanoutconnector_test.go
@@ -0,0 +1,204 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processor + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestTracesProcessorNotMultiplexing(t *testing.T) { + nop := consumertest.NewTracesNop() + tfc := NewTracesFanOutConnector([]consumer.TracesConsumer{nop}) + assert.Same(t, nop, tfc) +} + +func TestTracesProcessorMultiplexing(t *testing.T) { + processors := make([]consumer.TracesConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.TracesSink) + } + + tfc := NewTracesFanOutConnector(processors) + td := testdata.GenerateTraceDataOneSpan() + + var wantSpansCount = 0 + for i := 0; i < 2; i++ { + wantSpansCount += td.SpanCount() + err := tfc.ConsumeTraces(context.Background(), td) + if err != nil { + t.Errorf("Wanted nil got error") + return + } + } + + for _, p := range processors { + m := p.(*consumertest.TracesSink) + assert.Equal(t, wantSpansCount, m.SpansCount()) + assert.EqualValues(t, td, m.AllTraces()[0]) + } +} + +func TestTraceProcessorWhenOneErrors(t *testing.T) { + processors := make([]consumer.TracesConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.TracesSink) + } + + // Make one processor return error + processors[1].(*consumertest.TracesSink).SetConsumeError(errors.New("my_error")) + + tfc := NewTracesFanOutConnector(processors) + td := testdata.GenerateTraceDataOneSpan() + + var wantSpansCount = 0 + for i := 0; i < 2; i++ { + wantSpansCount += td.SpanCount() + err := tfc.ConsumeTraces(context.Background(), td) + if err == nil { + t.Errorf("Wanted error got nil") + return + } + } + + assert.Equal(t, 0, processors[1].(*consumertest.TracesSink).SpansCount()) + assert.Equal(t, wantSpansCount, processors[0].(*consumertest.TracesSink).SpansCount()) + assert.Equal(t, wantSpansCount, processors[2].(*consumertest.TracesSink).SpansCount()) +} + +func TestMetricsProcessorNotMultiplexing(t *testing.T) { + nop := consumertest.NewMetricsNop() + mfc := NewMetricsFanOutConnector([]consumer.MetricsConsumer{nop}) + assert.Same(t, nop, mfc) +} + +func TestMetricsProcessorMultiplexing(t *testing.T) { + processors := make([]consumer.MetricsConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.MetricsSink) + } + + mfc := NewMetricsFanOutConnector(processors) + md := testdata.GenerateMetricsOneMetric() + + var wantMetricsCount = 0 + for i := 0; i < 2; i++ { + wantMetricsCount += md.MetricCount() + err := mfc.ConsumeMetrics(context.Background(), md) + if err != nil { + t.Errorf("Wanted nil got error") + return + } + } + + for _, p := range processors { + m := p.(*consumertest.MetricsSink) + assert.Equal(t, wantMetricsCount, m.MetricsCount()) + assert.EqualValues(t, md, m.AllMetrics()[0]) + } +} + +func TestMetricsProcessorWhenOneErrors(t *testing.T) { + processors := make([]consumer.MetricsConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.MetricsSink) + } + + // Make one processor return error + 
processors[1].(*consumertest.MetricsSink).SetConsumeError(errors.New("my_error")) + + mfc := NewMetricsFanOutConnector(processors) + md := testdata.GenerateMetricsOneMetric() + + var wantMetricsCount = 0 + for i := 0; i < 2; i++ { + wantMetricsCount += md.MetricCount() + err := mfc.ConsumeMetrics(context.Background(), md) + if err == nil { + t.Errorf("Wanted error got nil") + return + } + } + + assert.Equal(t, 0, processors[1].(*consumertest.MetricsSink).MetricsCount()) + assert.Equal(t, wantMetricsCount, processors[0].(*consumertest.MetricsSink).MetricsCount()) + assert.Equal(t, wantMetricsCount, processors[2].(*consumertest.MetricsSink).MetricsCount()) +} + +func TestLogsProcessorNotMultiplexing(t *testing.T) { + nop := consumertest.NewLogsNop() + lfc := NewLogsFanOutConnector([]consumer.LogsConsumer{nop}) + assert.Same(t, nop, lfc) +} + +func TestLogsProcessorMultiplexing(t *testing.T) { + processors := make([]consumer.LogsConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.LogsSink) + } + + lfc := NewLogsFanOutConnector(processors) + ld := testdata.GenerateLogDataOneLog() + + var wantMetricsCount = 0 + for i := 0; i < 2; i++ { + wantMetricsCount += ld.LogRecordCount() + err := lfc.ConsumeLogs(context.Background(), ld) + if err != nil { + t.Errorf("Wanted nil got error") + return + } + } + + for _, p := range processors { + m := p.(*consumertest.LogsSink) + assert.Equal(t, wantMetricsCount, m.LogRecordsCount()) + assert.EqualValues(t, ld, m.AllLogs()[0]) + } +} + +func TestLogsProcessorWhenOneErrors(t *testing.T) { + processors := make([]consumer.LogsConsumer, 3) + for i := range processors { + processors[i] = new(consumertest.LogsSink) + } + + // Make one processor return error + processors[1].(*consumertest.LogsSink).SetConsumeError(errors.New("my_error")) + + lfc := NewLogsFanOutConnector(processors) + ld := testdata.GenerateLogDataOneLog() + + var wantMetricsCount = 0 + for i := 0; i < 2; i++ { + wantMetricsCount += ld.LogRecordCount() + err := lfc.ConsumeLogs(context.Background(), ld) + if err == nil { + t.Errorf("Wanted error got nil") + return + } + } + + assert.Equal(t, 0, processors[1].(*consumertest.LogsSink).LogRecordsCount()) + assert.Equal(t, wantMetricsCount, processors[0].(*consumertest.LogsSink).LogRecordsCount()) + assert.Equal(t, wantMetricsCount, processors[2].(*consumertest.LogsSink).LogRecordsCount()) +} diff --git a/internal/otel_collector/processor/filterprocessor/README.md b/internal/otel_collector/processor/filterprocessor/README.md new file mode 100644 index 00000000000..98dd6a825cd --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/README.md @@ -0,0 +1,107 @@ +# Filter Processor + +Supported pipeline types: metrics + +The filter processor can be configured to include or exclude metrics based on +metric name in the case of the 'strict' or 'regexp' match types, or based on other +metric attributes in the case of the 'expr' match type. Please refer to +[config.go](./config.go) for the config spec. 
+
+It takes a pipeline type, of which only `metrics` is supported, followed by an
+action:
+- `include`: Any names NOT matching filters are excluded from the remainder of the pipeline
+- `exclude`: Any names matching filters are excluded from the remainder of the pipeline
+
+For each action the following parameters are required:
+  - `match_type`: strict|regexp|expr
+  - `metric_names`: (only for a `match_type` of 'strict' or 'regexp') list of strings or re2 regex patterns
+  - `expressions`: (only for a `match_type` of 'expr') list of expr expressions (see "Using an 'expr' match_type" below)
+
+More details can be found at [include/exclude metrics](../README.md#includeexclude-metrics).
+
+Examples:
+
+```yaml
+processors:
+  filter/1:
+    metrics:
+      include:
+        match_type: regexp
+        metric_names:
+          - prefix/.*
+          - prefix_.*
+      exclude:
+        match_type: strict
+        metric_names:
+          - hello_world
+          - hello/world
+```
+
+Refer to the config files in [testdata](./testdata) for detailed
+examples on using the processor.
+
+### Using an 'expr' match_type
+
+In addition to matching metric names with the 'strict' or 'regexp' match types, the filter processor
+supports matching entire `Metric`s using the [expr](https://github.com/antonmedv/expr) expression engine.
+
+The 'expr' filter evaluates the supplied boolean expressions _per datapoint_ on a metric, and returns a result
+for the entire metric. If any datapoint evaluates to true then the entire metric evaluates to true, otherwise
+false.
+
+Made available to the expression environment are the following:
+
+* `MetricName`
+    a variable containing the current Metric's name
+* `Label(name)`
+    a function that takes a label name string as an argument and returns a string: the value of a label with that
+    name if one exists, or ""
+* `HasLabel(name)`
+    a function that takes a label name string as an argument and returns a boolean: true if the datapoint has a label
+    with that name, false otherwise
+
+Example:
+
+```yaml
+processors:
+  filter/1:
+    metrics:
+      exclude:
+        match_type: expr
+        expressions:
+          - MetricName == "my.metric" && Label("my_label") == "abc123"
+```
+
+The above config will filter out any Metric that both has the name "my.metric" and has at least one datapoint
+with a label of 'my_label="abc123"'.
+
+#### Support for multiple expressions
+
+As with "strict" and "regexp", multiple "expr" `expressions` are allowed.
+
+For example, the following two filters have the same effect: they filter out metrics named "system.cpu.time" and
+"system.disk.io".
+
+```yaml
+processors:
+  filter/expr:
+    metrics:
+      exclude:
+        match_type: expr
+        expressions:
+          - MetricName == "system.cpu.time"
+          - MetricName == "system.disk.io"
+  filter/strict:
+    metrics:
+      exclude:
+        match_type: strict
+        metric_names:
+          - system.cpu.time
+          - system.disk.io
+```
+
+The expressions are effectively ORed per datapoint. So for the above 'expr' configuration, given a datapoint, if its
+parent Metric's name is "system.cpu.time" or "system.disk.io" then there's a match. The conditions are tested against
+all the datapoints in a Metric until there's a match, in which case the entire Metric is considered a match, and in
+the above example the Metric will be excluded. If after testing all the datapoints in a Metric against all the
+expressions there isn't a match, the entire Metric is considered not to match.
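+
+To make the matching semantics above concrete, the following Go sketch mimics how a set of
+'expr'-style predicates could be ORed across a metric's datapoints. It is an illustrative
+simplification only, not the collector's actual evaluator; the `datapoint`, `predicate`, and
+`metricMatches` names are hypothetical stand-ins for the real pdata types.
+
+```go
+package main
+
+import "fmt"
+
+// datapoint is a hypothetical stand-in for a pdata datapoint: just its labels.
+type datapoint struct {
+	labels map[string]string
+}
+
+// predicate mirrors one compiled 'expr' expression: it sees the metric name and
+// one datapoint's labels, like MetricName / Label() / HasLabel() above.
+type predicate func(metricName string, dp datapoint) bool
+
+// metricMatches reports whether a metric matches: expressions are ORed, and a
+// single matching datapoint makes the entire metric match.
+func metricMatches(metricName string, dps []datapoint, preds []predicate) bool {
+	for _, dp := range dps {
+		for _, p := range preds {
+			if p(metricName, dp) {
+				return true // first match decides the whole metric
+			}
+		}
+	}
+	return false
+}
+
+func main() {
+	preds := []predicate{
+		func(name string, _ datapoint) bool { return name == "system.cpu.time" },
+		func(_ string, dp datapoint) bool { return dp.labels["my_label"] == "abc123" },
+	}
+	dps := []datapoint{{labels: map[string]string{"state": "idle"}}}
+	fmt.Println(metricMatches("system.cpu.time", dps, preds))     // true -> excluded
+	fmt.Println(metricMatches("system.memory.usage", dps, preds)) // false -> kept
+}
+```
+
+The early return mirrors the behavior described above: evaluation stops at the first
+datapoint that satisfies any expression.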
diff --git a/internal/otel_collector/processor/filterprocessor/config.go b/internal/otel_collector/processor/filterprocessor/config.go
new file mode 100644
index 00000000000..f434f2295a7
--- /dev/null
+++ b/internal/otel_collector/processor/filterprocessor/config.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterprocessor
+
+import (
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/internal/processor/filtermetric"
+)
+
+// Config defines configuration for the filter processor.
+type Config struct {
+	configmodels.ProcessorSettings `mapstructure:",squash"`
+	Metrics                        MetricFilters `mapstructure:"metrics"`
+}
+
+// MetricFilters filters by Metric properties.
+type MetricFilters struct {
+	// Include match properties describe metrics that should be included in the Collector Service pipeline,
+	// all other metrics should be dropped from further processing.
+	// If both Include and Exclude are specified, Include filtering occurs first.
+	Include *filtermetric.MatchProperties `mapstructure:"include"`
+
+	// Exclude match properties describe metrics that should be excluded from the Collector Service pipeline,
+	// all other metrics should be included.
+	// If both Include and Exclude are specified, Include filtering occurs first.
+	Exclude *filtermetric.MatchProperties `mapstructure:"exclude"`
+}
diff --git a/internal/otel_collector/processor/filterprocessor/config_test.go b/internal/otel_collector/processor/filterprocessor/config_test.go
new file mode 100644
index 00000000000..faf8eafa1f3
--- /dev/null
+++ b/internal/otel_collector/processor/filterprocessor/config_test.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package filterprocessor + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/internal/processor/filtermetric" + fsregexp "go.opentelemetry.io/collector/internal/processor/filterset/regexp" +) + +// TestLoadingConfigRegexp tests loading testdata/config_strict.yaml +func TestLoadingConfigStrict(t *testing.T) { + // list of filters used repeatedly on testdata/config_strict.yaml + testDataFilters := []string{ + "hello_world", + "hello/world", + } + + testDataMetricProperties := &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + MetricNames: testDataFilters, + } + + factories, err := componenttest.ExampleComponents() + assert.Nil(t, err) + + factory := NewFactory() + factories.Processors[configmodels.Type(typeStr)] = factory + config, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config_strict.yaml"), factories) + + assert.Nil(t, err) + require.NotNil(t, config) + + tests := []struct { + filterName string + expCfg *Config + }{ + { + filterName: "filter/empty", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/empty", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + }, + }, + }, + }, { + filterName: "filter/include", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/include", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: testDataMetricProperties, + }, + }, + }, { + filterName: "filter/exclude", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/exclude", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Exclude: testDataMetricProperties, + }, + }, + }, { + filterName: "filter/includeexclude", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/includeexclude", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: testDataMetricProperties, + Exclude: &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + MetricNames: []string{"hello_world"}, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.filterName, func(t *testing.T) { + cfg := config.Processors[test.filterName] + assert.Equal(t, test.expCfg, cfg) + }) + } +} + +// TestLoadingConfigRegexp tests loading testdata/config_regexp.yaml +func TestLoadingConfigRegexp(t *testing.T) { + // list of filters used repeatedly on testdata/config.yaml + testDataFilters := []string{ + "prefix/.*", + "prefix_.*", + ".*/suffix", + ".*_suffix", + ".*/contains/.*", + ".*_contains_.*", + "full/name/match", + "full_name_match", + } + + testDataMetricProperties := &filtermetric.MatchProperties{ + MatchType: filtermetric.Regexp, + MetricNames: testDataFilters, + } + + factories, err := componenttest.ExampleComponents() + assert.Nil(t, err) + + factory := NewFactory() + factories.Processors[typeStr] = factory + config, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config_regexp.yaml"), factories) + + assert.Nil(t, err) + require.NotNil(t, config) + + tests := []struct { + filterName string + expCfg *Config + }{ + { + filterName: "filter/include", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/include", + TypeVal: typeStr, 
+ }, + Metrics: MetricFilters{ + Include: testDataMetricProperties, + }, + }, + }, { + filterName: "filter/exclude", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/exclude", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Exclude: testDataMetricProperties, + }, + }, + }, { + filterName: "filter/unlimitedcache", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/unlimitedcache", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: &filtermetric.MatchProperties{ + MatchType: filtermetric.Regexp, + RegexpConfig: &fsregexp.Config{ + CacheEnabled: true, + }, + MetricNames: testDataFilters, + }, + }, + }, + }, { + filterName: "filter/limitedcache", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/limitedcache", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Exclude: &filtermetric.MatchProperties{ + MatchType: filtermetric.Regexp, + RegexpConfig: &fsregexp.Config{ + CacheEnabled: true, + CacheMaxNumEntries: 10, + }, + MetricNames: testDataFilters, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.filterName, func(t *testing.T) { + cfg := config.Processors[test.filterName] + assert.Equal(t, test.expCfg, cfg) + }) + } +} + +func TestLoadingConfigExpr(t *testing.T) { + factories, err := componenttest.ExampleComponents() + require.NoError(t, err) + factory := NewFactory() + factories.Processors[configmodels.Type(typeStr)] = factory + config, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config_expr.yaml"), factories) + require.NoError(t, err) + require.NotNil(t, config) + + tests := []struct { + filterName string + expCfg configmodels.Processor + }{ + { + filterName: "filter/empty", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/empty", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: &filtermetric.MatchProperties{ + MatchType: filtermetric.Expr, + }, + }, + }, + }, + { + filterName: "filter/include", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/include", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: &filtermetric.MatchProperties{ + MatchType: filtermetric.Expr, + Expressions: []string{ + `Label("foo") == "bar"`, + `HasLabel("baz")`, + }, + }, + }, + }, + }, + { + filterName: "filter/exclude", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/exclude", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Exclude: &filtermetric.MatchProperties{ + MatchType: filtermetric.Expr, + Expressions: []string{ + `Label("foo") == "bar"`, + `HasLabel("baz")`, + }, + }, + }, + }, + }, + { + filterName: "filter/includeexclude", + expCfg: &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: "filter/includeexclude", + TypeVal: typeStr, + }, + Metrics: MetricFilters{ + Include: &filtermetric.MatchProperties{ + MatchType: filtermetric.Expr, + Expressions: []string{ + `HasLabel("foo")`, + }, + }, + Exclude: &filtermetric.MatchProperties{ + MatchType: filtermetric.Expr, + Expressions: []string{ + `HasLabel("bar")`, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.filterName, func(t *testing.T) { + cfg := config.Processors[test.filterName] + assert.Equal(t, test.expCfg, cfg) + }) + } +} diff --git a/internal/otel_collector/processor/filterprocessor/doc.go b/internal/otel_collector/processor/filterprocessor/doc.go new file mode 100644 index 
00000000000..8c0dc18f116 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package filterprocessor implements a processor for filtering +// (dropping) metrics and/or spans by various properties. +package filterprocessor diff --git a/internal/otel_collector/processor/filterprocessor/expr_test.go b/internal/otel_collector/processor/filterprocessor/expr_test.go new file mode 100644 index 00000000000..2f7f99c6092 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/expr_test.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterprocessor + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/internal/processor/filtermetric" +) + +const filteredMetric = "p0_metric_1" +const filteredLblKey = "pt-label-key-1" +const filteredLblVal = "pt-label-val-1" + +func TestExprError(t *testing.T) { + for mdType := pdata.MetricDataTypeIntGauge; mdType <= pdata.MetricDataTypeDoubleHistogram; mdType++ { + testMatchError(t, mdType) + } +} + +func testMatchError(t *testing.T, mdType pdata.MetricDataType) { + // the "foo" expr expression will cause expr Run() to return an error + proc, next, logs := testProcessor(t, nil, []string{"foo"}) + pdm := testData("", 1, mdType) + err := proc.ConsumeMetrics(context.Background(), pdm) + assert.NoError(t, err) + // assert that metrics not be filtered as a result + assert.Equal(t, []pdata.Metrics{pdm}, next.AllMetrics()) + assert.Equal(t, 1, logs.Len()) + assert.Equal(t, "shouldKeepMetric failed", logs.All()[0].Message) +} + +func TestExprProcessor(t *testing.T) { + testFilter(t, pdata.MetricDataTypeIntGauge) + testFilter(t, pdata.MetricDataTypeDoubleGauge) + testFilter(t, pdata.MetricDataTypeIntSum) + testFilter(t, pdata.MetricDataTypeDoubleSum) + testFilter(t, pdata.MetricDataTypeIntHistogram) + testFilter(t, pdata.MetricDataTypeDoubleHistogram) +} + +func testFilter(t *testing.T, mdType 
pdata.MetricDataType) { + format := "MetricName == '%s' && Label('%s') == '%s'" + q := fmt.Sprintf(format, filteredMetric, filteredLblKey, filteredLblVal) + + mds := testDataSlice(2, mdType) + totMetricCount := 0 + for _, md := range mds { + totMetricCount += md.MetricCount() + } + expectedMetricCount := totMetricCount - 1 + filtered := filterMetrics(t, nil, []string{q}, mds) + filteredMetricCount := 0 + for _, metrics := range filtered { + filteredMetricCount += metrics.MetricCount() + rmsSlice := metrics.ResourceMetrics() + for i := 0; i < rmsSlice.Len(); i++ { + rms := rmsSlice.At(i) + ilms := rms.InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + metricSlice := ilm.Metrics() + for k := 0; k < metricSlice.Len(); k++ { + metric := metricSlice.At(k) + if metric.Name() == filteredMetric { + dt := metric.DataType() + switch dt { + case pdata.MetricDataTypeIntGauge: + pts := metric.IntGauge().DataPoints() + for l := 0; l < pts.Len(); l++ { + assertFiltered(t, pts.At(l).LabelsMap()) + } + case pdata.MetricDataTypeDoubleGauge: + pts := metric.DoubleGauge().DataPoints() + for l := 0; l < pts.Len(); l++ { + assertFiltered(t, pts.At(l).LabelsMap()) + } + case pdata.MetricDataTypeIntSum: + pts := metric.IntSum().DataPoints() + for l := 0; l < pts.Len(); l++ { + assertFiltered(t, pts.At(l).LabelsMap()) + } + case pdata.MetricDataTypeDoubleSum: + pts := metric.DoubleSum().DataPoints() + for l := 0; l < pts.Len(); l++ { + assertFiltered(t, pts.At(l).LabelsMap()) + } + case pdata.MetricDataTypeIntHistogram: + pts := metric.IntHistogram().DataPoints() + for l := 0; l < pts.Len(); l++ { + assertFiltered(t, pts.At(l).LabelsMap()) + } + case pdata.MetricDataTypeDoubleHistogram: + pts := metric.DoubleHistogram().DataPoints() + for l := 0; l < pts.Len(); l++ { + assertFiltered(t, pts.At(l).LabelsMap()) + } + } + } + } + } + } + } + assert.Equal(t, expectedMetricCount, filteredMetricCount) +} + +func assertFiltered(t *testing.T, lm pdata.StringMap) { + lm.ForEach(func(k string, v string) { + if k == filteredLblKey && v == filteredLblVal { + assert.Fail(t, "found metric that should have been filtered out") + } + }) +} + +func filterMetrics(t *testing.T, include []string, exclude []string, mds []pdata.Metrics) []pdata.Metrics { + proc, next, _ := testProcessor(t, include, exclude) + for _, md := range mds { + err := proc.ConsumeMetrics(context.Background(), md) + require.NoError(t, err) + } + return next.AllMetrics() +} + +func testProcessor(t *testing.T, include []string, exclude []string) (component.MetricsProcessor, *consumertest.MetricsSink, *observer.ObservedLogs) { + factory := NewFactory() + cfg := exprConfig(factory, include, exclude) + ctx := context.Background() + next := &consumertest.MetricsSink{} + core, logs := observer.New(zapcore.WarnLevel) + proc, err := factory.CreateMetricsProcessor( + ctx, + component.ProcessorCreateParams{ + Logger: zap.New(core), + }, + cfg, + next, + ) + require.NoError(t, err) + require.NotNil(t, proc) + return proc, next, logs +} + +func exprConfig(factory component.ProcessorFactory, include []string, exclude []string) configmodels.Processor { + cfg := factory.CreateDefaultConfig() + pCfg := cfg.(*Config) + pCfg.Metrics = MetricFilters{} + if include != nil { + pCfg.Metrics.Include = &filtermetric.MatchProperties{ + MatchType: "expr", + Expressions: include, + } + } + if exclude != nil { + pCfg.Metrics.Exclude = &filtermetric.MatchProperties{ + MatchType: "expr", + Expressions: exclude, + } + } + return cfg +} + +func 
testDataSlice(size int, mdType pdata.MetricDataType) []pdata.Metrics { + var out []pdata.Metrics + for i := 0; i < 16; i++ { + out = append(out, testData(fmt.Sprintf("p%d_", i), size, mdType)) + } + return out +} + +func testData(prefix string, size int, mdType pdata.MetricDataType) pdata.Metrics { + c := goldendataset.MetricCfg{ + MetricDescriptorType: mdType, + MetricNamePrefix: prefix, + NumILMPerResource: size, + NumMetricsPerILM: size, + NumPtLabels: size, + NumPtsPerMetric: size, + NumResourceAttrs: size, + NumResourceMetrics: size, + } + return goldendataset.MetricDataFromCfg(c) +} diff --git a/internal/otel_collector/processor/filterprocessor/factory.go b/internal/otel_collector/processor/filterprocessor/factory.go new file mode 100644 index 00000000000..383d16a2238 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/factory.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterprocessor + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "filter" +) + +var processorCapabilities = component.ProcessorCapabilities{MutatesConsumedData: false} + +// NewFactory returns a new factory for the Filter processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithMetrics(createMetricsProcessor)) +} + +func createDefaultConfig() configmodels.Processor { + return &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + } +} + +func createMetricsProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.MetricsConsumer, +) (component.MetricsProcessor, error) { + fp, err := newFilterMetricProcessor(params.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewMetricsProcessor( + cfg, + nextConsumer, + fp, + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/internal/otel_collector/processor/filterprocessor/factory_test.go b/internal/otel_collector/processor/filterprocessor/factory_test.go new file mode 100644 index 00000000000..74e34f3a7a3 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/factory_test.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filterprocessor + +import ( + "context" + "fmt" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +func TestType(t *testing.T) { + factory := NewFactory() + pType := factory.Type() + + assert.Equal(t, pType, configmodels.Type("filter")) +} + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.Equal(t, cfg, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + NameVal: typeStr, + TypeVal: typeStr, + }, + }) + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateProcessors(t *testing.T) { + tests := []struct { + configName string + succeed bool + }{ + { + configName: "config_regexp.yaml", + succeed: true, + }, { + configName: "config_strict.yaml", + succeed: true, + }, { + configName: "config_invalid.yaml", + succeed: false, + }, + } + + for _, test := range tests { + factories, err := componenttest.ExampleComponents() + assert.Nil(t, err) + + factory := NewFactory() + factories.Processors[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", test.configName), factories) + assert.Nil(t, err) + + for name, cfg := range cfg.Processors { + t.Run(fmt.Sprintf("%s/%s", test.configName, name), func(t *testing.T) { + factory := NewFactory() + + tp, tErr := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewTracesNop()) + // Not implemented error + assert.NotNil(t, tErr) + assert.Nil(t, tp) + + mp, mErr := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewMetricsNop()) + assert.Equal(t, test.succeed, mp != nil) + assert.Equal(t, test.succeed, mErr == nil) + }) + } + } +} diff --git a/internal/otel_collector/processor/filterprocessor/filter_processor.go b/internal/otel_collector/processor/filterprocessor/filter_processor.go new file mode 100644 index 00000000000..fe544165393 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/filter_processor.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filterprocessor + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filtermetric" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +type filterMetricProcessor struct { + cfg *Config + include filtermetric.Matcher + exclude filtermetric.Matcher + logger *zap.Logger +} + +func newFilterMetricProcessor(logger *zap.Logger, cfg *Config) (*filterMetricProcessor, error) { + inc, err := createMatcher(cfg.Metrics.Include) + if err != nil { + return nil, err + } + + exc, err := createMatcher(cfg.Metrics.Exclude) + if err != nil { + return nil, err + } + + includeMatchType := "" + var includeExpressions []string + var includeMetricNames []string + if cfg.Metrics.Include != nil { + includeMatchType = string(cfg.Metrics.Include.MatchType) + includeExpressions = cfg.Metrics.Include.Expressions + includeMetricNames = cfg.Metrics.Include.MetricNames + } + + excludeMatchType := "" + var excludeExpressions []string + var excludeMetricNames []string + if cfg.Metrics.Exclude != nil { + excludeMatchType = string(cfg.Metrics.Exclude.MatchType) + excludeExpressions = cfg.Metrics.Exclude.Expressions + excludeMetricNames = cfg.Metrics.Exclude.MetricNames + } + + logger.Info( + "Metric filter configured", + zap.String("include match_type", includeMatchType), + zap.Strings("include expressions", includeExpressions), + zap.Strings("include metric names", includeMetricNames), + zap.String("exclude match_type", excludeMatchType), + zap.Strings("exclude expressions", excludeExpressions), + zap.Strings("exclude metric names", excludeMetricNames), + ) + + return &filterMetricProcessor{ + cfg: cfg, + include: inc, + exclude: exc, + logger: logger, + }, nil +} + +func createMatcher(mp *filtermetric.MatchProperties) (filtermetric.Matcher, error) { + // Nothing specified in configuration + if mp == nil { + return nil, nil + } + return filtermetric.NewMatcher(mp) +} + +// ProcessMetrics filters the given metrics based off the filterMetricProcessor's filters. 
+func (fmp *filterMetricProcessor) ProcessMetrics(_ context.Context, pdm pdata.Metrics) (pdata.Metrics, error) { + rms := pdm.ResourceMetrics() + idx := newMetricIndex() + for i := 0; i < rms.Len(); i++ { + ilms := rms.At(i).InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + ms := ilms.At(j).Metrics() + for k := 0; k < ms.Len(); k++ { + keep, err := fmp.shouldKeepMetric(ms.At(k)) + if err != nil { + fmp.logger.Error("shouldKeepMetric failed", zap.Error(err)) + // don't `continue`, keep the metric if there's an error + } + if keep { + idx.add(i, j, k) + } + } + } + } + if idx.isEmpty() { + return pdm, processorhelper.ErrSkipProcessingData + } + return idx.extract(pdm), nil +} + +func (fmp *filterMetricProcessor) shouldKeepMetric(metric pdata.Metric) (bool, error) { + if fmp.include != nil { + matches, err := fmp.include.MatchMetric(metric) + if err != nil { + // default to keep if there's an error + return true, err + } + if !matches { + return false, nil + } + } + + if fmp.exclude != nil { + matches, err := fmp.exclude.MatchMetric(metric) + if err != nil { + return true, err + } + if matches { + return false, nil + } + } + + return true, nil +} diff --git a/internal/otel_collector/processor/filterprocessor/filter_processor_test.go b/internal/otel_collector/processor/filterprocessor/filter_processor_test.go new file mode 100644 index 00000000000..a922a18b838 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/filter_processor_test.go @@ -0,0 +1,413 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filterprocessor + +import ( + "context" + "fmt" + "testing" + "time" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/internal/processor/filtermetric" + "go.opentelemetry.io/collector/translator/internaldata" +) + +type metricNameTest struct { + name string + inc *filtermetric.MatchProperties + exc *filtermetric.MatchProperties + inMN [][]*metricspb.Metric // input Metric batches + outMN [][]string // output Metric names + allMetricsFiltered bool +} + +var ( + validFilters = []string{ + "prefix/.*", + "prefix_.*", + ".*/suffix", + ".*_suffix", + ".*/contains/.*", + ".*_contains_.*", + "full/name/match", + "full_name_match", + } + + inMetricNames = []string{ + "full_name_match", + "not_exact_string_match", + "prefix/test/match", + "prefix_test_match", + "prefixprefix/test/match", + "test/match/suffix", + "test_match_suffix", + "test/match/suffixsuffix", + "test/contains/match", + "test_contains_match", + "random", + "full/name/match", + "full_name_match", // repeats + "not_exact_string_match", + } + + regexpMetricsFilterProperties = &filtermetric.MatchProperties{ + MatchType: filtermetric.Regexp, + MetricNames: validFilters, + } + + standardTests = []metricNameTest{ + { + name: "includeFilter", + inc: regexpMetricsFilterProperties, + inMN: [][]*metricspb.Metric{metricsWithName(inMetricNames)}, + outMN: [][]string{{ + "full_name_match", + "prefix/test/match", + "prefix_test_match", + "prefixprefix/test/match", + "test/match/suffix", + "test_match_suffix", + "test/match/suffixsuffix", + "test/contains/match", + "test_contains_match", + "full/name/match", + "full_name_match", + }}, + }, + { + name: "excludeFilter", + exc: regexpMetricsFilterProperties, + inMN: [][]*metricspb.Metric{metricsWithName(inMetricNames)}, + outMN: [][]string{{ + "not_exact_string_match", + "random", + "not_exact_string_match", + }}, + }, + { + name: "includeAndExclude", + inc: regexpMetricsFilterProperties, + exc: &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + MetricNames: []string{ + "prefix_test_match", + "test_contains_match", + }, + }, + inMN: [][]*metricspb.Metric{metricsWithName(inMetricNames)}, + outMN: [][]string{{ + "full_name_match", + "prefix/test/match", + // "prefix_test_match", excluded by exclude filter + "prefixprefix/test/match", + "test/match/suffix", + "test_match_suffix", + "test/match/suffixsuffix", + "test/contains/match", + // "test_contains_match", excluded by exclude filter + "full/name/match", + "full_name_match", + }}, + }, + { + name: "includeAndExcludeWithEmptyAndNil", + inc: regexpMetricsFilterProperties, + exc: &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + MetricNames: []string{ + "prefix_test_match", + "test_contains_match", + }, + }, + inMN: [][]*metricspb.Metric{nil, metricsWithName(inMetricNames), {}}, + outMN: [][]string{ + { + "full_name_match", + "prefix/test/match", + // "prefix_test_match", excluded by exclude filter + "prefixprefix/test/match", + "test/match/suffix", + "test_match_suffix", + 
"test/match/suffixsuffix", + "test/contains/match", + // "test_contains_match", excluded by exclude filter + "full/name/match", + "full_name_match", + }, + }, + }, + { + name: "emptyFilterInclude", + inc: &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + }, + inMN: [][]*metricspb.Metric{metricsWithName(inMetricNames)}, + allMetricsFiltered: true, + }, + { + name: "emptyFilterExclude", + exc: &filtermetric.MatchProperties{ + MatchType: filtermetric.Strict, + }, + inMN: [][]*metricspb.Metric{metricsWithName(inMetricNames)}, + outMN: [][]string{inMetricNames}, + }, + } +) + +func TestFilterMetricProcessor(t *testing.T) { + for _, test := range standardTests { + t.Run(test.name, func(t *testing.T) { + // next stores the results of the filter metric processor + next := new(consumertest.MetricsSink) + cfg := &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Metrics: MetricFilters{ + Include: test.inc, + Exclude: test.exc, + }, + } + factory := NewFactory() + fmp, err := factory.CreateMetricsProcessor( + context.Background(), + component.ProcessorCreateParams{ + Logger: zap.NewNop(), + }, + cfg, + next, + ) + assert.NotNil(t, fmp) + assert.Nil(t, err) + + caps := fmp.GetCapabilities() + assert.False(t, caps.MutatesConsumedData) + ctx := context.Background() + assert.NoError(t, fmp.Start(ctx, nil)) + + mds := make([]consumerdata.MetricsData, len(test.inMN)) + for i, metrics := range test.inMN { + mds[i] = consumerdata.MetricsData{ + Metrics: metrics, + } + } + cErr := fmp.ConsumeMetrics(context.Background(), internaldata.OCSliceToMetrics(mds)) + assert.Nil(t, cErr) + got := next.AllMetrics() + + if test.allMetricsFiltered { + require.Equal(t, 0, len(got)) + return + } + + require.Equal(t, 1, len(got)) + gotMD := internaldata.MetricsToOC(got[0]) + require.Equal(t, len(test.outMN), len(gotMD)) + for i, wantOut := range test.outMN { + assert.Equal(t, len(wantOut), len(gotMD[i].Metrics)) + for idx, out := range gotMD[i].Metrics { + assert.Equal(t, wantOut[idx], out.MetricDescriptor.Name) + } + } + assert.NoError(t, fmp.Shutdown(ctx)) + }) + } +} + +func metricsWithName(names []string) []*metricspb.Metric { + ret := make([]*metricspb.Metric, len(names)) + now := time.Now() + for i, name := range names { + ret[i] = &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: name, + Type: metricspb.MetricDescriptor_GAUGE_INT64, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + { + Timestamp: timestamppb.New(now.Add(10 * time.Second)), + Value: &metricspb.Point_Int64Value{ + Int64Value: int64(123), + }, + }, + }, + }, + }, + } + } + return ret +} + +func BenchmarkStrictFilter(b *testing.B) { + mp := &filtermetric.MatchProperties{ + MatchType: "strict", + MetricNames: []string{"p10_metric_0"}, + } + benchmarkFilter(b, mp) +} + +func BenchmarkRegexpFilter(b *testing.B) { + mp := &filtermetric.MatchProperties{ + MatchType: "regexp", + MetricNames: []string{"p10_metric_0"}, + } + benchmarkFilter(b, mp) +} + +func BenchmarkExprFilter(b *testing.B) { + mp := &filtermetric.MatchProperties{ + MatchType: "expr", + Expressions: []string{`MetricName == "p10_metric_0"`}, + } + benchmarkFilter(b, mp) +} + +func benchmarkFilter(b *testing.B, mp *filtermetric.MatchProperties) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + pcfg := cfg.(*Config) + pcfg.Metrics = MetricFilters{ + Exclude: mp, + } + ctx := context.Background() + proc, _ := factory.CreateMetricsProcessor( + ctx, + 
component.ProcessorCreateParams{}, + cfg, + consumertest.NewMetricsNop(), + ) + pdms := metricSlice(128) + for i := 0; i < b.N; i++ { + for _, pdm := range pdms { + _ = proc.ConsumeMetrics(ctx, pdm) + } + } +} + +func metricSlice(numMetrics int) []pdata.Metrics { + var out []pdata.Metrics + for i := 0; i < numMetrics; i++ { + const size = 2 + out = append(out, pdm(fmt.Sprintf("p%d_", i), size)) + } + return out +} + +func pdm(prefix string, size int) pdata.Metrics { + c := goldendataset.MetricCfg{ + MetricDescriptorType: pdata.MetricDataTypeIntGauge, + MetricNamePrefix: prefix, + NumILMPerResource: size, + NumMetricsPerILM: size, + NumPtLabels: size, + NumPtsPerMetric: size, + NumResourceAttrs: size, + NumResourceMetrics: size, + } + return goldendataset.MetricDataFromCfg(c) +} + +func TestMetricIndexSingle(t *testing.T) { + metrics := pdm("", 1) + idx := newMetricIndex() + idx.add(0, 0, 0) + extracted := idx.extract(metrics) + require.Equal(t, metrics, extracted) +} + +func TestMetricIndexAll(t *testing.T) { + metrics := pdm("", 2) + idx := newMetricIndex() + idx.add(0, 0, 0) + idx.add(0, 0, 1) + idx.add(0, 1, 0) + idx.add(0, 1, 1) + idx.add(1, 0, 0) + idx.add(1, 0, 1) + idx.add(1, 1, 0) + idx.add(1, 1, 1) + extracted := idx.extract(metrics) + require.Equal(t, metrics, extracted) +} + +func TestNilResourceMetrics(t *testing.T) { + metrics := pdata.NewMetrics() + rms := metrics.ResourceMetrics() + rms.Append(pdata.NewResourceMetrics()) + requireNotPanics(t, metrics) +} + +func TestNilILM(t *testing.T) { + metrics := pdata.NewMetrics() + rms := metrics.ResourceMetrics() + rm := pdata.NewResourceMetrics() + rms.Append(rm) + ilms := rm.InstrumentationLibraryMetrics() + ilms.Append(pdata.NewInstrumentationLibraryMetrics()) + requireNotPanics(t, metrics) +} + +func TestNilMetric(t *testing.T) { + metrics := pdata.NewMetrics() + rms := metrics.ResourceMetrics() + rm := pdata.NewResourceMetrics() + rms.Append(rm) + ilms := rm.InstrumentationLibraryMetrics() + ilm := pdata.NewInstrumentationLibraryMetrics() + ilms.Append(ilm) + ms := ilm.Metrics() + ms.Append(pdata.NewMetric()) + requireNotPanics(t, metrics) +} + +func requireNotPanics(t *testing.T, metrics pdata.Metrics) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + pcfg := cfg.(*Config) + pcfg.Metrics = MetricFilters{ + Exclude: &filtermetric.MatchProperties{ + MatchType: "strict", + MetricNames: []string{"foo"}, + }, + } + ctx := context.Background() + proc, _ := factory.CreateMetricsProcessor( + ctx, + component.ProcessorCreateParams{ + Logger: zap.NewNop(), + }, + cfg, + consumertest.NewMetricsNop(), + ) + require.NotPanics(t, func() { + _ = proc.ConsumeMetrics(ctx, metrics) + }) +} diff --git a/internal/otel_collector/processor/filterprocessor/metric_index.go b/internal/otel_collector/processor/filterprocessor/metric_index.go new file mode 100644 index 00000000000..50afee55b86 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/metric_index.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package filterprocessor + +import ( + "sort" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// metricIndex holds paths to metrics in a pdata.Metrics struct via the indexes +// ResourceMetrics -> InstrumentationLibraryMetrics -> Metrics. Once these +// indexes are populated, you can extract a pdata.Metrics from an existing +// pdata.Metrics with just the metrics at the specified paths. The motivation +// for this type is to allow the output of filtered metrics to not contain +// parent structs (InstrumentationLibrary, Resource, etc.) for a MetricSlice +// that has become empty after filtering. +type metricIndex struct { + m map[int]map[int]map[int]bool +} + +func newMetricIndex() *metricIndex { + return &metricIndex{m: map[int]map[int]map[int]bool{}} +} + +func (idx metricIndex) add(rmIdx, ilmIdx, mIdx int) { + rmMap, ok := idx.m[rmIdx] + if !ok { + rmMap = map[int]map[int]bool{} + idx.m[rmIdx] = rmMap + } + ilmMap, ok := rmMap[ilmIdx] + if !ok { + ilmMap = map[int]bool{} + rmMap[ilmIdx] = ilmMap + } + ilmMap[mIdx] = true +} + +func (idx metricIndex) extract(pdm pdata.Metrics) pdata.Metrics { + out := pdata.NewMetrics() + rmSliceOut := out.ResourceMetrics() + + sortRMIdx := sortRM(idx.m) + rmsIn := pdm.ResourceMetrics() + rmSliceOut.Resize(len(sortRMIdx)) + pos := 0 + for _, rmIdx := range sortRMIdx { + rmIn := rmsIn.At(rmIdx) + ilmSliceIn := rmIn.InstrumentationLibraryMetrics() + + rmOut := rmSliceOut.At(pos) + pos++ + rmIn.Resource().CopyTo(rmOut.Resource()) + ilmSliceOut := rmOut.InstrumentationLibraryMetrics() + ilmIndexes := idx.m[rmIdx] + for _, ilmIdx := range sortILM(ilmIndexes) { + ilmIn := ilmSliceIn.At(ilmIdx) + mSliceIn := ilmIn.Metrics() + + ilmOut := pdata.NewInstrumentationLibraryMetrics() + ilmSliceOut.Append(ilmOut) + ilOut := ilmOut.InstrumentationLibrary() + ilmIn.InstrumentationLibrary().CopyTo(ilOut) + mSliceOut := ilmOut.Metrics() + for _, metricIdx := range sortMetrics(ilmIndexes[ilmIdx]) { + mSliceOut.Append(mSliceIn.At(metricIdx)) + } + } + } + return out +} + +func sortRM(rmIndexes map[int]map[int]map[int]bool) []int { + var rmSorted = make([]int, len(rmIndexes)) + i := 0 + for key := range rmIndexes { + rmSorted[i] = key + i++ + } + sort.Ints(rmSorted) + return rmSorted +} + +func sortILM(ilmIndexes map[int]map[int]bool) []int { + var ilmSorted = make([]int, len(ilmIndexes)) + i := 0 + for key := range ilmIndexes { + ilmSorted[i] = key + i++ + } + sort.Ints(ilmSorted) + return ilmSorted +} + +func sortMetrics(metricIndexes map[int]bool) []int { + var metricIdxSorted = make([]int, len(metricIndexes)) + i := 0 + for key := range metricIndexes { + metricIdxSorted[i] = key + i++ + } + sort.Ints(metricIdxSorted) + return metricIdxSorted +} + +func (idx metricIndex) isEmpty() bool { + return len(idx.m) == 0 +} diff --git a/internal/otel_collector/processor/filterprocessor/testdata/config_expr.yaml b/internal/otel_collector/processor/filterprocessor/testdata/config_expr.yaml new file mode 100644 index 00000000000..01afa028747 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/testdata/config_expr.yaml @@ -0,0 +1,41 @@ +receivers: + examplereceiver: + +processors: + filter/empty: + metrics: + include: + match_type: expr + filter/include: + metrics: + include: + match_type: expr + expressions: + - Label("foo") == "bar" + - HasLabel("baz") + filter/exclude: + metrics: + exclude: + match_type: expr + expressions: + - Label("foo") == "bar" 
+ - HasLabel("baz") + filter/includeexclude: + metrics: + include: + match_type: expr + expressions: + - HasLabel("foo") + exclude: + match_type: expr + expressions: + - HasLabel("bar") +exporters: + exampleexporter: + +service: + pipelines: + metrics: + receivers: [examplereceiver] + processors: [filter/empty] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/filterprocessor/testdata/config_invalid.yaml b/internal/otel_collector/processor/filterprocessor/testdata/config_invalid.yaml new file mode 100644 index 00000000000..a2ed32c8c40 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/testdata/config_invalid.yaml @@ -0,0 +1,26 @@ +receivers: + examplereceiver: + +processors: + filter/include: + # any names NOT matching filters are excluded from remainder of pipeline + metrics: + include: + match_type: regexp + metric_names: + # re2 regexp patterns + - (\W|^)stock\stips(\W|$ + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [filter/include] + exporters: [exampleexporter] + metrics: + receivers: [examplereceiver] + processors: [filter/include] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/filterprocessor/testdata/config_regexp.yaml b/internal/otel_collector/processor/filterprocessor/testdata/config_regexp.yaml new file mode 100644 index 00000000000..6ad5a90c1f7 --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/testdata/config_regexp.yaml @@ -0,0 +1,79 @@ +receivers: + examplereceiver: + +processors: + filter: + filter/include: + # any names NOT matching filters are excluded from remainder of pipeline + metrics: + include: + match_type: regexp + metric_names: + # re2 regexp patterns + - prefix/.* + - prefix_.* + - .*/suffix + - .*_suffix + - .*/contains/.* + - .*_contains_.* + - full/name/match + - full_name_match + filter/exclude: + # any names matching filters are excluded from remainder of pipeline + metrics: + exclude: + match_type: regexp + metric_names: + - prefix/.* + - prefix_.* + - .*/suffix + - .*_suffix + - .*/contains/.* + - .*_contains_.* + - full/name/match + - full_name_match + filter/unlimitedcache: + metrics: + include: + match_type: regexp + regexp: + cacheenabled: true + metric_names: + - prefix/.* + - prefix_.* + - .*/suffix + - .*_suffix + - .*/contains/.* + - .*_contains_.* + - full/name/match + - full_name_match + filter/limitedcache: + metrics: + exclude: + match_type: regexp + metric_names: + - prefix/.* + - prefix_.* + - .*/suffix + - .*_suffix + - .*/contains/.* + - .*_contains_.* + - full/name/match + - full_name_match + regexp: + cacheenabled: true + cachemaxnumentries: 10 + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [filter] + exporters: [exampleexporter] + metrics: + receivers: [examplereceiver] + processors: [filter] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/filterprocessor/testdata/config_strict.yaml b/internal/otel_collector/processor/filterprocessor/testdata/config_strict.yaml new file mode 100644 index 00000000000..5eddd3b6cdf --- /dev/null +++ b/internal/otel_collector/processor/filterprocessor/testdata/config_strict.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + +processors: + filter/empty: + metrics: + include: + match_type: strict + filter/include: + metrics: + # any names NOT matching filters are excluded from remainder of pipeline + include: + match_type: strict + metric_names: + - 
hello_world
+          - hello/world
+  filter/exclude:
+    metrics:
+      # any names matching filters are excluded from remainder of pipeline
+      exclude:
+        match_type: strict
+        metric_names:
+          - hello_world
+          - hello/world
+  filter/includeexclude:
+    metrics:
+      # if both include and exclude are specified, include filters are applied first
+      # the following configuration would only allow metrics named "hello/world" to pass through
+      include:
+        match_type: strict
+        metric_names:
+          - hello_world
+          - hello/world
+      exclude:
+        match_type: strict
+        metric_names:
+          - hello_world
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      processors: [filter/empty]
+      exporters: [exampleexporter]
+    metrics:
+      receivers: [examplereceiver]
+      processors: [filter/empty]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/processor/memorylimiter/README.md b/internal/otel_collector/processor/memorylimiter/README.md
new file mode 100644
index 00000000000..0b5008c5463
--- /dev/null
+++ b/internal/otel_collector/processor/memorylimiter/README.md
@@ -0,0 +1,107 @@
+# Memory Limiter Processor
+
+Supported pipeline types: metrics, traces
+
+The memory limiter processor is used to prevent out-of-memory situations on
+the collector. Given that the amount and type of data the collector processes is
+environment-specific, and that the collector's resource utilization also depends
+on the configured processors, it is important to put checks in place regarding
+memory usage.
+
+The memory_limiter processor performs periodic checks of memory usage and, if
+usage exceeds the defined limits, begins dropping data and forcing garbage
+collection to reduce memory consumption.
+
+The memory_limiter uses soft and hard memory limits. The hard limit is always
+above or equal to the soft limit.
+
+When the memory usage exceeds the soft limit, the processor starts dropping data and
+returning errors to the preceding component in the pipeline (which should normally be a
+receiver).
+
+When the memory usage is above the hard limit, in addition to dropping data the
+processor will forcibly perform garbage collection in order to try to free memory.
+
+When the memory usage drops below the soft limit, normal operation resumes (data
+is no longer dropped and no forced garbage collection is performed).
+
+The difference between the soft and hard limits is defined via the `spike_limit_mib`
+configuration option. The value of this option should be selected in a way that ensures
+that between the memory check intervals the memory usage cannot increase by more than this
+value (otherwise memory usage may exceed the hard limit, even if only temporarily).
+A good starting point for `spike_limit_mib` is 20% of the hard limit. Bigger
+`spike_limit_mib` values may be necessary for spiky traffic or for longer check intervals.
+
+In addition, if the command line option `mem-ballast-size-mib` is used to specify a
+ballast (see command line help for details), the same value that is provided via the
+command line must also be defined in the memory_limiter processor using the `ballast_size_mib`
+config option. If the command line option value and config option value don't match,
+the behavior of the memory_limiter processor will be unpredictable.
+
+Note that while the processor can help mitigate out-of-memory situations,
+it is not a replacement for properly sizing and configuring the
+collector.
+Keep in mind that if the soft limit is crossed, the collector will
+return errors to all receive operations until enough memory is freed. This will
+result in dropped data.
+
+It is highly recommended to configure the ballast command line option as well as
+the memory_limiter processor on every collector. The ballast should be configured
+to be 1/3 to 1/2 of the memory allocated to the collector. The memory_limiter
+processor should be the first processor defined in the pipeline (immediately after
+the receivers). This is to ensure that backpressure can be sent to applicable
+receivers and to minimize the likelihood of dropped data when the memory_limiter
+gets triggered.
+
+Please refer to [config.go](./config.go) for the config spec.
+
+The following configuration options **must be changed**:
+- `check_interval` (default = 0s): Time between measurements of memory
+usage. The recommended value is 1 second.
+If the expected traffic to the Collector is very spiky, then decrease the
+`check_interval` or increase `spike_limit_mib` to avoid memory usage going over
+the hard limit.
+- `limit_mib` (default = 0): Maximum amount of memory, in MiB, targeted to be
+allocated by the process heap. Note that typically the total memory usage of the
+process will be about 50 MiB higher than this value. This defines the hard limit.
+- `spike_limit_mib` (default = 20% of `limit_mib`): Maximum spike expected between the
+measurements of memory usage. The value must be less than `limit_mib`. The soft limit
+value will be equal to (limit_mib - spike_limit_mib).
+The recommended value for `spike_limit_mib` is about 20% of `limit_mib`.
+- `limit_percentage` (default = 0): Maximum amount of total memory targeted to be
+allocated by the process heap. This configuration is supported on Linux systems with cgroups
+and is intended for use on dynamically provisioned platforms such as Docker.
+This option is used to calculate `memory_limit` from the total available memory.
+For instance, a setting of 75% with a total memory of 1 GiB will result in a limit of 750 MiB.
+The fixed memory setting (`limit_mib`) takes precedence
+over the percentage configuration.
+- `spike_limit_percentage` (default = 0): Maximum spike expected between the
+measurements of memory usage. The value must be less than `limit_percentage`.
+This option is used to calculate `spike_limit_mib` from the total available memory.
+For instance, a setting of 25% with a total memory of 1 GiB will result in a spike limit of 250 MiB.
+This option is intended to be used only with `limit_percentage`.
+
+The following configuration options can also be modified:
+- `ballast_size_mib` (default = 0): Must match the `mem-ballast-size-mib`
+command line option.
+
+Examples:
+
+```yaml
+processors:
+  memory_limiter:
+    ballast_size_mib: 2000
+    check_interval: 1s
+    limit_mib: 4000
+    spike_limit_mib: 800
+```
+
+```yaml
+processors:
+  memory_limiter:
+    ballast_size_mib: 2000
+    check_interval: 1s
+    limit_percentage: 50
+    spike_limit_percentage: 30
+```
+
+Refer to [config.yaml](./testdata/config.yaml) for detailed
+examples on using the processor.
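The soft limit is derived rather than configured directly, which is easy to get wrong when sizing a deployment. A minimal sketch of the arithmetic described in the README, using the values from its first example (illustrative only, not part of the vendored code):

```go
package main

import "fmt"

func main() {
	// Values from the first README example above (limit_mib / spike_limit_mib).
	const (
		limitMiB      = 4000 // the hard limit
		spikeLimitMiB = 800  // ~20% of the hard limit, per the recommendation
	)
	// The soft limit is not configured directly; it is the hard limit minus
	// the spike allowance. Data starts being dropped once usage crosses it.
	softLimitMiB := limitMiB - spikeLimitMiB
	fmt.Printf("hard limit: %d MiB, soft limit: %d MiB\n", limitMiB, softLimitMiB)
}
```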
diff --git a/internal/otel_collector/processor/memorylimiter/config.go b/internal/otel_collector/processor/memorylimiter/config.go
new file mode 100644
index 00000000000..f750d902378
--- /dev/null
+++ b/internal/otel_collector/processor/memorylimiter/config.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package memorylimiter provides a processor for the OpenTelemetry Service
+// pipeline that drops data on the pipeline according to the current state of
+// memory usage.
+package memorylimiter
+
+import (
+	"time"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config defines configuration for the memoryLimiter processor.
+type Config struct {
+	configmodels.ProcessorSettings `mapstructure:",squash"`
+
+	// CheckInterval is the time between measurements of memory usage for the
+	// purposes of avoiding going over the limits. Defaults to zero, so no
+	// checks will be performed.
+	CheckInterval time.Duration `mapstructure:"check_interval"`
+
+	// MemoryLimitMiB is the maximum amount of memory, in MiB, targeted to be
+	// allocated by the process.
+	MemoryLimitMiB uint32 `mapstructure:"limit_mib"`
+
+	// MemorySpikeLimitMiB is the maximum spike, in MiB, expected between
+	// measurements of memory usage.
+	MemorySpikeLimitMiB uint32 `mapstructure:"spike_limit_mib"`
+
+	// BallastSizeMiB is the size, in MiB, of the ballast being used by the
+	// process.
+	BallastSizeMiB uint32 `mapstructure:"ballast_size_mib"`
+
+	// MemoryLimitPercentage is the maximum amount of memory, in %, targeted to be
+	// allocated by the process. The fixed memory setting MemoryLimitMiB takes precedence.
+	MemoryLimitPercentage uint32 `mapstructure:"limit_percentage"`
+	// MemorySpikePercentage is the maximum spike, as a percentage of total
+	// memory, expected between measurements of memory usage.
+	MemorySpikePercentage uint32 `mapstructure:"spike_limit_percentage"`
+}
+
+// Name of the BallastSizeMiB config option.
+const ballastSizeMibKey = "ballast_size_mib"
diff --git a/internal/otel_collector/processor/memorylimiter/config_test.go b/internal/otel_collector/processor/memorylimiter/config_test.go
new file mode 100644
index 00000000000..5d513f0c087
--- /dev/null
+++ b/internal/otel_collector/processor/memorylimiter/config_test.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package memorylimiter + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + require.NoError(t, err) + factory := NewFactory() + factories.Processors[typeStr] = factory + require.NoError(t, err) + + cfg, err := configtest.LoadConfigFile( + t, + path.Join(".", "testdata", "config.yaml"), + factories) + + require.Nil(t, err) + require.NotNil(t, cfg) + + p0 := cfg.Processors["memory_limiter"] + assert.Equal(t, p0, + &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "memory_limiter", + NameVal: "memory_limiter", + }, + }) + + p1 := cfg.Processors["memory_limiter/with-settings"] + assert.Equal(t, p1, + &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "memory_limiter", + NameVal: "memory_limiter/with-settings", + }, + CheckInterval: 5 * time.Second, + MemoryLimitMiB: 4000, + MemorySpikeLimitMiB: 500, + BallastSizeMiB: 2000, + }) +} diff --git a/internal/otel_collector/processor/memorylimiter/factory.go b/internal/otel_collector/processor/memorylimiter/factory.go new file mode 100644 index 00000000000..401957a42c4 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/factory.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memorylimiter + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" Attribute Key in configuration. + typeStr = "memory_limiter" +) + +var processorCapabilities = component.ProcessorCapabilities{MutatesConsumedData: false} + +// NewFactory returns a new factory for the Memory Limiter processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithTraces(createTraceProcessor), + processorhelper.WithMetrics(createMetricsProcessor), + processorhelper.WithLogs(createLogsProcessor)) +} + +// CreateDefaultConfig creates the default configuration for processor. Notice +// that the default configuration is expected to fail for this processor. 
+func createDefaultConfig() configmodels.Processor { + return &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + } +} + +func createTraceProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, +) (component.TracesProcessor, error) { + ml, err := newMemoryLimiter(params.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewTraceProcessor( + cfg, + nextConsumer, + ml, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithShutdown(ml.shutdown)) +} + +func createMetricsProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.MetricsConsumer, +) (component.MetricsProcessor, error) { + ml, err := newMemoryLimiter(params.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewMetricsProcessor( + cfg, + nextConsumer, + ml, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithShutdown(ml.shutdown)) +} + +func createLogsProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.LogsConsumer, +) (component.LogsProcessor, error) { + ml, err := newMemoryLimiter(params.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewLogsProcessor( + cfg, + nextConsumer, + ml, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithShutdown(ml.shutdown)) +} diff --git a/internal/otel_collector/processor/memorylimiter/factory_test.go b/internal/otel_collector/processor/memorylimiter/factory_test.go new file mode 100644 index 00000000000..c7ee2eba04a --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/factory_test.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memorylimiter + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + require.NotNil(t, factory) + + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateProcessor(t *testing.T) { + factory := NewFactory() + require.NotNil(t, factory) + + cfg := factory.CreateDefaultConfig() + + // This processor can't be created with the default config. 
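+	// (check_interval and the memory limits default to zero in the default
+	// config, so each of the create calls below is expected to fail.)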
+ tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewTracesNop()) + assert.Nil(t, tp) + assert.Error(t, err, "created processor with invalid settings") + + mp, err := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewMetricsNop()) + assert.Nil(t, mp) + assert.Error(t, err, "created processor with invalid settings") + + lp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewLogsNop()) + assert.Nil(t, lp) + assert.Error(t, err, "created processor with invalid settings") + + // Create processor with a valid config. + pCfg := cfg.(*Config) + pCfg.MemoryLimitMiB = 5722 + pCfg.MemorySpikeLimitMiB = 1907 + pCfg.BallastSizeMiB = 2048 + pCfg.CheckInterval = 100 * time.Millisecond + + tp, err = factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewTracesNop()) + assert.NoError(t, err) + assert.NotNil(t, tp) + assert.NoError(t, tp.Shutdown(context.Background())) + + mp, err = factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewMetricsNop()) + assert.NoError(t, err) + assert.NotNil(t, mp) + assert.NoError(t, mp.Shutdown(context.Background())) + + lp, err = factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewLogsNop()) + assert.NoError(t, err) + assert.NotNil(t, lp) + assert.NoError(t, lp.Shutdown(context.Background())) +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup.go new file mode 100644 index 00000000000..9af9e4cb90a --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" +) + +// CGroup represents the data structure for a Linux control group. +type CGroup struct { + path string +} + +// NewCGroup returns a new *CGroup from a given path. +func NewCGroup(path string) *CGroup { + return &CGroup{path: path} +} + +// Path returns the path of the CGroup*. +func (cg *CGroup) Path() string { + return cg.path +} + +// ParamPath returns the path of the given cgroup param under itself. +func (cg *CGroup) ParamPath(param string) string { + return filepath.Join(cg.path, param) +} + +// readFirstLine reads the first line from a cgroup param file. +func (cg *CGroup) readFirstLine(param string) (string, error) { + paramFile, err := os.Open(cg.ParamPath(param)) + if err != nil { + return "", err + } + defer paramFile.Close() + + scanner := bufio.NewScanner(paramFile) + if scanner.Scan() { + return scanner.Text(), nil + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", io.ErrUnexpectedEOF +} + +// readInt parses the first line from a cgroup param file as int. +func (cg *CGroup) readInt(param string) (int, error) { + text, err := cg.readFirstLine(param) + if err != nil { + return 0, err + } + return strconv.Atoi(text) +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup_test.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup_test.go new file mode 100644 index 00000000000..eae5b13788e --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup_test.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCGroupParamPath(t *testing.T) { + cgroup := NewCGroup("/sys/fs/cgroup/cpu") + assert.Equal(t, "/sys/fs/cgroup/cpu", cgroup.Path()) + assert.Equal(t, "/sys/fs/cgroup/cpu/cpu.cfs_quota_us", cgroup.ParamPath("cpu.cfs_quota_us")) +} + +func TestCGroupReadFirstLine(t *testing.T) { + testTable := []struct { + name string + paramName string + expectedContent string + shouldHaveError bool + }{ + { + name: "cpu", + paramName: "cpu.cfs_period_us", + expectedContent: "100000", + shouldHaveError: false, + }, + { + name: "absent", + paramName: "cpu.stat", + expectedContent: "", + shouldHaveError: true, + }, + { + name: "empty", + paramName: "cpu.cfs_quota_us", + expectedContent: "", + shouldHaveError: true, + }, + } + + for _, tt := range testTable { + cgroupPath := filepath.Join(testDataCGroupsPath, tt.name) + cgroup := NewCGroup(cgroupPath) + + content, err := cgroup.readFirstLine(tt.paramName) + assert.Equal(t, tt.expectedContent, content, tt.name) + + if tt.shouldHaveError { + assert.Error(t, err, tt.name) + } else { + assert.NoError(t, err, tt.name) + } + } +} + +func TestCGroupReadInt(t *testing.T) { + testTable := []struct { + name string + paramName string + expectedValue int + shouldHaveError bool + }{ + { + name: "cpu", + paramName: "cpu.cfs_period_us", + expectedValue: 100000, + shouldHaveError: false, + }, + { + name: "empty", + paramName: "cpu.cfs_quota_us", + expectedValue: 0, + shouldHaveError: true, + }, + { + name: "invalid", + paramName: "cpu.cfs_quota_us", + expectedValue: 0, + shouldHaveError: true, + }, + { + name: "absent", + paramName: "cpu.cfs_quota_us", + expectedValue: 0, + shouldHaveError: true, + }, + } + + for _, tt := range testTable { + cgroupPath := filepath.Join(testDataCGroupsPath, tt.name) + cgroup := NewCGroup(cgroupPath) + + value, err := cgroup.readInt(tt.paramName) + assert.Equal(t, tt.expectedValue, value, "%s/%s", tt.name, tt.paramName) + + if tt.shouldHaveError { + assert.Error(t, err, tt.name) + } else { + assert.NoError(t, err, tt.name) + } + } +} + +func TestCGroupMemory(t *testing.T) { + process, err := NewCGroupsForCurrentProcess() + require.NoError(t, err) + quota, b, err := process.MemoryQuota() + require.True(t, b) + require.NoError(t, err) + fmt.Println(quota) +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups.go new file mode 100644 index 00000000000..27adbef7353 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups.go @@ -0,0 +1,122 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Keep the original Uber license.
+
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// +build linux
+
+package cgroups
+
+const (
+	// _cgroupFSType is the Linux CGroup file system type used in
+	// `/proc/$PID/mountinfo`.
+	_cgroupFSType = "cgroup"
+	// _cgroupSubsysCPU is the CPU CGroup subsystem.
+	_cgroupSubsysCPU = "cpu"
+	// _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+	_cgroupSubsysCPUAcct = "cpuacct"
+	// _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+	_cgroupSubsysCPUSet = "cpuset"
+	// _cgroupSubsysMemory is the Memory CGroup subsystem.
+	_cgroupSubsysMemory = "memory"
+
+	_cgroupMemoryLimitBytes = "memory.limit_in_bytes"
+)
+
+const (
+	_procPathCGroup    = "/proc/self/cgroup"
+	_procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups is a map that associates each CGroup with its subsystem name.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from the given `mountinfo` and `cgroup`
+// files for some process under the `/proc` file system (see also proc(5) for
+// more information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+	cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+	if err != nil {
+		return nil, err
+	}
+
+	cgroups := make(CGroups)
+	newMountPoint := func(mp *MountPoint) error {
+		if mp.FSType != _cgroupFSType {
+			return nil
+		}
+
+		for _, opt := range mp.SuperOptions {
+			subsys, exists := cgroupSubsystems[opt]
+			if !exists {
+				continue
+			}
+
+			cgroupPath, err := mp.Translate(subsys.Name)
+			if err != nil {
+				return err
+			}
+			cgroups[opt] = NewCGroup(cgroupPath)
+		}
+
+		return nil
+	}
+
+	if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+		return nil, err
+	}
+	return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+	return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// MemoryQuota returns the total memory limit of the process's memory cgroup,
+// as reported by `memory.limit_in_bytes`. If the value of
+// `memory.limit_in_bytes` was not set (-1), the method returns `(-1, false, nil)`.
+func (cg CGroups) MemoryQuota() (int64, bool, error) {
+	memCGroup, exists := cg[_cgroupSubsysMemory]
+	if !exists {
+		return -1, false, nil
+	}
+
+	memLimitBytes, err := memCGroup.readInt(_cgroupMemoryLimitBytes)
+	if defined := memLimitBytes > 0; err != nil || !defined {
+		return -1, defined, err
+	}
+	return int64(memLimitBytes), true, nil
+}
diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups_test.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups_test.go
new file mode 100644
index 00000000000..8f81db70aca
--- /dev/null
+++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups_test.go
@@ -0,0 +1,137 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Keep the original Uber license.
+
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
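The README's `limit_percentage` option derives the hard limit from the total memory reported by this cgroups package. A hedged sketch of that derivation, assuming a helper named `percentageLimitMiB` (hypothetical, for illustration only; the actual wiring lives in the processor, not in this package):

```go
package main

import "fmt"

// percentageLimitMiB is a hypothetical helper: it converts a cgroup memory
// quota in bytes (as returned by MemoryQuota above) and a limit_percentage
// value into a hard limit in MiB.
func percentageLimitMiB(quotaBytes int64, limitPercentage uint32) int64 {
	return quotaBytes * int64(limitPercentage) / 100 / (1 << 20)
}

func main() {
	// A 4 GiB cgroup quota with limit_percentage: 50 yields a 2048 MiB hard limit.
	fmt.Println(percentageLimitMiB(4<<30, 50))
}
```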
+ +// +build linux + +package cgroups + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewCGroups(t *testing.T) { + cgroupsProcCGroupPath := filepath.Join(testDataProcPath, "cgroups", "cgroup") + cgroupsProcMountInfoPath := filepath.Join(testDataProcPath, "cgroups", "mountinfo") + + testTable := []struct { + subsys string + path string + }{ + {_cgroupSubsysCPU, "/sys/fs/cgroup/cpu,cpuacct"}, + {_cgroupSubsysCPUAcct, "/sys/fs/cgroup/cpu,cpuacct"}, + {_cgroupSubsysCPUSet, "/sys/fs/cgroup/cpuset"}, + {_cgroupSubsysMemory, "/sys/fs/cgroup/memory/large"}, + } + + cgroups, err := NewCGroups(cgroupsProcMountInfoPath, cgroupsProcCGroupPath) + assert.Equal(t, len(testTable), len(cgroups)) + assert.NoError(t, err) + + for _, tt := range testTable { + cgroup, exists := cgroups[tt.subsys] + assert.True(t, exists, "%q expected to present in `cgroups`", tt.subsys) + assert.Equal(t, tt.path, cgroup.path, "%q expected for `cgroups[%q].path`, got %q", tt.path, tt.subsys, cgroup.path) + } +} + +func TestNewCGroupsWithErrors(t *testing.T) { + testTable := []struct { + mountInfoPath string + cgroupPath string + }{ + {"non-existing-file", "/dev/null"}, + {"/dev/null", "non-existing-file"}, + { + "/dev/null", + filepath.Join(testDataProcPath, "invalid-cgroup", "cgroup"), + }, + { + filepath.Join(testDataProcPath, "invalid-mountinfo", "mountinfo"), + "/dev/null", + }, + { + filepath.Join(testDataProcPath, "untranslatable", "mountinfo"), + filepath.Join(testDataProcPath, "untranslatable", "cgroup"), + }, + } + + for _, tt := range testTable { + cgroups, err := NewCGroups(tt.mountInfoPath, tt.cgroupPath) + assert.Nil(t, cgroups) + assert.Error(t, err) + } +} + +func TestCGroupsCPUQuota(t *testing.T) { + testTable := []struct { + name string + expectedQuota int64 + expectedDefined bool + shouldHaveError bool + }{ + { + name: "undefined", + expectedQuota: int64(-1.0), + expectedDefined: false, + shouldHaveError: false, + }, + } + + cgroups := make(CGroups) + + quota, defined, err := cgroups.MemoryQuota() + assert.Equal(t, int64(-1), quota, "nonexistent") + assert.False(t, defined, "nonexistent") + assert.NoError(t, err, "nonexistent") + + for _, tt := range testTable { + cgroupPath := filepath.Join(testDataCGroupsPath, tt.name) + cgroups[_cgroupSubsysCPU] = NewCGroup(cgroupPath) + + quota, defined, err := cgroups.MemoryQuota() + assert.Equal(t, tt.expectedQuota, quota, tt.name) + assert.Equal(t, tt.expectedDefined, defined, tt.name) + + if tt.shouldHaveError { + assert.Error(t, err, tt.name) + } else { + assert.NoError(t, err, tt.name) + } + } +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/doc.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/doc.go new file mode 100644 index 00000000000..ed5c98c7ec0 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/doc.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package cgroups provides utilities to access Linux control group (CGroups) +// parameters (total memory, for example) for a given process. +// The original implementation is taken from https://github.com/uber-go/automaxprocs +package cgroups diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/errors.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/errors.go new file mode 100644 index 00000000000..d9681758464 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/errors.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint.go new file mode 100644 index 00000000000..1fe78f43316 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint.go @@ -0,0 +1,182 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. +func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + fsTypeStart := _miFieldIDOptionalFields + i + 1 + + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." || strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. 
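+// Parsing stops at the first line that fails to parse or at the first error
+// returned by the newMountPoint callback; that error is returned to the caller.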
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(procPathMountInfo) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint_test.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint_test.go new file mode 100644 index 00000000000..0fd9749c5e1 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint_test.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// +build linux + +package cgroups + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewMountPointFromLine(t *testing.T) { + testTable := []struct { + name string + line string + expected *MountPoint + }{ + { + name: "root", + line: "1 0 252:0 / / rw,noatime - ext4 /dev/dm-0 rw,errors=remount-ro,data=ordered", + expected: &MountPoint{ + MountID: 1, + ParentID: 0, + DeviceID: "252:0", + Root: "/", + MountPoint: "/", + Options: []string{"rw", "noatime"}, + OptionalFields: []string{}, + FSType: "ext4", + MountSource: "/dev/dm-0", + SuperOptions: []string{"rw", "errors=remount-ro", "data=ordered"}, + }, + }, + { + name: "cgroup", + line: "31 23 0:24 /docker /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime shared:1 - cgroup cgroup rw,cpu", + expected: &MountPoint{ + MountID: 31, + ParentID: 23, + DeviceID: "0:24", + Root: "/docker", + MountPoint: "/sys/fs/cgroup/cpu", + Options: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + OptionalFields: []string{"shared:1"}, + FSType: "cgroup", + MountSource: "cgroup", + SuperOptions: []string{"rw", "cpu"}, + }, + }, + } + + for _, tt := range testTable { + mountPoint, err := NewMountPointFromLine(tt.line) + assert.Equal(t, tt.expected, mountPoint, tt.name) + assert.NoError(t, err, tt.name) + } +} + +func TestNewMountPointFromLineErr(t *testing.T) { + linesWithInvalidIDs := []string{ + "invalidMountID 0 252:0 / / rw,noatime - ext4 /dev/dm-0 rw,errors=remount-ro,data=ordered", + "1 invalidParentID 252:0 / / rw,noatime - ext4 /dev/dm-0 rw,errors=remount-ro,data=ordered", + "invalidMountID invalidParentID 252:0 / / rw,noatime - ext4 /dev/dm-0 rw,errors=remount-ro,data=ordered", + } + + for i, line := range linesWithInvalidIDs { + mountPoint, err := NewMountPointFromLine(line) + assert.Nil(t, mountPoint, "[%d] %q", i, line) + assert.Error(t, err, line) + } + + linesWithInvalidFields := []string{ + "1 0 252:0 / / rw,noatime ext4 /dev/dm-0 rw,errors=remount-ro,data=ordered", + "1 0 252:0 / / rw,noatime shared:1 - ext4 /dev/dm-0", + "1 0 252:0 / / rw,noatime shared:1 ext4 - /dev/dm-0 rw,errors=remount-ro,data=ordered", + "1 0 252:0 / / rw,noatime shared:1 ext4 /dev/dm-0 rw,errors=remount-ro,data=ordered", + "random line", + } + + for i, line := range linesWithInvalidFields { + mountPoint, err := NewMountPointFromLine(line) + errExpected := mountPointFormatInvalidError{line} + + assert.Nil(t, mountPoint, "[%d] %q", i, line) + assert.Equal(t, err, errExpected, "[%d] %q", i, line) + } +} + +func TestMountPointTranslate(t *testing.T) { + line := "31 23 0:24 /docker/0123456789abcdef /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime shared:1 - cgroup cgroup rw,cpu" + cgroupMountPoint, err := NewMountPointFromLine(line) + + assert.NotNil(t, cgroupMountPoint) + assert.NoError(t, err) + + testTable := []struct { + name string + pathToTranslate string + pathTranslated string + }{ + { + name: "root", + pathToTranslate: "/docker/0123456789abcdef", + pathTranslated: "/sys/fs/cgroup/cpu", + }, + { + name: "root-with-extra-slash", + pathToTranslate: "/docker/0123456789abcdef/", + pathTranslated: "/sys/fs/cgroup/cpu", + }, + { + name: "descendant-from-root", + pathToTranslate: "/docker/0123456789abcdef/large/cpu.cfs_quota_us", + pathTranslated: "/sys/fs/cgroup/cpu/large/cpu.cfs_quota_us", + }, + } + + for _, tt := range testTable { + path, err := cgroupMountPoint.Translate(tt.pathToTranslate) + assert.Equal(t, tt.pathTranslated, path, tt.name) + assert.NoError(t, err, tt.name) + } +} + +func TestMountPointTranslateError(t 
*testing.T) { + line := "31 23 0:24 /docker/0123456789abcdef /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime shared:1 - cgroup cgroup rw,cpu" + cgroupMountPoint, err := NewMountPointFromLine(line) + + assert.NotNil(t, cgroupMountPoint) + assert.NoError(t, err) + + inaccessiblePaths := []string{ + "/", + "/docker", + "/docker/0123456789abcdef-let-me-hack-this-path", + "/docker/0123456789abcde/abc/../../def", + "/system.slice/docker.service", + } + + for i, path := range inaccessiblePaths { + translated, err := cgroupMountPoint.Translate(path) + errExpected := pathNotExposedFromMountPointError{ + mountPoint: cgroupMountPoint.MountPoint, + root: cgroupMountPoint.Root, + path: path, + } + + assert.Equal(t, "", translated, "inaccessiblePaths[%d] == %q", i, path) + assert.Equal(t, errExpected, err, "inaccessiblePaths[%d] == %q", i, path) + } + + relPaths := []string{ + "docker", + "docker/0123456789abcde/large", + "system.slice/docker.service", + } + + for i, path := range relPaths { + translated, err := cgroupMountPoint.Translate(path) + + assert.Equal(t, "", translated, "relPaths[%d] == %q", i, path) + assert.Error(t, err, path) + } +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys.go new file mode 100644 index 00000000000..b92119c501e --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. +type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.Split(line, _cgroupSep) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. +func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(procPathCGroup) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys_test.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys_test.go new file mode 100644 index 00000000000..550374209a1 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys_test.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewCGroupSubsysFromLine(t *testing.T) { + testTable := []struct { + name string + line string + expectedSubsys *CGroupSubsys + }{ + { + name: "single-subsys", + line: "1:cpu:/", + expectedSubsys: &CGroupSubsys{ + ID: 1, + Subsystems: []string{"cpu"}, + Name: "/", + }, + }, + { + name: "multi-subsys", + line: "8:cpu,cpuacct,cpuset:/docker/1234567890abcdef", + expectedSubsys: &CGroupSubsys{ + ID: 8, + Subsystems: []string{"cpu", "cpuacct", "cpuset"}, + Name: "/docker/1234567890abcdef", + }, + }, + } + + for _, tt := range testTable { + subsys, err := NewCGroupSubsysFromLine(tt.line) + assert.Equal(t, tt.expectedSubsys, subsys, tt.name) + assert.NoError(t, err, tt.name) + } +} + +func TestNewCGroupSubsysFromLineErr(t *testing.T) { + lines := []string{ + "1:cpu", + "1:cpu,cpuacct:/:/necessary-field", + "not-a-number:cpu:/", + } + _, parseError := strconv.Atoi("not-a-number") + + testTable := []struct { + name string + line string + expectedError error + }{ + { + name: "fewer-fields", + line: lines[0], + expectedError: cgroupSubsysFormatInvalidError{lines[0]}, + }, + { + name: "more-fields", + line: lines[1], + expectedError: cgroupSubsysFormatInvalidError{lines[1]}, + }, + { + name: "illegal-id", + line: lines[2], + expectedError: parseError, + }, + } + + for _, tt := range testTable { + subsys, err := NewCGroupSubsysFromLine(tt.line) + assert.Nil(t, subsys, tt.name) + assert.Equal(t, tt.expectedError, err, tt.name) + } +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us new file mode 100644 index 00000000000..f7393e847d3 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us @@ -0,0 +1 @@ +100000 diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us new file mode 100644 index 00000000000..26f3b3ddf28 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us @@ -0,0 +1 @@ +600000 diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us new file mode 100644 index 00000000000..e69de29bb2d diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us new file mode 100644 index 00000000000..f43dfb15698 --- /dev/null +++ 
b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us @@ -0,0 +1 @@ +non-an-integer diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us new file mode 100644 index 00000000000..959e88a89af --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us @@ -0,0 +1 @@ +800000 diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us new file mode 100644 index 00000000000..f7393e847d3 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us @@ -0,0 +1 @@ +100000 diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us new file mode 100644 index 00000000000..3a2e3f4984a --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us @@ -0,0 +1 @@ +-1 diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/cgroup b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/cgroup new file mode 100644 index 00000000000..1724dc83892 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/cgroup @@ -0,0 +1,3 @@ +3:memory:/docker/large +2:cpu,cpuacct:/docker +1:cpuset:/ diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/mountinfo b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/mountinfo new file mode 100644 index 00000000000..e68af08a576 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/mountinfo @@ -0,0 +1,8 @@ +1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=reordered +2 1 0:1 / /dev rw,relatime shared:2 - devtmpfs udev rw,size=10240k,nr_inodes=16487629,mode=755 +3 1 0:2 / /proc rw,nosuid,nodev,noexec,relatime shared:3 - proc proc rw +4 1 0:3 / /sys rw,nosuid,nodev,noexec,relatime shared:4 - sysfs sysfs rw +5 4 0:4 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:5 - tmpfs tmpfs ro,mode=755 +6 5 0:5 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:6 - cgroup cgroup rw,cpuset +7 5 0:6 /docker /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 - cgroup cgroup rw,cpu,cpuacct +8 5 0:7 /docker /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:8 - cgroup cgroup rw,memory diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-cgroup/cgroup b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-cgroup/cgroup new file mode 100644 index 00000000000..6d9b22bd764 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-cgroup/cgroup @@ -0,0 +1,2 @@ +1:cpu:/cpu +invalid-line: diff --git 
a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo new file mode 100644 index 00000000000..3c8dabe4c91 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo @@ -0,0 +1 @@ +1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/cgroup b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/cgroup new file mode 100644 index 00000000000..44519662184 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/cgroup @@ -0,0 +1,2 @@ +1:cpu:/docker +2:cpuacct:/docker diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/mountinfo b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/mountinfo new file mode 100644 index 00000000000..245daae6eb4 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/mountinfo @@ -0,0 +1,2 @@ +31 23 0:24 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime shared:1 - cgroup cgroup rw,cpu +32 23 0:25 /docker/0123456789abcdef /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime shared:2 - cgroup cgroup rw,cpuacct diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/util_test.go b/internal/otel_collector/processor/memorylimiter/internal/cgroups/util_test.go new file mode 100644 index 00000000000..f4461055acb --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/cgroups/util_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "os" + "path/filepath" +) + +var ( + pwd = mustGetWd() + testDataPath = filepath.Join(pwd, "testdata") + testDataCGroupsPath = filepath.Join(testDataPath, "cgroups") + testDataProcPath = filepath.Join(testDataPath, "proc") +) + +func mustGetWd() string { + pwd, err := os.Getwd() + if err != nil { + panic(err) + } + return pwd +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux.go b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux.go new file mode 100644 index 00000000000..611f4a0fac1 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package iruntime + +import "go.opentelemetry.io/collector/processor/memorylimiter/internal/cgroups" + +// TotalMemory returns total available memory. +// This implementation is meant for linux and uses cgroups to determine available memory. +func TotalMemory() (int64, error) { + cgroups, err := cgroups.NewCGroupsForCurrentProcess() + if err != nil { + return 0, err + } + memoryQuota, defined, err := cgroups.MemoryQuota() + if err != nil || !defined { + return 0, err + } + return memoryQuota, nil +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux_test.go b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux_test.go new file mode 100644 index 00000000000..ead751ae048 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux_test.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
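The cgroups helpers vendored above fit together as follows: NewCGroupSubsysFromLine parses one `/proc/$PID/cgroup` record into its hierarchy ID, subsystem list, and cgroup name, and TotalMemory (defined just above) resolves the container memory quota from those records. A minimal sketch of the parsing contract, matching the values exercised in subsys_test.go; this is illustrative only and would compile only inside the collector module, since the cgroups package is internal:

	subsys, err := cgroups.NewCGroupSubsysFromLine("8:cpu,cpuacct,cpuset:/docker/1234567890abcdef")
	if err != nil {
		// A malformed record yields cgroupSubsysFormatInvalidError or a strconv error.
		log.Fatal(err)
	}
	fmt.Println(subsys.ID)         // 8
	fmt.Println(subsys.Subsystems) // [cpu cpuacct cpuset]
	fmt.Println(subsys.Name)       // /docker/1234567890abcdef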
+ +// +build linux + +package iruntime + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTotalMemory(t *testing.T) { + totalMemory, err := TotalMemory() + require.NoError(t, err) + assert.True(t, totalMemory > 0) +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other.go b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other.go new file mode 100644 index 00000000000..304efbf728e --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux + +package iruntime + +import ( + "fmt" +) + +var errTotalMemoryNotAvailable = fmt.Errorf("reading cgroups total memory is available only on linux") + +// TotalMemory returns total available memory. +// This is non-Linux version that returns -1 and errTotalMemoryNotAvailable. +func TotalMemory() (int64, error) { + return -1, errTotalMemoryNotAvailable +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other_test.go b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other_test.go new file mode 100644 index 00000000000..6ae3a9be712 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other_test.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux + +package iruntime + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTotalMemory(t *testing.T) { + totalMemory, err := TotalMemory() + require.Error(t, err) + assert.Equal(t, int64(-1), totalMemory) +} diff --git a/internal/otel_collector/processor/memorylimiter/memorylimiter.go b/internal/otel_collector/processor/memorylimiter/memorylimiter.go new file mode 100644 index 00000000000..5954e27f038 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/memorylimiter.go @@ -0,0 +1,339 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memorylimiter + +import ( + "context" + "errors" + "fmt" + "runtime" + "sync/atomic" + "time" + + "go.opencensus.io/stats" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/memorylimiter/internal/iruntime" +) + +const ( + mibBytes = 1024 * 1024 +) + +var ( + // errForcedDrop will be returned to callers of ConsumeTraceData to indicate + // that data is being dropped due to high memory usage. + errForcedDrop = errors.New("data dropped due to high memory usage") + + // Construction errors + + errCheckIntervalOutOfRange = errors.New( + "checkInterval must be greater than zero") + + errLimitOutOfRange = errors.New( + "memAllocLimit or memoryLimitPercentage must be greater than zero") + + errMemSpikeLimitOutOfRange = errors.New( + "memSpikeLimit must be smaller than memAllocLimit") + + errPercentageLimitOutOfRange = errors.New( + "memoryLimitPercentage and memorySpikePercentage must be greater than zero and less than or equal to hundred", + ) +) + +// make it overridable by tests +var getMemoryFn = iruntime.TotalMemory + +type memoryLimiter struct { + usageChecker memUsageChecker + + memCheckWait time.Duration + ballastSize uint64 + + // forceDrop is used atomically to indicate when data should be dropped. + forceDrop int64 + + ticker *time.Ticker + + lastGCDone time.Time + + // The function to read the mem values is set as a reference to help with + // testing different values. + readMemStatsFn func(m *runtime.MemStats) + + // Fields used for logging. + procName string + logger *zap.Logger + configMismatchedLogged bool + + obsrep *obsreport.ProcessorObsReport +} + +// Minimum interval between forced GC when in soft limited mode. We don't want to +// do GCs too frequently since it is a CPU-heavy operation. +const minGCIntervalWhenSoftLimited = 10 * time.Second + +// newMemoryLimiter returns a new memorylimiter processor. 
+func newMemoryLimiter(logger *zap.Logger, cfg *Config) (*memoryLimiter, error) { + ballastSize := uint64(cfg.BallastSizeMiB) * mibBytes + + if cfg.CheckInterval <= 0 { + return nil, errCheckIntervalOutOfRange + } + if cfg.MemoryLimitMiB == 0 && cfg.MemoryLimitPercentage == 0 { + return nil, errLimitOutOfRange + } + + usageChecker, err := getMemUsageChecker(cfg, logger) + if err != nil { + return nil, err + } + + logger.Info("Memory limiter configured", + zap.Uint64("limit_mib", usageChecker.memAllocLimit), + zap.Uint64("spike_limit_mib", usageChecker.memSpikeLimit), + zap.Duration("check_interval", cfg.CheckInterval)) + + ml := &memoryLimiter{ + usageChecker: *usageChecker, + memCheckWait: cfg.CheckInterval, + ballastSize: ballastSize, + ticker: time.NewTicker(cfg.CheckInterval), + readMemStatsFn: runtime.ReadMemStats, + procName: cfg.Name(), + logger: logger, + obsrep: obsreport.NewProcessorObsReport(configtelemetry.GetMetricsLevelFlagValue(), cfg.Name()), + } + + ml.startMonitoring() + + return ml, nil +} + +func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, error) { + memAllocLimit := uint64(cfg.MemoryLimitMiB) * mibBytes + memSpikeLimit := uint64(cfg.MemorySpikeLimitMiB) * mibBytes + if cfg.MemoryLimitMiB != 0 { + return newFixedMemUsageChecker(memAllocLimit, memSpikeLimit) + } + totalMemory, err := getMemoryFn() + if err != nil { + return nil, fmt.Errorf("failed to get total memory, use fixed memory settings (limit_mib): %w", err) + } + logger.Info("Using percentage memory limiter", + zap.Int64("total_memory", totalMemory), + zap.Uint32("limit_percentage", cfg.MemoryLimitPercentage), + zap.Uint32("spike_limit_percentage", cfg.MemorySpikePercentage)) + return newPercentageMemUsageChecker(totalMemory, int64(cfg.MemoryLimitPercentage), int64(cfg.MemorySpikePercentage)) +} + +func (ml *memoryLimiter) shutdown(context.Context) error { + ml.ticker.Stop() + return nil +} + +// ProcessTraces implements the TProcessor interface +func (ml *memoryLimiter) ProcessTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { + numSpans := td.SpanCount() + if ml.forcingDrop() { + stats.Record( + ctx, + processor.StatDroppedSpanCount.M(int64(numSpans)), + processor.StatTraceBatchesDroppedCount.M(1)) + + // TODO: actually to be 100% sure that this is "refused" and not "dropped" + // it is necessary to check the pipeline to see if this is directly connected + // to a receiver (ie.: a receiver is on the call stack). For now it + // assumes that the pipeline is properly configured and a receiver is on the + // callstack. + ml.obsrep.TracesRefused(ctx, numSpans) + + return td, errForcedDrop + } + + // Even if the next consumer returns error record the data as accepted by + // this processor. + ml.obsrep.TracesAccepted(ctx, numSpans) + return td, nil +} + +// ProcessMetrics implements the MProcessor interface +func (ml *memoryLimiter) ProcessMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { + _, numDataPoints := md.MetricAndDataPointCount() + if ml.forcingDrop() { + // TODO: actually to be 100% sure that this is "refused" and not "dropped" + // it is necessary to check the pipeline to see if this is directly connected + // to a receiver (ie.: a receiver is on the call stack). For now it + // assumes that the pipeline is properly configured and a receiver is on the + // callstack. 
+		ml.obsrep.MetricsRefused(ctx, numDataPoints)
+
+		return md, errForcedDrop
+	}
+
+	// Even if the next consumer returns error record the data as accepted by
+	// this processor.
+	ml.obsrep.MetricsAccepted(ctx, numDataPoints)
+	return md, nil
+}
+
+// ProcessLogs implements the LProcessor interface
+func (ml *memoryLimiter) ProcessLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) {
+	numRecords := ld.LogRecordCount()
+	if ml.forcingDrop() {
+		// TODO: actually to be 100% sure that this is "refused" and not "dropped"
+		// it is necessary to check the pipeline to see if this is directly connected
+		// to a receiver (ie.: a receiver is on the call stack). For now it
+		// assumes that the pipeline is properly configured and a receiver is on the
+		// callstack.
+		ml.obsrep.LogsRefused(ctx, numRecords)
+
+		return ld, errForcedDrop
+	}
+
+	// Even if the next consumer returns error record the data as accepted by
+	// this processor.
+	ml.obsrep.LogsAccepted(ctx, numRecords)
+	return ld, nil
+}
+
+func (ml *memoryLimiter) readMemStats() *runtime.MemStats {
+	ms := &runtime.MemStats{}
+	ml.readMemStatsFn(ms)
+	// If properly configured, ms.Alloc should be at least ml.ballastSize, but since
+	// a misconfiguration is possible check for that here.
+	if ms.Alloc >= ml.ballastSize {
+		ms.Alloc -= ml.ballastSize
+	} else if !ml.configMismatchedLogged {
+		// This indicates misconfiguration. Log it once.
+		ml.configMismatchedLogged = true
+		ml.logger.Warn(typeStr + " is likely incorrectly configured. " + ballastSizeMibKey +
+			" must be set equal to --mem-ballast-size-mib command line option.")
+	}
+
+	return ms
+}
+
+// startMonitoring starts a ticker-driven goroutine that will check memory usage
+// every checkInterval period.
+func (ml *memoryLimiter) startMonitoring() {
+	go func() {
+		for range ml.ticker.C {
+			ml.checkMemLimits()
+		}
+	}()
+}
+
+// forcingDrop indicates when memory resources need to be released.
+func (ml *memoryLimiter) forcingDrop() bool {
+	return atomic.LoadInt64(&ml.forceDrop) != 0
+}
+
+func (ml *memoryLimiter) setForcingDrop(b bool) {
+	var i int64
+	if b {
+		i = 1
+	}
+	atomic.StoreInt64(&ml.forceDrop, i)
+}
+
+func memstatToZapField(ms *runtime.MemStats) zap.Field {
+	return zap.Uint64("cur_mem_mib", ms.Alloc/1024/1024)
+}
+
+func (ml *memoryLimiter) doGCandReadMemStats() *runtime.MemStats {
+	runtime.GC()
+	ml.lastGCDone = time.Now()
+	ms := ml.readMemStats()
+	ml.logger.Info("Memory usage after GC.", memstatToZapField(ms))
+	return ms
+}
+
+func (ml *memoryLimiter) checkMemLimits() {
+	ms := ml.readMemStats()
+
+	ml.logger.Debug("Currently used memory.", memstatToZapField(ms))
+
+	if ml.usageChecker.aboveHardLimit(ms) {
+		ml.logger.Warn("Memory usage is above hard limit. Forcing a GC.", memstatToZapField(ms))
+		ms = ml.doGCandReadMemStats()
+	}
+
+	// Remember current dropping state.
+	wasForcingDrop := ml.forcingDrop()
+
+	// Check if the memory usage is above the soft limit.
+	mustForceDrop := ml.usageChecker.aboveSoftLimit(ms)
+
+	if wasForcingDrop && !mustForceDrop {
+		// Was previously dropping but enough memory is available now, no need to limit.
+		ml.logger.Info("Memory usage back within limits. Resuming normal operation.", memstatToZapField(ms))
+	}
+
+	if !wasForcingDrop && mustForceDrop {
+		// We are above soft limit, do a GC if it wasn't done recently and see if
+		// it brings memory usage below the soft limit.
+		if time.Since(ml.lastGCDone) > minGCIntervalWhenSoftLimited {
+			ml.logger.Info("Memory usage is above soft limit.
Forcing a GC.", memstatToZapField(ms)) + ms = ml.doGCandReadMemStats() + // Check the limit again to see if GC helped. + mustForceDrop = ml.usageChecker.aboveSoftLimit(ms) + } + + if mustForceDrop { + ml.logger.Warn("Memory usage is above soft limit. Dropping data.", memstatToZapField(ms)) + } + } + + ml.setForcingDrop(mustForceDrop) +} + +type memUsageChecker struct { + memAllocLimit uint64 + memSpikeLimit uint64 +} + +func (d memUsageChecker) aboveSoftLimit(ms *runtime.MemStats) bool { + return ms.Alloc >= d.memAllocLimit-d.memSpikeLimit +} + +func (d memUsageChecker) aboveHardLimit(ms *runtime.MemStats) bool { + return ms.Alloc >= d.memAllocLimit +} + +func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) (*memUsageChecker, error) { + if memSpikeLimit >= memAllocLimit { + return nil, errMemSpikeLimitOutOfRange + } + if memSpikeLimit == 0 { + // If spike limit is unspecified use 20% of mem limit. + memSpikeLimit = memAllocLimit / 5 + } + return &memUsageChecker{ + memAllocLimit: memAllocLimit, + memSpikeLimit: memSpikeLimit, + }, nil +} + +func newPercentageMemUsageChecker(totalMemory int64, percentageLimit, percentageSpike int64) (*memUsageChecker, error) { + if percentageLimit > 100 || percentageLimit <= 0 || percentageSpike > 100 || percentageSpike <= 0 { + return nil, errPercentageLimitOutOfRange + } + return newFixedMemUsageChecker(uint64(percentageLimit*totalMemory)/100, uint64(percentageSpike*totalMemory)/100) +} diff --git a/internal/otel_collector/processor/memorylimiter/memorylimiter_test.go b/internal/otel_collector/processor/memorylimiter/memorylimiter_test.go new file mode 100644 index 00000000000..396038e5f9d --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/memorylimiter_test.go @@ -0,0 +1,410 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
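To make the threshold arithmetic in memorylimiter.go above concrete: the hard limit is memAllocLimit, the soft limit is memAllocLimit minus memSpikeLimit, and when the spike limit is unset it defaults to 20% of the allocation limit. A small in-package sketch of the checker (hypothetical, since memUsageChecker and mibBytes are unexported and only reachable from within the memorylimiter package):

	// limit_mib: 4000, spike_limit_mib: 500 -> hard limit 4000 MiB, soft limit 3500 MiB.
	chk, err := newFixedMemUsageChecker(4000*mibBytes, 500*mibBytes)
	if err != nil {
		log.Fatal(err)
	}
	ms := &runtime.MemStats{Alloc: 3600 * mibBytes}
	fmt.Println(chk.aboveSoftLimit(ms)) // true: 3600 >= 4000-500, so the limiter starts refusing data
	fmt.Println(chk.aboveHardLimit(ms)) // false: 3600 < 4000, so no forced GC yet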
+
+package memorylimiter
+
+import (
+	"context"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+	"go.opentelemetry.io/collector/processor/memorylimiter/internal/iruntime"
+	"go.opentelemetry.io/collector/processor/processorhelper"
+)
+
+func TestNew(t *testing.T) {
+	type args struct {
+		nextConsumer        consumer.TracesConsumer
+		checkInterval       time.Duration
+		memoryLimitMiB      uint32
+		memorySpikeLimitMiB uint32
+	}
+	sink := new(consumertest.TracesSink)
+	tests := []struct {
+		name    string
+		args    args
+		wantErr error
+	}{
+		{
+			name: "zero_checkInterval",
+			args: args{
+				nextConsumer: sink,
+			},
+			wantErr: errCheckIntervalOutOfRange,
+		},
+		{
+			name: "zero_memAllocLimit",
+			args: args{
+				nextConsumer:  sink,
+				checkInterval: 100 * time.Millisecond,
+			},
+			wantErr: errLimitOutOfRange,
+		},
+		{
+			name: "memSpikeLimit_gt_memAllocLimit",
+			args: args{
+				nextConsumer:        sink,
+				checkInterval:       100 * time.Millisecond,
+				memoryLimitMiB:      1,
+				memorySpikeLimitMiB: 2,
+			},
+			wantErr: errMemSpikeLimitOutOfRange,
+		},
+		{
+			name: "success",
+			args: args{
+				nextConsumer:   sink,
+				checkInterval:  100 * time.Millisecond,
+				memoryLimitMiB: 1024,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cfg := createDefaultConfig().(*Config)
+			cfg.CheckInterval = tt.args.checkInterval
+			cfg.MemoryLimitMiB = tt.args.memoryLimitMiB
+			cfg.MemorySpikeLimitMiB = tt.args.memorySpikeLimitMiB
+			got, err := newMemoryLimiter(zap.NewNop(), cfg)
+			if err != tt.wantErr {
+				t.Errorf("newMemoryLimiter() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != nil {
+				assert.NoError(t, got.shutdown(context.Background()))
+			}
+		})
+	}
+}
+
+// TestMetricsMemoryPressureResponse manipulates results from querying memory and
+// checks expected side effects.
+func TestMetricsMemoryPressureResponse(t *testing.T) {
+	var currentMemAlloc uint64
+	ml := &memoryLimiter{
+		usageChecker: memUsageChecker{
+			memAllocLimit: 1024,
+		},
+		readMemStatsFn: func(ms *runtime.MemStats) {
+			ms.Alloc = currentMemAlloc
+		},
+		obsrep: obsreport.NewProcessorObsReport(configtelemetry.LevelNone, ""),
+		logger: zap.NewNop(),
+	}
+	mp, err := processorhelper.NewMetricsProcessor(
+		&Config{
+			ProcessorSettings: configmodels.ProcessorSettings{
+				TypeVal: typeStr,
+				NameVal: typeStr,
+			},
+		},
+		consumertest.NewMetricsNop(),
+		ml,
+		processorhelper.WithCapabilities(processorCapabilities),
+		processorhelper.WithShutdown(ml.shutdown))
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	md := pdata.NewMetrics()
+
+	// Below memAllocLimit.
+	currentMemAlloc = 800
+	ml.checkMemLimits()
+	assert.NoError(t, mp.ConsumeMetrics(ctx, md))
+
+	// Above memAllocLimit.
+	currentMemAlloc = 1800
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, mp.ConsumeMetrics(ctx, md))
+
+	// Check ballast effect
+	ml.ballastSize = 1000
+
+	// Below memAllocLimit accounting for ballast.
+	currentMemAlloc = 800 + ml.ballastSize
+	ml.checkMemLimits()
+	assert.NoError(t, mp.ConsumeMetrics(ctx, md))
+
+	// Above memAllocLimit even accounting for ballast.
+	currentMemAlloc = 1800 + ml.ballastSize
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, mp.ConsumeMetrics(ctx, md))
+
+	// Restore ballast to default.
+	ml.ballastSize = 0
+
+	// Check spike limit
+	ml.usageChecker.memSpikeLimit = 512
+
+	// Below memSpikeLimit.
+	currentMemAlloc = 500
+	ml.checkMemLimits()
+	assert.NoError(t, mp.ConsumeMetrics(ctx, md))
+
+	// Above memSpikeLimit.
+	currentMemAlloc = 550
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, mp.ConsumeMetrics(ctx, md))
+
+}
+
+// TestTraceMemoryPressureResponse manipulates results from querying memory and
+// checks expected side effects.
+func TestTraceMemoryPressureResponse(t *testing.T) {
+	var currentMemAlloc uint64
+	ml := &memoryLimiter{
+		usageChecker: memUsageChecker{
+			memAllocLimit: 1024,
+		},
+		readMemStatsFn: func(ms *runtime.MemStats) {
+			ms.Alloc = currentMemAlloc
+		},
+		obsrep: obsreport.NewProcessorObsReport(configtelemetry.LevelNone, ""),
+		logger: zap.NewNop(),
+	}
+	tp, err := processorhelper.NewTraceProcessor(
+		&Config{
+			ProcessorSettings: configmodels.ProcessorSettings{
+				TypeVal: typeStr,
+				NameVal: typeStr,
+			},
+		},
+		consumertest.NewTracesNop(),
+		ml,
+		processorhelper.WithCapabilities(processorCapabilities),
+		processorhelper.WithShutdown(ml.shutdown))
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	td := pdata.NewTraces()
+
+	// Below memAllocLimit.
+	currentMemAlloc = 800
+	ml.checkMemLimits()
+	assert.NoError(t, tp.ConsumeTraces(ctx, td))
+
+	// Above memAllocLimit.
+	currentMemAlloc = 1800
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, tp.ConsumeTraces(ctx, td))
+
+	// Check ballast effect
+	ml.ballastSize = 1000
+
+	// Below memAllocLimit accounting for ballast.
+	currentMemAlloc = 800 + ml.ballastSize
+	ml.checkMemLimits()
+	assert.NoError(t, tp.ConsumeTraces(ctx, td))
+
+	// Above memAllocLimit even accounting for ballast.
+	currentMemAlloc = 1800 + ml.ballastSize
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, tp.ConsumeTraces(ctx, td))
+
+	// Restore ballast to default.
+	ml.ballastSize = 0
+
+	// Check spike limit
+	ml.usageChecker.memSpikeLimit = 512
+
+	// Below memSpikeLimit.
+	currentMemAlloc = 500
+	ml.checkMemLimits()
+	assert.NoError(t, tp.ConsumeTraces(ctx, td))
+
+	// Above memSpikeLimit.
+	currentMemAlloc = 550
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, tp.ConsumeTraces(ctx, td))
+
+}
+
+// TestLogMemoryPressureResponse manipulates results from querying memory and
+// checks expected side effects.
+func TestLogMemoryPressureResponse(t *testing.T) {
+	var currentMemAlloc uint64
+	ml := &memoryLimiter{
+		usageChecker: memUsageChecker{
+			memAllocLimit: 1024,
+		},
+		readMemStatsFn: func(ms *runtime.MemStats) {
+			ms.Alloc = currentMemAlloc
+		},
+		obsrep: obsreport.NewProcessorObsReport(configtelemetry.LevelNone, ""),
+		logger: zap.NewNop(),
+	}
+	lp, err := processorhelper.NewLogsProcessor(
+		&Config{
+			ProcessorSettings: configmodels.ProcessorSettings{
+				TypeVal: typeStr,
+				NameVal: typeStr,
+			},
+		},
+		consumertest.NewLogsNop(),
+		ml,
+		processorhelper.WithCapabilities(processorCapabilities),
+		processorhelper.WithShutdown(ml.shutdown))
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	ld := pdata.NewLogs()
+
+	// Below memAllocLimit.
+	currentMemAlloc = 800
+	ml.checkMemLimits()
+	assert.NoError(t, lp.ConsumeLogs(ctx, ld))
+
+	// Above memAllocLimit.
+	currentMemAlloc = 1800
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, lp.ConsumeLogs(ctx, ld))
+
+	// Check ballast effect
+	ml.ballastSize = 1000
+
+	// Below memAllocLimit accounting for ballast.
+	currentMemAlloc = 800 + ml.ballastSize
+	ml.checkMemLimits()
+	assert.NoError(t, lp.ConsumeLogs(ctx, ld))
+
+	// Above memAllocLimit even accounting for ballast.
+	currentMemAlloc = 1800 + ml.ballastSize
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, lp.ConsumeLogs(ctx, ld))
+
+	// Restore ballast to default.
+	ml.ballastSize = 0
+
+	// Check spike limit
+	ml.usageChecker.memSpikeLimit = 512
+
+	// Below memSpikeLimit.
+	currentMemAlloc = 500
+	ml.checkMemLimits()
+	assert.NoError(t, lp.ConsumeLogs(ctx, ld))
+
+	// Above memSpikeLimit.
+	currentMemAlloc = 550
+	ml.checkMemLimits()
+	assert.Equal(t, errForcedDrop, lp.ConsumeLogs(ctx, ld))
+}
+
+func TestGetDecision(t *testing.T) {
+	t.Run("fixed_limit", func(t *testing.T) {
+		d, err := getMemUsageChecker(&Config{MemoryLimitMiB: 100, MemorySpikeLimitMiB: 20}, zap.NewNop())
+		require.NoError(t, err)
+		assert.Equal(t, &memUsageChecker{
+			memAllocLimit: 100 * mibBytes,
+			memSpikeLimit: 20 * mibBytes,
+		}, d)
+	})
+	t.Run("fixed_limit_error", func(t *testing.T) {
+		d, err := getMemUsageChecker(&Config{MemoryLimitMiB: 20, MemorySpikeLimitMiB: 100}, zap.NewNop())
+		require.Error(t, err)
+		assert.Nil(t, d)
+	})
+
+	t.Cleanup(func() {
+		getMemoryFn = iruntime.TotalMemory
+	})
+	getMemoryFn = func() (int64, error) {
+		return 100 * mibBytes, nil
+	}
+	t.Run("percentage_limit", func(t *testing.T) {
+		d, err := getMemUsageChecker(&Config{MemoryLimitPercentage: 50, MemorySpikePercentage: 10}, zap.NewNop())
+		require.NoError(t, err)
+		assert.Equal(t, &memUsageChecker{
+			memAllocLimit: 50 * mibBytes,
+			memSpikeLimit: 10 * mibBytes,
+		}, d)
+	})
+	t.Run("percentage_limit_error", func(t *testing.T) {
+		d, err := getMemUsageChecker(&Config{MemoryLimitPercentage: 101, MemorySpikePercentage: 10}, zap.NewNop())
+		require.Error(t, err)
+		assert.Nil(t, d)
+		d, err = getMemUsageChecker(&Config{MemoryLimitPercentage: 99, MemorySpikePercentage: 101}, zap.NewNop())
+		require.Error(t, err)
+		assert.Nil(t, d)
+	})
+}
+
+func TestDropDecision(t *testing.T) {
+	decison1000Limit30Spike30, err := newPercentageMemUsageChecker(1000, 60, 30)
+	require.NoError(t, err)
+	decison1000Limit60Spike50, err := newPercentageMemUsageChecker(1000, 60, 50)
+	require.NoError(t, err)
+	decison1000Limit40Spike20, err := newPercentageMemUsageChecker(1000, 40, 20)
+	require.NoError(t, err)
+	decison1000Limit40Spike60, err := newPercentageMemUsageChecker(1000, 40, 60)
+	require.Error(t, err)
+	assert.Nil(t, decison1000Limit40Spike60)
+
+	tests := []struct {
+		name         string
+		usageChecker memUsageChecker
+		ms           *runtime.MemStats
+		shouldDrop   bool
+	}{
+		{
+			name:         "should drop over limit",
+			usageChecker: *decison1000Limit30Spike30,
+			ms:           &runtime.MemStats{Alloc: 600},
+			shouldDrop:   true,
+		},
+		{
+			name:         "should not drop",
+			usageChecker: *decison1000Limit30Spike30,
+			ms:           &runtime.MemStats{Alloc: 100},
+			shouldDrop:   false,
+		},
+		{
+			name: "should not drop spike, fixed usageChecker",
+			usageChecker: memUsageChecker{
+				memAllocLimit: 600,
+				memSpikeLimit: 500,
+			},
+			ms:         &runtime.MemStats{Alloc: 300},
+			shouldDrop: true,
+		},
+		{
+			name:         "should drop, spike, percentage usageChecker",
+			usageChecker: *decison1000Limit60Spike50,
+			ms:           &runtime.MemStats{Alloc: 300},
+			shouldDrop:   true,
+		},
+		{
+			name:         "should drop, spike, percentage usageChecker",
+			usageChecker:
*decison1000Limit40Spike20, + ms: &runtime.MemStats{Alloc: 250}, + shouldDrop: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + shouldDrop := test.usageChecker.aboveSoftLimit(test.ms) + assert.Equal(t, test.shouldDrop, shouldDrop) + }) + } +} diff --git a/internal/otel_collector/processor/memorylimiter/testdata/config.yaml b/internal/otel_collector/processor/memorylimiter/testdata/config.yaml new file mode 100644 index 00000000000..5f6504ba93c --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/testdata/config.yaml @@ -0,0 +1,36 @@ +receivers: + examplereceiver: + +processors: + memory_limiter: + # empty config + + memory_limiter/with-settings: + # check_interval is the time between measurements of memory usage for the + # purposes of avoiding going over the limits. Defaults to zero, so no + # checks will be performed. Values below 1 second are not recommended since + # it can result in unnecessary CPU consumption. + check_interval: 5s + + # Maximum amount of memory, in MiB, targeted to be allocated by the process heap. + # Note that typically the total memory usage of process will be about 50MiB higher + # than this value. + limit_mib: 4000 + + # The maximum, in MiB, spike expected between the measurements of memory usage. + spike_limit_mib: 500 + + # BallastSizeMiB is the size, in MiB, of the ballast size being used by the process. + # This must match the value of mem-ballast-size-mib command line option (if used) + # otherwise the memory limiter will not work correctly. + ballast_size_mib: 2000 + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [memory_limiter/with-settings] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/metrics.go b/internal/otel_collector/processor/metrics.go new file mode 100644 index 00000000000..faf78477544 --- /dev/null +++ b/internal/otel_collector/processor/metrics.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processor + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/translator/conventions" +) + +// Keys and stats for telemetry. 
+var ( + TagServiceNameKey, _ = tag.NewKey("service") + TagProcessorNameKey, _ = tag.NewKey(obsreport.ProcessorKey) + + StatReceivedSpanCount = stats.Int64( + "spans_received", + "counts the number of spans received", + stats.UnitDimensionless) + StatDroppedSpanCount = stats.Int64( + "spans_dropped", + "counts the number of spans dropped", + stats.UnitDimensionless) + + StatTraceBatchesDroppedCount = stats.Int64( + "trace_batches_dropped", + "counts the number of trace batches dropped", + stats.UnitDimensionless) +) + +// SpanCountStats represents span count stats grouped by service if DETAILED telemetry level is set, +// otherwise only overall span count is stored in serviceSpansCounts. +type SpanCountStats struct { + serviceSpansCounts map[string]int + allSpansCount int + isDetailed bool +} + +func NewSpanCountStats(td pdata.Traces) *SpanCountStats { + scm := &SpanCountStats{ + allSpansCount: td.SpanCount(), + } + if serviceTagsEnabled() { + scm.serviceSpansCounts = spanCountByResourceStringAttribute(td, conventions.AttributeServiceName) + scm.isDetailed = true + } + return scm +} + +func (scm *SpanCountStats) GetAllSpansCount() int { + return scm.allSpansCount +} + +// MetricTagKeys returns the metric tag keys according to the given telemetry level. +func MetricTagKeys() []tag.Key { + return []tag.Key{ + TagProcessorNameKey, + TagServiceNameKey, + } +} + +// MetricViews return the metrics views according to given telemetry level. +func MetricViews() []*view.View { + tagKeys := MetricTagKeys() + // There are some metrics enabled, return the views. + receivedBatchesView := &view.View{ + Name: "batches_received", + Measure: StatReceivedSpanCount, + Description: "The number of span batches received.", + TagKeys: tagKeys, + Aggregation: view.Count(), + } + droppedBatchesView := &view.View{ + Measure: StatTraceBatchesDroppedCount, + Description: "The number of span batches dropped.", + TagKeys: tagKeys, + Aggregation: view.Sum(), + } + receivedSpansView := &view.View{ + Name: StatReceivedSpanCount.Name(), + Measure: StatReceivedSpanCount, + Description: "The number of spans received.", + TagKeys: tagKeys, + Aggregation: view.Sum(), + } + droppedSpansView := &view.View{ + Name: StatDroppedSpanCount.Name(), + Measure: StatDroppedSpanCount, + Description: "The number of spans dropped.", + TagKeys: tagKeys, + Aggregation: view.Sum(), + } + + legacyViews := []*view.View{ + receivedBatchesView, + droppedBatchesView, + receivedSpansView, + droppedSpansView, + } + + return obsreport.ProcessorMetricViews("", legacyViews) +} + +// RecordsSpanCountMetrics reports span count metrics for specified measure. +func RecordsSpanCountMetrics(ctx context.Context, scm *SpanCountStats, measure *stats.Int64Measure) { + if scm.isDetailed { + for serviceName, spanCount := range scm.serviceSpansCounts { + statsTags := []tag.Mutator{tag.Insert(TagServiceNameKey, serviceName)} + _ = stats.RecordWithTags(ctx, statsTags, measure.M(int64(spanCount))) + } + return + } + + stats.Record(ctx, measure.M(int64(scm.allSpansCount))) +} + +func serviceTagsEnabled() bool { + level := configtelemetry.GetMetricsLevelFlagValue() + return level == configtelemetry.LevelDetailed +} + +// spanCountByResourceStringAttribute calculates the number of spans by resource specified by +// provided string attribute attrKey. 
+func spanCountByResourceStringAttribute(td pdata.Traces, attrKey string) map[string]int { + spanCounts := make(map[string]int) + + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + var attrStringVal string + if attrVal, ok := rs.Resource().Attributes().Get(attrKey); ok { + attrStringVal = attrVal.StringVal() + } + ilss := rs.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + spanCounts[attrStringVal] += ilss.At(j).Spans().Len() + } + } + return spanCounts +} diff --git a/internal/otel_collector/processor/metrics_test.go b/internal/otel_collector/processor/metrics_test.go new file mode 100644 index 00000000000..a3c45b677cf --- /dev/null +++ b/internal/otel_collector/processor/metrics_test.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processor + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestSpanCountByResourceStringAttribute(t *testing.T) { + td := testdata.GenerateTraceDataEmpty() + require.EqualValues(t, 0, len(spanCountByResourceStringAttribute(td, "resource-attr"))) + + td = testdata.GenerateTraceDataOneSpan() + spanCounts := spanCountByResourceStringAttribute(td, "resource-attr") + require.EqualValues(t, 1, len(spanCounts)) + require.EqualValues(t, 1, spanCounts["resource-attr-val-1"]) + + td = testdata.GenerateTraceDataTwoSpansSameResource() + spanCounts = spanCountByResourceStringAttribute(td, "resource-attr") + require.EqualValues(t, 1, len(spanCounts)) + require.EqualValues(t, 2, spanCounts["resource-attr-val-1"]) + + td = testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent() + spanCounts = spanCountByResourceStringAttribute(td, "resource-attr") + require.EqualValues(t, 2, len(spanCounts)) + require.EqualValues(t, 2, spanCounts["resource-attr-val-1"]) + require.EqualValues(t, 1, spanCounts["resource-attr-val-2"]) +} diff --git a/internal/otel_collector/processor/processorhelper/attraction.go b/internal/otel_collector/processor/processorhelper/attraction.go new file mode 100644 index 00000000000..87961240dc5 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/attraction.go @@ -0,0 +1,283 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package processorhelper
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal/processor/filterhelper"
+)
+
+// Settings specifies the configuration for processing attributes.
+type Settings struct {
+	// Actions specifies the list of attributes to act on.
+	// The set of actions is {INSERT, UPDATE, UPSERT, DELETE, HASH, EXTRACT}.
+	// This is a required field.
+	Actions []ActionKeyValue `mapstructure:"actions"`
+}
+
+// ActionKeyValue specifies the attribute key to act upon.
+type ActionKeyValue struct {
+	// Key specifies the attribute to act upon.
+	// This is a required field.
+	Key string `mapstructure:"key"`
+
+	// Value specifies the value to populate for the key.
+	// The type of the value is inferred from the configuration.
+	Value interface{} `mapstructure:"value"`
+
+	// A regex pattern must be specified for the action EXTRACT.
+	// It uses the value of the attribute specified by `key` to extract values from.
+	// The target keys are inferred from the names of the matcher groups, and the
+	// target values from the values captured by those groups.
+	// Note: All subexpressions must have a name.
+	// Note: The value type of the source key must be a string. If it isn't,
+	// no extraction will occur.
+	RegexPattern string `mapstructure:"pattern"`
+
+	// FromAttribute specifies the attribute to use to populate
+	// the value. If the attribute doesn't exist, no action is performed.
+	FromAttribute string `mapstructure:"from_attribute"`
+
+	// Action specifies the type of action to perform.
+	// The set of values is {INSERT, UPDATE, UPSERT, DELETE, HASH, EXTRACT}.
+	// Both lower case and upper case are supported.
+	// INSERT - Inserts the key/value to attributes when the key does not exist.
+	//          No action is applied to attributes where the key already exists.
+	//          Either Value or FromAttribute must be set.
+	// UPDATE - Updates an existing key with a value. No action is applied
+	//          to attributes where the key does not exist.
+	//          Either Value or FromAttribute must be set.
+	// UPSERT - Performs insert or update action depending on the attributes
+	//          containing the key. The key/value is inserted into attributes
+	//          that did not originally have the key. The key/value is updated
+	//          for attributes where the key already existed.
+	//          Either Value or FromAttribute must be set.
+	// DELETE - Deletes the attribute. If the key doesn't exist,
+	//          no action is performed.
+	// HASH - Calculates the SHA-1 hash of an existing value and overwrites the
+	//        value with its SHA-1 hash result.
+	// EXTRACT - Extracts values using a regular expression rule from the input
+	//           'key' to target keys specified in the 'rule'. If a target key
+	//           already exists, it will be overridden.
+	// This is a required field.
+	Action Action `mapstructure:"action"`
+}
+
+// Action is the enum capturing the types of actions to perform on an
+// attribute.
+type Action string
+
+const (
+	// INSERT adds the key/value to attributes when the key does not exist.
+	// No action is applied to attributes where the key already exists.
+	INSERT Action = "insert"
+
+	// UPDATE updates an existing key with a value. No action is applied
+	// to attributes where the key does not exist.
+	UPDATE Action = "update"
+
+	// UPSERT performs the INSERT or UPDATE action. The key/value is
+	// inserted into attributes that did not originally have the key. The key/value is
+	// updated for attributes where the key already existed.
+	UPSERT Action = "upsert"
+
+	// DELETE deletes the attribute. If the key doesn't exist, no action is performed.
+	DELETE Action = "delete"
+
+	// HASH calculates the SHA-1 hash of an existing value and overwrites the
+	// value with its SHA-1 hash result.
+	HASH Action = "hash"
+
+	// EXTRACT extracts values using a regular expression rule from the input
+	// 'key' to target keys specified in the 'rule'. If a target key already
+	// exists, it will be overridden.
+	EXTRACT Action = "extract"
+)
+
+type attributeAction struct {
+	Key           string
+	FromAttribute string
+	// Compiled regex if provided
+	Regex *regexp.Regexp
+	// Attribute names extracted from the regexp's subexpressions.
+	AttrNames []string
+
+	// TODO https://go.opentelemetry.io/collector/issues/296
+	// Do benchmark testing between having action be of type string vs integer.
+	// The reason is attributes processor will most likely be commonly used
+	// and could impact performance.
+	Action         Action
+	AttributeValue *pdata.AttributeValue
+}
+
+type AttrProc struct {
+	actions []attributeAction
+}
+
+// NewAttrProc validates that the input configuration has all of the required fields for the processor
+// and returns an AttrProc to be used to process attributes.
+// An error is returned if there are any invalid inputs.
+func NewAttrProc(settings *Settings) (*AttrProc, error) {
+	var attributeActions []attributeAction
+	for i, a := range settings.Actions {
+		// `key` is a required field
+		if a.Key == "" {
+			return nil, fmt.Errorf("error creating AttrProc due to missing required field \"key\" at the %d-th actions", i)
+		}
+
+		// Convert `action` to lowercase for comparison.
+		a.Action = Action(strings.ToLower(string(a.Action)))
+		action := attributeAction{
+			Key:    a.Key,
+			Action: a.Action,
+		}
+
+		switch a.Action {
+		case INSERT, UPDATE, UPSERT:
+			if a.Value == nil && a.FromAttribute == "" {
+				return nil, fmt.Errorf("error creating AttrProc. Either field \"value\" or \"from_attribute\" setting must be specified for %d-th action", i)
+			}
+
+			if a.Value != nil && a.FromAttribute != "" {
+				return nil, fmt.Errorf("error creating AttrProc due to both fields \"value\" and \"from_attribute\" being set at the %d-th actions", i)
+			}
+			if a.RegexPattern != "" {
+				return nil, fmt.Errorf("error creating AttrProc. Action \"%s\" does not use the \"pattern\" field. This must not be specified for %d-th action", a.Action, i)
+			}
+			// Convert the raw value from the configuration to the internal trace representation of the value.
+			if a.Value != nil {
+				val, err := filterhelper.NewAttributeValueRaw(a.Value)
+				if err != nil {
+					return nil, err
+				}
+				action.AttributeValue = &val
+			} else {
+				action.FromAttribute = a.FromAttribute
+			}
+		case HASH, DELETE:
+			if a.Value != nil || a.FromAttribute != "" || a.RegexPattern != "" {
+				return nil, fmt.Errorf("error creating AttrProc. Action \"%s\" does not use \"value\", \"pattern\" or \"from_attribute\" field. These must not be specified for %d-th action", a.Action, i)
+			}
+		case EXTRACT:
+			if a.Value != nil || a.FromAttribute != "" {
+				return nil, fmt.Errorf("error creating AttrProc. Action \"%s\" does not use \"value\" or \"from_attribute\" field. These must not be specified for %d-th action", a.Action, i)
+			}
+			if a.RegexPattern == "" {
+				return nil, fmt.Errorf("error creating AttrProc due to missing required field \"pattern\" for action \"%s\" at the %d-th action", a.Action, i)
+			}
+			re, err := regexp.Compile(a.RegexPattern)
+			if err != nil {
+				return nil, fmt.Errorf("error creating AttrProc.
Field \"pattern\" has invalid pattern: \"%s\" to be set at the %d-th actions", a.RegexPattern, i) + } + attrNames := re.SubexpNames() + if len(attrNames) <= 1 { + return nil, fmt.Errorf("error creating AttrProc. Field \"pattern\" contains no named matcher groups at the %d-th actions", i) + } + + for subExpIndex := 1; subExpIndex < len(attrNames); subExpIndex++ { + if attrNames[subExpIndex] == "" { + return nil, fmt.Errorf("error creating AttrProc. Field \"pattern\" contains at least one unnamed matcher group at the %d-th actions", i) + } + } + action.Regex = re + action.AttrNames = attrNames + default: + return nil, fmt.Errorf("error creating AttrProc due to unsupported action %q at the %d-th actions", a.Action, i) + } + + attributeActions = append(attributeActions, action) + } + return &AttrProc{actions: attributeActions}, nil +} + +func (ap *AttrProc) Process(attrs pdata.AttributeMap) { + for _, action := range ap.actions { + // TODO https://go.opentelemetry.io/collector/issues/296 + // Do benchmark testing between having action be of type string vs integer. + // The reason is attributes processor will most likely be commonly used + // and could impact performance. + switch action.Action { + case DELETE: + attrs.Delete(action.Key) + case INSERT: + av, found := getSourceAttributeValue(action, attrs) + if !found { + continue + } + attrs.Insert(action.Key, av) + case UPDATE: + av, found := getSourceAttributeValue(action, attrs) + if !found { + continue + } + attrs.Update(action.Key, av) + case UPSERT: + av, found := getSourceAttributeValue(action, attrs) + if !found { + continue + } + attrs.Upsert(action.Key, av) + case HASH: + hashAttribute(action, attrs) + case EXTRACT: + extractAttributes(action, attrs) + } + } +} + +func getSourceAttributeValue(action attributeAction, attrs pdata.AttributeMap) (pdata.AttributeValue, bool) { + // Set the key with a value from the configuration. + if action.AttributeValue != nil { + return *action.AttributeValue, true + } + + return attrs.Get(action.FromAttribute) +} + +func hashAttribute(action attributeAction, attrs pdata.AttributeMap) { + if value, exists := attrs.Get(action.Key); exists { + sha1Hasher(value) + } +} + +func extractAttributes(action attributeAction, attrs pdata.AttributeMap) { + value, found := attrs.Get(action.Key) + + // Extracting values only functions on strings. + if !found || value.Type() != pdata.AttributeValueSTRING { + return + } + + // Note: The number of matches will always be equal to number of + // subexpressions. + matches := action.Regex.FindStringSubmatch(value.StringVal()) + if matches == nil { + return + } + + // Start from index 1, which is the first submatch (index 0 is the entire + // match). + for i := 1; i < len(matches); i++ { + attrs.UpsertString(action.AttrNames[i], matches[i]) + } +} diff --git a/internal/otel_collector/processor/processorhelper/attraction_test.go b/internal/otel_collector/processor/processorhelper/attraction_test.go new file mode 100644 index 00000000000..50c181b7084 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/attraction_test.go @@ -0,0 +1,881 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processorhelper
+
+import (
+	"crypto/sha1" // #nosec
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"regexp"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// Common structure for all the tests
+type testCase struct {
+	name               string
+	inputAttributes    map[string]pdata.AttributeValue
+	expectedAttributes map[string]pdata.AttributeValue
+}
+
+// runIndividualTestCase is the common logic of passing trace data through a configured attributes processor.
+func runIndividualTestCase(t *testing.T, tt testCase, ap *AttrProc) {
+	t.Run(tt.name, func(t *testing.T) {
+		attrMap := pdata.NewAttributeMap().InitFromMap(tt.inputAttributes)
+		ap.Process(attrMap)
+		attrMap.Sort()
+		require.Equal(t, pdata.NewAttributeMap().InitFromMap(tt.expectedAttributes).Sort(), attrMap)
+	})
+}
+
+func TestAttributes_InsertValue(t *testing.T) {
+	testCases := []testCase{
+		// Ensure `attribute1` is set for spans with no attributes.
+		{
+			name:            "InsertEmptyAttributes",
+			inputAttributes: map[string]pdata.AttributeValue{},
+			expectedAttributes: map[string]pdata.AttributeValue{
+				"attribute1": pdata.NewAttributeValueInt(123),
+			},
+		},
+		// Ensure `attribute1` is set.
+		{
+			name: "InsertKeyNoExists",
+			inputAttributes: map[string]pdata.AttributeValue{
+				"anotherkey": pdata.NewAttributeValueString("bob"),
+			},
+			expectedAttributes: map[string]pdata.AttributeValue{
+				"anotherkey": pdata.NewAttributeValueString("bob"),
+				"attribute1": pdata.NewAttributeValueInt(123),
+			},
+		},
+		// Ensures no insert is performed because the key `attribute1` already exists.
+		{
+			name: "InsertKeyExists",
+			inputAttributes: map[string]pdata.AttributeValue{
+				"attribute1": pdata.NewAttributeValueString("bob"),
+			},
+			expectedAttributes: map[string]pdata.AttributeValue{
+				"attribute1": pdata.NewAttributeValueString("bob"),
+			},
+		},
+	}
+
+	cfg := &Settings{
+		Actions: []ActionKeyValue{
+			{Key: "attribute1", Action: INSERT, Value: 123},
+		},
+	}
+
+	ap, err := NewAttrProc(cfg)
+	require.Nil(t, err)
+	require.NotNil(t, ap)
+
+	for _, tt := range testCases {
+		runIndividualTestCase(t, tt, ap)
+	}
+}
+
+func TestAttributes_InsertFromAttribute(t *testing.T) {
+
+	testCases := []testCase{
+		// Ensure no attribute is inserted because attributes do not exist.
+		{
+			name:               "InsertEmptyAttributes",
+			inputAttributes:    map[string]pdata.AttributeValue{},
+			expectedAttributes: map[string]pdata.AttributeValue{},
+		},
+		// Ensure no attribute is inserted because from_attribute `string_key` does not exist.
+		{
+			name: "InsertMissingFromAttribute",
+			inputAttributes: map[string]pdata.AttributeValue{
+				"bob": pdata.NewAttributeValueInt(1),
+			},
+			expectedAttributes: map[string]pdata.AttributeValue{
+				"bob": pdata.NewAttributeValueInt(1),
+			},
+		},
+		// Ensure `string key` is set.
+ { + name: "InsertAttributeExists", + inputAttributes: map[string]pdata.AttributeValue{ + "anotherkey": pdata.NewAttributeValueInt(8892342), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "anotherkey": pdata.NewAttributeValueInt(8892342), + "string key": pdata.NewAttributeValueInt(8892342), + }, + }, + // Ensures no insert is performed because the keys `string key` already exist. + { + name: "InsertKeysExists", + inputAttributes: map[string]pdata.AttributeValue{ + "anotherkey": pdata.NewAttributeValueInt(8892342), + "string key": pdata.NewAttributeValueString("here"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "anotherkey": pdata.NewAttributeValueInt(8892342), + "string key": pdata.NewAttributeValueString("here"), + }, + }, + } + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "string key", Action: INSERT, FromAttribute: "anotherkey"}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_UpdateValue(t *testing.T) { + + testCases := []testCase{ + // Ensure no changes to the span as there is no attributes map. + { + name: "UpdateNoAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + // Ensure no changes to the span as the key does not exist. + { + name: "UpdateKeyNoExist", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("foo"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("foo"), + }, + }, + // Ensure the attribute `db.secret` is updated. + { + name: "UpdateAttributes", + inputAttributes: map[string]pdata.AttributeValue{ + "db.secret": pdata.NewAttributeValueString("password1234"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "db.secret": pdata.NewAttributeValueString("redacted"), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "db.secret", Action: UPDATE, Value: "redacted"}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_UpdateFromAttribute(t *testing.T) { + + testCases := []testCase{ + // Ensure no changes to the span as there is no attributes map. + { + name: "UpdateNoAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + // Ensure the attribute `boo` isn't updated because attribute `foo` isn't present in the span. + { + name: "UpdateKeyNoExistFromAttribute", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("bob"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("bob"), + }, + }, + // Ensure no updates as the target key `boo` doesn't exists. + { + name: "UpdateKeyNoExistMainAttributed", + inputAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("over there"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("over there"), + }, + }, + // Ensure no updates as the target key `boo` doesn't exists. 
+ { + name: "UpdateKeyFromExistingAttribute", + inputAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("there is a party over here"), + "boo": pdata.NewAttributeValueString("not here"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("there is a party over here"), + "boo": pdata.NewAttributeValueString("there is a party over here"), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "boo", Action: UPDATE, FromAttribute: "foo"}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_UpsertValue(t *testing.T) { + testCases := []testCase{ + // Ensure `region` is set for spans with no attributes. + { + name: "UpsertNoAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{ + "region": pdata.NewAttributeValueString("planet-earth"), + }, + }, + // Ensure `region` is inserted for spans with some attributes(the key doesn't exist). + { + name: "UpsertAttributeNoExist", + inputAttributes: map[string]pdata.AttributeValue{ + "mission": pdata.NewAttributeValueString("to mars"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "mission": pdata.NewAttributeValueString("to mars"), + "region": pdata.NewAttributeValueString("planet-earth"), + }, + }, + // Ensure `region` is updated for spans with the attribute key `region`. + { + name: "UpsertAttributeExists", + inputAttributes: map[string]pdata.AttributeValue{ + "mission": pdata.NewAttributeValueString("to mars"), + "region": pdata.NewAttributeValueString("solar system"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "mission": pdata.NewAttributeValueString("to mars"), + "region": pdata.NewAttributeValueString("planet-earth"), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "region", Action: UPSERT, Value: "planet-earth"}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_Extract(t *testing.T) { + testCases := []testCase{ + // Ensure `new_user_key` is not set for spans with no attributes. + { + name: "UpsertEmptyAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + // Ensure `new_user_key` is not inserted for spans with missing attribute `user_key`. + { + name: "No extract with no target key", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + }, + // Ensure `new_user_key` is not inserted for spans with missing attribute `user_key`. + { + name: "No extract with non string target key", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + "user_key": pdata.NewAttributeValueInt(1234), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + "user_key": pdata.NewAttributeValueInt(1234), + }, + }, + // Ensure `new_user_key` is not updated for spans with attribute + // `user_key` because `user_key` does not match the regular expression. 
+ { + name: "No extract with no pattern matching", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("does not match"), + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("does not match"), + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + }, + // Ensure `new_user_key` is not updated for spans with attribute + // `user_key` because `user_key` does not match all of the regular + // expression. + { + name: "No extract with no pattern matching", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update"), + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update"), + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + }, + // Ensure `new_user_key` and `version` is inserted for spans with attribute `user_key`. + { + name: "Extract insert new values.", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update/v1"), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update/v1"), + "new_user_key": pdata.NewAttributeValueString("12345678"), + "version": pdata.NewAttributeValueString("v1"), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + }, + // Ensure `new_user_key` and `version` is updated for spans with attribute `user_key`. + { + name: "Extract updates existing values ", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update/v1"), + "new_user_key": pdata.NewAttributeValueString("2321"), + "version": pdata.NewAttributeValueString("na"), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update/v1"), + "new_user_key": pdata.NewAttributeValueString("12345678"), + "version": pdata.NewAttributeValueString("v1"), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + }, + // Ensure `new_user_key` is updated and `version` is inserted for spans with attribute `user_key`. 
+ { + name: "Extract upserts values", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update/v1"), + "new_user_key": pdata.NewAttributeValueString("2321"), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueString("/api/v1/document/12345678/update/v1"), + "new_user_key": pdata.NewAttributeValueString("12345678"), + "version": pdata.NewAttributeValueString("v1"), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + + {Key: "user_key", RegexPattern: "^\\/api\\/v1\\/document\\/(?P.*)\\/update\\/(?P.*)$", Action: EXTRACT}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_UpsertFromAttribute(t *testing.T) { + + testCases := []testCase{ + // Ensure `new_user_key` is not set for spans with no attributes. + { + name: "UpsertEmptyAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + // Ensure `new_user_key` is not inserted for spans with missing attribute `user_key`. + { + name: "UpsertFromAttributeNoExist", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + }, + // Ensure `new_user_key` is inserted for spans with attribute `user_key`. + { + name: "UpsertFromAttributeExistsInsert", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueInt(2245), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueInt(2245), + "new_user_key": pdata.NewAttributeValueInt(2245), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + }, + // Ensure `new_user_key` is updated for spans with attribute `user_key`. + { + name: "UpsertFromAttributeExistsUpdate", + inputAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueInt(2245), + "new_user_key": pdata.NewAttributeValueInt(5422), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "user_key": pdata.NewAttributeValueInt(2245), + "new_user_key": pdata.NewAttributeValueInt(2245), + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "new_user_key", Action: UPSERT, FromAttribute: "user_key"}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_Delete(t *testing.T) { + testCases := []testCase{ + // Ensure the span contains no changes. + { + name: "DeleteEmptyAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + // Ensure the span contains no changes because the key doesn't exist. 
+ { + name: "DeleteAttributeNoExist", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + }, + // Ensure `duplicate_key` is deleted for spans with the attribute set. + { + name: "DeleteAttributeExists", + inputAttributes: map[string]pdata.AttributeValue{ + "duplicate_key": pdata.NewAttributeValueDouble(3245.6), + "original_key": pdata.NewAttributeValueDouble(3245.6), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "original_key": pdata.NewAttributeValueDouble(3245.6), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "duplicate_key", Action: DELETE}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_HashValue(t *testing.T) { + + intVal := int64(24) + intBytes := make([]byte, int64ByteSize) + binary.LittleEndian.PutUint64(intBytes, uint64(intVal)) + + doubleVal := 2.4 + doubleBytes := make([]byte, float64ByteSize) + binary.LittleEndian.PutUint64(doubleBytes, math.Float64bits(doubleVal)) + + testCases := []testCase{ + // Ensure no changes to the span as there is no attributes map. + { + name: "HashNoAttributes", + inputAttributes: map[string]pdata.AttributeValue{}, + expectedAttributes: map[string]pdata.AttributeValue{}, + }, + // Ensure no changes to the span as the key does not exist. + { + name: "HashKeyNoExist", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("foo"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("foo"), + }, + }, + // Ensure string data types are hashed correctly + { + name: "HashString", + inputAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueString("foo"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueString(sha1Hash([]byte("foo"))), + }, + }, + // Ensure int data types are hashed correctly + { + name: "HashInt", + inputAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueInt(intVal), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueString(sha1Hash(intBytes)), + }, + }, + // Ensure double data types are hashed correctly + { + name: "HashDouble", + inputAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueDouble(doubleVal), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueString(sha1Hash(doubleBytes)), + }, + }, + // Ensure bool data types are hashed correctly + { + name: "HashBoolTrue", + inputAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueBool(true), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueString(sha1Hash([]byte{1})), + }, + }, + // Ensure bool data types are hashed correctly + { + name: "HashBoolFalse", + inputAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueBool(false), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "updateme": pdata.NewAttributeValueString(sha1Hash([]byte{0})), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "updateme", Action: HASH}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + 
require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestAttributes_FromAttributeNoChange(t *testing.T) { + tc := testCase{ + name: "FromAttributeNoChange", + inputAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "boo": pdata.NewAttributeValueString("ghosts are scary"), + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "boo", Action: INSERT, FromAttribute: "boo"}, + {Key: "boo", Action: UPDATE, FromAttribute: "boo"}, + {Key: "boo", Action: UPSERT, FromAttribute: "boo"}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + runIndividualTestCase(t, tc, ap) +} + +func TestAttributes_Ordering(t *testing.T) { + testCases := []testCase{ + // For this example, the operations performed are + // 1. insert `operation`: `default` + // 2. insert `svc.operation`: `default` + // 3. delete `operation`. + { + name: "OrderingApplyAllSteps", + inputAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "svc.operation": pdata.NewAttributeValueString("default"), + }, + }, + // For this example, the operations performed are + // 1. do nothing for the first action of insert `operation`: `default` + // 2. insert `svc.operation`: `arithmetic` + // 3. delete `operation`. + { + name: "OrderingOperationExists", + inputAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "operation": pdata.NewAttributeValueString("arithmetic"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "svc.operation": pdata.NewAttributeValueString("arithmetic"), + }, + }, + + // For this example, the operations performed are + // 1. insert `operation`: `default` + // 2. update `svc.operation` to `default` + // 3. delete `operation`. + { + name: "OrderingSvcOperationExists", + inputAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "svc.operation": pdata.NewAttributeValueString("some value"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "svc.operation": pdata.NewAttributeValueString("default"), + }, + }, + + // For this example, the operations performed are + // 1. do nothing for the first action of insert `operation`: `default` + // 2. update `svc.operation` to `arithmetic` + // 3. delete `operation`. 
+ { + name: "OrderingBothAttributesExist", + inputAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "operation": pdata.NewAttributeValueString("arithmetic"), + "svc.operation": pdata.NewAttributeValueString("add"), + }, + expectedAttributes: map[string]pdata.AttributeValue{ + "foo": pdata.NewAttributeValueString("casper the friendly ghost"), + "svc.operation": pdata.NewAttributeValueString("arithmetic"), + }, + }, + } + + cfg := &Settings{ + Actions: []ActionKeyValue{ + {Key: "operation", Action: INSERT, Value: "default"}, + {Key: "svc.operation", Action: UPSERT, FromAttribute: "operation"}, + {Key: "operation", Action: DELETE}, + }, + } + + ap, err := NewAttrProc(cfg) + require.Nil(t, err) + require.NotNil(t, ap) + + for _, tt := range testCases { + runIndividualTestCase(t, tt, ap) + } +} + +func TestInvalidConfig(t *testing.T) { + testcase := []struct { + name string + actionLists []ActionKeyValue + errorString string + }{ + { + name: "missing key", + actionLists: []ActionKeyValue{ + {Key: "one", Action: DELETE}, + {Key: "", Value: 123, Action: UPSERT}, + }, + errorString: "error creating AttrProc due to missing required field \"key\" at the 1-th actions", + }, + { + name: "invalid action", + actionLists: []ActionKeyValue{ + {Key: "invalid", Action: "invalid"}, + }, + errorString: "error creating AttrProc due to unsupported action \"invalid\" at the 0-th actions", + }, + { + name: "unsupported value", + actionLists: []ActionKeyValue{ + {Key: "UnsupportedValue", Value: []int{}, Action: UPSERT}, + }, + errorString: "error unsupported value type \"[]int\"", + }, + { + name: "missing value or from attribute", + actionLists: []ActionKeyValue{ + {Key: "MissingValueFromAttributes", Action: INSERT}, + }, + errorString: "error creating AttrProc. Either field \"value\" or \"from_attribute\" setting must be specified for 0-th action", + }, + { + name: "both set value and from attribute", + actionLists: []ActionKeyValue{ + {Key: "BothSet", Value: 123, FromAttribute: "aa", Action: UPSERT}, + }, + errorString: "error creating AttrProc due to both fields \"value\" and \"from_attribute\" being set at the 0-th actions", + }, + { + name: "pattern shouldn't be specified", + actionLists: []ActionKeyValue{ + {Key: "key", RegexPattern: "(?P.*?)$", FromAttribute: "aa", Action: INSERT}, + }, + errorString: "error creating AttrProc. Action \"insert\" does not use the \"pattern\" field. This must not be specified for 0-th action", + }, + { + name: "missing rule for extract", + actionLists: []ActionKeyValue{ + {Key: "aa", Action: EXTRACT}, + }, + errorString: "error creating AttrProc due to missing required field \"pattern\" for action \"extract\" at the 0-th action", + }, + {name: "set value for extract", + actionLists: []ActionKeyValue{ + {Key: "Key", RegexPattern: "(?P.*?)$", Value: "value", Action: EXTRACT}, + }, + errorString: "error creating AttrProc. Action \"extract\" does not use \"value\" or \"from_attribute\" field. These must not be specified for 0-th action", + }, + { + name: "set from attribute for extract", + actionLists: []ActionKeyValue{ + {Key: "key", RegexPattern: "(?P.*?)$", FromAttribute: "aa", Action: EXTRACT}, + }, + errorString: "error creating AttrProc. Action \"extract\" does not use \"value\" or \"from_attribute\" field. 
+		},
+		{
+			name: "invalid regex",
+			actionLists: []ActionKeyValue{
+				{Key: "aa", RegexPattern: "(?P<invalid.regex>.*?)$", Action: EXTRACT},
+			},
+			errorString: "error creating AttrProc. Field \"pattern\" has invalid pattern: \"(?P<invalid.regex>.*?)$\" to be set at the 0-th actions",
+		},
+		{
+			name: "delete with regex",
+			actionLists: []ActionKeyValue{
+				{RegexPattern: "(?P<operation_website>.*?)$", Key: "ab", Action: DELETE},
+			},
+			errorString: "error creating AttrProc. Action \"delete\" does not use \"value\", \"pattern\" or \"from_attribute\" field. These must not be specified for 0-th action",
+		},
+		{
+			name: "regex with unnamed capture group",
+			actionLists: []ActionKeyValue{
+				{Key: "aa", RegexPattern: ".*$", Action: EXTRACT},
+			},
+			errorString: "error creating AttrProc. Field \"pattern\" contains no named matcher groups at the 0-th actions",
+		},
+		{
+			name: "regex with one unnamed capture groups",
+			actionLists: []ActionKeyValue{
+				{Key: "aa", RegexPattern: "^\\/api\\/v1\\/document\\/(?P<new_user_key>.*)\\/update\\/(.*)$", Action: EXTRACT},
+			},
+			errorString: "error creating AttrProc. Field \"pattern\" contains at least one unnamed matcher group at the 0-th actions",
+		},
+	}
+
+	for _, tc := range testcase {
+		t.Run(tc.name, func(t *testing.T) {
+			ap, err := NewAttrProc(&Settings{Actions: tc.actionLists})
+			assert.Nil(t, ap)
+			assert.EqualValues(t, errors.New(tc.errorString), err)
+		})
+	}
+}
+
+func TestValidConfiguration(t *testing.T) {
+	cfg := &Settings{
+		Actions: []ActionKeyValue{
+			{Key: "one", Action: "Delete"},
+			{Key: "two", Value: 123, Action: "INSERT"},
+			{Key: "three", FromAttribute: "two", Action: "upDaTE"},
+			{Key: "five", FromAttribute: "two", Action: "upsert"},
+			{Key: "two", RegexPattern: "^\\/api\\/v1\\/document\\/(?P<documentId>.*)\\/update$", Action: "EXTRact"},
+		},
+	}
+	ap, err := NewAttrProc(cfg)
+	require.NoError(t, err)
+
+	av := pdata.NewAttributeValueInt(123)
+	compiledRegex := regexp.MustCompile(`^\/api\/v1\/document\/(?P<documentId>.*)\/update$`)
+	assert.Equal(t, []attributeAction{
+		{Key: "one", Action: DELETE},
+		{Key: "two", Action: INSERT,
+			AttributeValue: &av,
+		},
+		{Key: "three", FromAttribute: "two", Action: UPDATE},
+		{Key: "five", FromAttribute: "two", Action: UPSERT},
+		{Key: "two", Regex: compiledRegex, AttrNames: []string{"", "documentId"}, Action: EXTRACT},
+	}, ap.actions)
+
+}
+
+func sha1Hash(b []byte) string {
+	// #nosec
+	h := sha1.New()
+	h.Write(b)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
diff --git a/internal/otel_collector/processor/processorhelper/factory.go b/internal/otel_collector/processor/processorhelper/factory.go
new file mode 100644
index 00000000000..c5175f762d2
--- /dev/null
+++ b/internal/otel_collector/processor/processorhelper/factory.go
@@ -0,0 +1,149 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
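Aside on the extraction rules enforced by NewAttrProc earlier in this patch: every capture group in an EXTRACT pattern must be named, because Process maps Regex.SubexpNames() one-to-one onto the attribute keys it upserts. A standalone sketch of that mapping, using only the standard library (illustrative, not patch code; assumes the usual imports):

	re := regexp.MustCompile(`^\/api\/v1\/document\/(?P<documentId>.*)\/update$`)
	names := re.SubexpNames() // ["", "documentId"]; an unnamed group would yield ""
	if m := re.FindStringSubmatch("/api/v1/document/12345678/update"); m != nil {
		for i := 1; i < len(m); i++ { // index 0 is the whole match
			fmt.Printf("%s=%s\n", names[i], m[i]) // documentId=12345678
		}
	}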
+
+package processorhelper
+
+import (
+	"context"
+
+	"github.com/spf13/viper"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+// FactoryOption applies changes to the processor factory being built.
+type FactoryOption func(o *factory)
+
+// CreateDefaultConfig is the equivalent of component.ProcessorFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() configmodels.Processor
+
+// CreateTraceProcessor is the equivalent of component.ProcessorFactory.CreateTracesProcessor()
+type CreateTraceProcessor func(context.Context, component.ProcessorCreateParams, configmodels.Processor, consumer.TracesConsumer) (component.TracesProcessor, error)
+
+// CreateMetricsProcessor is the equivalent of component.ProcessorFactory.CreateMetricsProcessor()
+type CreateMetricsProcessor func(context.Context, component.ProcessorCreateParams, configmodels.Processor, consumer.MetricsConsumer) (component.MetricsProcessor, error)
+
+// CreateLogsProcessor is the equivalent of component.ProcessorFactory.CreateLogsProcessor()
+type CreateLogsProcessor func(context.Context, component.ProcessorCreateParams, configmodels.Processor, consumer.LogsConsumer) (component.LogsProcessor, error)
+
+type factory struct {
+	cfgType                configmodels.Type
+	customUnmarshaler      component.CustomUnmarshaler
+	createDefaultConfig    CreateDefaultConfig
+	createTraceProcessor   CreateTraceProcessor
+	createMetricsProcessor CreateMetricsProcessor
+	createLogsProcessor    CreateLogsProcessor
+}
+
+// WithCustomUnmarshaler sets a custom unmarshaler; the resulting factory then implements component.ConfigUnmarshaler.
+func WithCustomUnmarshaler(customUnmarshaler component.CustomUnmarshaler) FactoryOption {
+	return func(o *factory) {
+		o.customUnmarshaler = customUnmarshaler
+	}
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTraceProcessor.
+func WithTraces(createTraceProcessor CreateTraceProcessor) FactoryOption {
+	return func(o *factory) {
+		o.createTraceProcessor = createTraceProcessor
+	}
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetricsProcessor.
+func WithMetrics(createMetricsProcessor CreateMetricsProcessor) FactoryOption {
+	return func(o *factory) {
+		o.createMetricsProcessor = createMetricsProcessor
+	}
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogsProcessor.
+func WithLogs(createLogsProcessor CreateLogsProcessor) FactoryOption {
+	return func(o *factory) {
+		o.createLogsProcessor = createLogsProcessor
+	}
+}
+
+// NewFactory returns a component.ProcessorFactory.
+func NewFactory(
+	cfgType configmodels.Type,
+	createDefaultConfig CreateDefaultConfig,
+	options ...FactoryOption) component.ProcessorFactory {
+	f := &factory{
+		cfgType:             cfgType,
+		createDefaultConfig: createDefaultConfig,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	var ret component.ProcessorFactory
+	if f.customUnmarshaler != nil {
+		ret = &factoryWithUnmarshaler{f}
+	} else {
+		ret = f
+	}
+	return ret
+}
+
+// Type gets the type of the Processor config created by this factory.
+func (f *factory) Type() configmodels.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the processor.
+func (f *factory) CreateDefaultConfig() configmodels.Processor {
+	return f.createDefaultConfig()
+}
+
+// CreateTracesProcessor creates a component.TracesProcessor based on this config.
+func (f *factory) CreateTracesProcessor(ctx context.Context, params component.ProcessorCreateParams, cfg configmodels.Processor, nextConsumer consumer.TracesConsumer) (component.TracesProcessor, error) {
+	if f.createTraceProcessor != nil {
+		return f.createTraceProcessor(ctx, params, cfg, nextConsumer)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsProcessor creates a component.MetricsProcessor based on this config.
+func (f *factory) CreateMetricsProcessor(ctx context.Context, params component.ProcessorCreateParams, cfg configmodels.Processor, nextConsumer consumer.MetricsConsumer) (component.MetricsProcessor, error) {
+	if f.createMetricsProcessor != nil {
+		return f.createMetricsProcessor(ctx, params, cfg, nextConsumer)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsProcessor creates a component.LogsProcessor based on this config.
+func (f *factory) CreateLogsProcessor(
+	ctx context.Context,
+	params component.ProcessorCreateParams,
+	cfg configmodels.Processor,
+	nextConsumer consumer.LogsConsumer,
+) (component.LogsProcessor, error) {
+	if f.createLogsProcessor != nil {
+		return f.createLogsProcessor(ctx, params, cfg, nextConsumer)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+var _ component.ConfigUnmarshaler = (*factoryWithUnmarshaler)(nil)
+
+type factoryWithUnmarshaler struct {
+	*factory
+}
+
+// Unmarshal un-marshals the config using the provided custom unmarshaler.
+func (f *factoryWithUnmarshaler) Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	return f.customUnmarshaler(componentViperSection, intoCfg)
+}
diff --git a/internal/otel_collector/processor/processorhelper/factory_test.go b/internal/otel_collector/processor/processorhelper/factory_test.go
new file mode 100644
index 00000000000..690a334fbbe
--- /dev/null
+++ b/internal/otel_collector/processor/processorhelper/factory_test.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
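For reference, the factory_test.go that follows exercises exactly this options pattern; a component author would typically call the helper as in the sketch below ("myprocessor", createDefaultConfig, and createTracesProcessor are hypothetical placeholders, not names from this patch):

	factory := processorhelper.NewFactory(
		"myprocessor",       // the configmodels.Type referenced in pipeline configs
		createDefaultConfig, // CreateDefaultConfig
		processorhelper.WithTraces(createTracesProcessor),
	)
	// factory satisfies component.ProcessorFactory. Signals without a
	// registered constructor (metrics and logs here) fail with
	// configerror.ErrDataTypeIsNotSupported, as implemented above.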
+ +package processorhelper + +import ( + "context" + "errors" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" +) + +const typeStr = "test" + +var defaultCfg = &configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, +} + +func TestNewTrace(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + _, ok := factory.(component.ConfigUnmarshaler) + assert.False(t, ok) + _, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, defaultCfg, nil) + assert.Error(t, err) + _, err = factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, defaultCfg, nil) + assert.Error(t, err) + _, err = factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, defaultCfg, nil) + assert.Error(t, err) +} + +func TestNewMetrics_WithConstructors(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig, + WithTraces(createTraceProcessor), + WithMetrics(createMetricsProcessor), + WithLogs(createLogsProcessor), + WithCustomUnmarshaler(customUnmarshaler)) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + + fu, ok := factory.(component.ConfigUnmarshaler) + assert.True(t, ok) + assert.Equal(t, errors.New("my error"), fu.Unmarshal(nil, nil)) + + _, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, defaultCfg, nil) + assert.NoError(t, err) + + _, err = factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, defaultCfg, nil) + assert.NoError(t, err) + + _, err = factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, defaultCfg, nil) + assert.NoError(t, err) +} + +func defaultConfig() configmodels.Processor { + return defaultCfg +} + +func createTraceProcessor(context.Context, component.ProcessorCreateParams, configmodels.Processor, consumer.TracesConsumer) (component.TracesProcessor, error) { + return nil, nil +} + +func createMetricsProcessor(context.Context, component.ProcessorCreateParams, configmodels.Processor, consumer.MetricsConsumer) (component.MetricsProcessor, error) { + return nil, nil +} + +func createLogsProcessor(context.Context, component.ProcessorCreateParams, configmodels.Processor, consumer.LogsConsumer) (component.LogsProcessor, error) { + return nil, nil +} + +func customUnmarshaler(*viper.Viper, interface{}) error { + return errors.New("my error") +} diff --git a/internal/otel_collector/processor/processorhelper/hasher.go b/internal/otel_collector/processor/processorhelper/hasher.go new file mode 100644 index 00000000000..6f0999a9568 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/hasher.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + // #nosec + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "math" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const ( + int64ByteSize = 8 + float64ByteSize = 8 +) + +var ( + byteTrue = [1]byte{1} + byteFalse = [1]byte{0} +) + +// sha1Hasher hashes an AttributeValue using SHA1 and returns a +// hashed version of the attribute. In practice, this would mostly be used +// for string attributes but we support all types for completeness/correctness +// and eliminate any surprises. +func sha1Hasher(attr pdata.AttributeValue) { + var val []byte + switch attr.Type() { + case pdata.AttributeValueSTRING: + val = []byte(attr.StringVal()) + case pdata.AttributeValueBOOL: + if attr.BoolVal() { + val = byteTrue[:] + } else { + val = byteFalse[:] + } + case pdata.AttributeValueINT: + val = make([]byte, int64ByteSize) + binary.LittleEndian.PutUint64(val, uint64(attr.IntVal())) + case pdata.AttributeValueDOUBLE: + val = make([]byte, float64ByteSize) + binary.LittleEndian.PutUint64(val, math.Float64bits(attr.DoubleVal())) + } + + var hashed string + if len(val) > 0 { + // #nosec + h := sha1.New() + h.Write(val) + val = h.Sum(nil) + hashedBytes := make([]byte, hex.EncodedLen(len(val))) + hex.Encode(hashedBytes, val) + hashed = string(hashedBytes) + } + + attr.SetStringVal(hashed) +} diff --git a/internal/otel_collector/processor/processorhelper/processor.go b/internal/otel_collector/processor/processorhelper/processor.go new file mode 100644 index 00000000000..0846d9d2846 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/processor.go @@ -0,0 +1,256 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +// ErrSkipProcessingData is a sentinel value to indicate when traces or metrics should intentionally be dropped +// from further processing in the pipeline because the data is determined to be irrelevant. A processor can return this error +// to stop further processing without propagating an error back up the pipeline to logs. 
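Editorial sketch of how the sentinel declared just below is intended to be used: an MProcessor can return it to drop a batch silently, and metricsProcessor.ConsumeMetrics (later in this file) swallows it instead of propagating an error. The dropEmpty type is hypothetical, not part of the patch:

	type dropEmpty struct{}

	func (dropEmpty) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) {
		if md.MetricCount() == 0 {
			return md, ErrSkipProcessingData // end the pipeline for this batch, no error reported
		}
		return md, nil
	}

	// wiring: proc, err := NewMetricsProcessor(cfg, nextConsumer, dropEmpty{})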
+var ErrSkipProcessingData = errors.New("sentinel error to skip processing data from the remainder of the pipeline")
+
+// TProcessor is a helper interface that allows avoiding implementing all functions in TracesProcessor by using NewTraceProcessor.
+type TProcessor interface {
+	// ProcessTraces is a helper function that processes the incoming data and returns the data to be sent to the next component.
+	// If error is returned then returned data are ignored. It MUST not call the next component.
+	ProcessTraces(context.Context, pdata.Traces) (pdata.Traces, error)
+}
+
+// MProcessor is a helper interface that allows avoiding implementing all functions in MetricsProcessor by using NewMetricsProcessor.
+type MProcessor interface {
+	// ProcessMetrics is a helper function that processes the incoming data and returns the data to be sent to the next component.
+	// If error is returned then returned data are ignored. It MUST not call the next component.
+	ProcessMetrics(context.Context, pdata.Metrics) (pdata.Metrics, error)
+}
+
+// LProcessor is a helper interface that allows avoiding implementing all functions in LogsProcessor by using NewLogsProcessor.
+type LProcessor interface {
+	// ProcessLogs is a helper function that processes the incoming data and returns the data to be sent to the next component.
+	// If error is returned then returned data are ignored. It MUST not call the next component.
+	ProcessLogs(context.Context, pdata.Logs) (pdata.Logs, error)
+}
+
+// Option applies changes to internalOptions.
+type Option func(*baseSettings)
+
+// WithStart overrides the default Start function for a processor.
+// The default Start function does nothing and always returns nil.
+func WithStart(start componenthelper.Start) Option {
+	return func(o *baseSettings) {
+		o.Start = start
+	}
+}
+
+// WithShutdown overrides the default Shutdown function for a processor.
+// The default Shutdown function does nothing and always returns nil.
+func WithShutdown(shutdown componenthelper.Shutdown) Option {
+	return func(o *baseSettings) {
+		o.Shutdown = shutdown
+	}
+}
+
+// WithCapabilities overrides the default GetCapabilities function for a processor.
+// The default GetCapabilities function returns mutable capabilities.
+func WithCapabilities(capabilities component.ProcessorCapabilities) Option {
+	return func(o *baseSettings) {
+		o.capabilities = capabilities
+	}
+}
+
+type baseSettings struct {
+	*componenthelper.ComponentSettings
+	capabilities component.ProcessorCapabilities
+}
+
+// fromOptions returns the internal settings starting from the default and applying all options.
+func fromOptions(options []Option) *baseSettings {
+	// Start from the default options:
+	opts := &baseSettings{
+		ComponentSettings: componenthelper.DefaultComponentSettings(),
+		capabilities:      component.ProcessorCapabilities{MutatesConsumedData: true},
+	}
+
+	for _, op := range options {
+		op(opts)
+	}
+
+	return opts
+}
+
+// baseProcessor holds the settings that describe how a processor is configured.
+type baseProcessor struct {
+	component.Component
+	fullName        string
+	capabilities    component.ProcessorCapabilities
+	traceAttributes []trace.Attribute
+}
+
+// newBaseProcessor constructs the base processor from multiple Options.
+func newBaseProcessor(fullName string, options ...Option) baseProcessor { + bs := fromOptions(options) + be := baseProcessor{ + Component: componenthelper.NewComponent(bs.ComponentSettings), + fullName: fullName, + capabilities: bs.capabilities, + traceAttributes: []trace.Attribute{ + trace.StringAttribute(obsreport.ProcessorKey, fullName), + }, + } + + return be +} + +func (bp *baseProcessor) GetCapabilities() component.ProcessorCapabilities { + return bp.capabilities +} + +type tracesProcessor struct { + baseProcessor + processor TProcessor + nextConsumer consumer.TracesConsumer +} + +func (tp *tracesProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error { + span := trace.FromContext(ctx) + span.Annotate(tp.traceAttributes, "Start processing.") + var err error + td, err = tp.processor.ProcessTraces(ctx, td) + span.Annotate(tp.traceAttributes, "End processing.") + if err != nil { + return err + } + return tp.nextConsumer.ConsumeTraces(ctx, td) +} + +// NewTraceProcessor creates a TracesProcessor that ensure context propagation and the right tags are set. +// TODO: Add observability metrics support +func NewTraceProcessor( + config configmodels.Processor, + nextConsumer consumer.TracesConsumer, + processor TProcessor, + options ...Option, +) (component.TracesProcessor, error) { + if processor == nil { + return nil, errors.New("nil processor") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + return &tracesProcessor{ + baseProcessor: newBaseProcessor(config.Name(), options...), + processor: processor, + nextConsumer: nextConsumer, + }, nil +} + +type metricsProcessor struct { + baseProcessor + processor MProcessor + nextConsumer consumer.MetricsConsumer +} + +func (mp *metricsProcessor) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { + span := trace.FromContext(ctx) + span.Annotate(mp.traceAttributes, "Start processing.") + var err error + md, err = mp.processor.ProcessMetrics(ctx, md) + span.Annotate(mp.traceAttributes, "End processing.") + if err != nil { + if err == ErrSkipProcessingData { + return nil + } + return err + } + return mp.nextConsumer.ConsumeMetrics(ctx, md) +} + +// NewMetricsProcessor creates a MetricsProcessor that ensure context propagation and the right tags are set. +// TODO: Add observability metrics support +func NewMetricsProcessor( + config configmodels.Processor, + nextConsumer consumer.MetricsConsumer, + processor MProcessor, + options ...Option, +) (component.MetricsProcessor, error) { + if processor == nil { + return nil, errors.New("nil processor") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + return &metricsProcessor{ + baseProcessor: newBaseProcessor(config.Name(), options...), + processor: processor, + nextConsumer: nextConsumer, + }, nil +} + +type logProcessor struct { + baseProcessor + processor LProcessor + nextConsumer consumer.LogsConsumer +} + +func (lp *logProcessor) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { + span := trace.FromContext(ctx) + span.Annotate(lp.traceAttributes, "Start processing.") + var err error + ld, err = lp.processor.ProcessLogs(ctx, ld) + span.Annotate(lp.traceAttributes, "End processing.") + if err != nil { + return err + } + return lp.nextConsumer.ConsumeLogs(ctx, ld) +} + +// NewLogsProcessor creates a LogsProcessor that ensure context propagation and the right tags are set. 
+// TODO: Add observability metrics support +func NewLogsProcessor( + config configmodels.Processor, + nextConsumer consumer.LogsConsumer, + processor LProcessor, + options ...Option, +) (component.LogsProcessor, error) { + if processor == nil { + return nil, errors.New("nil processor") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + return &logProcessor{ + baseProcessor: newBaseProcessor(config.Name(), options...), + processor: processor, + nextConsumer: nextConsumer, + }, nil +} diff --git a/internal/otel_collector/processor/processorhelper/processor_test.go b/internal/otel_collector/processor/processorhelper/processor_test.go new file mode 100644 index 00000000000..ae5fe637c04 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/processor_test.go @@ -0,0 +1,171 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +const testFullName = "testFullName" + +var testCfg = &configmodels.ProcessorSettings{ + TypeVal: testFullName, + NameVal: testFullName, +} + +func TestDefaultOptions(t *testing.T) { + bp := newBaseProcessor(testFullName) + assert.True(t, bp.GetCapabilities().MutatesConsumedData) + assert.NoError(t, bp.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, bp.Shutdown(context.Background())) +} + +func TestWithOptions(t *testing.T) { + want := errors.New("my_error") + bp := newBaseProcessor(testFullName, + WithStart(func(context.Context, component.Host) error { return want }), + WithShutdown(func(context.Context) error { return want }), + WithCapabilities(component.ProcessorCapabilities{MutatesConsumedData: false})) + assert.Equal(t, want, bp.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, want, bp.Shutdown(context.Background())) + assert.False(t, bp.GetCapabilities().MutatesConsumedData) +} + +func TestNewTraceExporter(t *testing.T) { + me, err := NewTraceProcessor(testCfg, consumertest.NewTracesNop(), newTestTProcessor(nil)) + require.NoError(t, err) + + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.NoError(t, me.Shutdown(context.Background())) +} + +func TestNewTraceExporter_NilRequiredFields(t *testing.T) { + _, err := NewTraceProcessor(testCfg, consumertest.NewTracesNop(), nil) + assert.Error(t, err) + + _, err = NewTraceProcessor(testCfg, nil, 
newTestTProcessor(nil)) + assert.Equal(t, componenterror.ErrNilNextConsumer, err) +} + +func TestNewTraceExporter_ProcessTraceError(t *testing.T) { + want := errors.New("my_error") + me, err := NewTraceProcessor(testCfg, consumertest.NewTracesNop(), newTestTProcessor(want)) + require.NoError(t, err) + assert.Equal(t, want, me.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) +} + +func TestNewMetricsExporter(t *testing.T) { + me, err := NewMetricsProcessor(testCfg, consumertest.NewMetricsNop(), newTestMProcessor(nil)) + require.NoError(t, err) + + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.NoError(t, me.Shutdown(context.Background())) +} + +func TestNewMetricsExporter_NilRequiredFields(t *testing.T) { + _, err := NewMetricsProcessor(testCfg, consumertest.NewMetricsNop(), nil) + assert.Error(t, err) + + _, err = NewMetricsProcessor(testCfg, nil, newTestMProcessor(nil)) + assert.Equal(t, componenterror.ErrNilNextConsumer, err) +} + +func TestNewMetricsExporter_ProcessMetricsError(t *testing.T) { + want := errors.New("my_error") + me, err := NewMetricsProcessor(testCfg, consumertest.NewMetricsNop(), newTestMProcessor(want)) + require.NoError(t, err) + assert.Equal(t, want, me.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) +} + +func TestNewMetricsExporter_ProcessMetricsErrSkipProcessingData(t *testing.T) { + me, err := NewMetricsProcessor(testCfg, consumertest.NewMetricsNop(), newTestMProcessor(ErrSkipProcessingData)) + require.NoError(t, err) + assert.Equal(t, nil, me.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) +} + +func TestNewLogsExporter(t *testing.T) { + me, err := NewLogsProcessor(testCfg, consumertest.NewLogsNop(), newTestLProcessor(nil)) + require.NoError(t, err) + + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.NoError(t, me.Shutdown(context.Background())) +} + +func TestNewLogsExporter_NilRequiredFields(t *testing.T) { + _, err := NewLogsProcessor(testCfg, consumertest.NewLogsNop(), nil) + assert.Error(t, err) + + _, err = NewLogsProcessor(testCfg, nil, newTestLProcessor(nil)) + assert.Equal(t, componenterror.ErrNilNextConsumer, err) +} + +func TestNewLogsExporter_ProcessLogError(t *testing.T) { + want := errors.New("my_error") + me, err := NewLogsProcessor(testCfg, consumertest.NewLogsNop(), newTestLProcessor(want)) + require.NoError(t, err) + assert.Equal(t, want, me.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) +} + +type testTProcessor struct { + retError error +} + +func newTestTProcessor(retError error) TProcessor { + return &testTProcessor{retError: retError} +} + +func (ttp *testTProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { + return td, ttp.retError +} + +type testMProcessor struct { + retError error +} + +func newTestMProcessor(retError error) MProcessor { + return &testMProcessor{retError: retError} +} + +func (tmp *testMProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { + return md, tmp.retError +} + +type testLProcessor struct { + retError error +} + +func newTestLProcessor(retError error) LProcessor { + return &testLProcessor{retError: retError} +} + +func (tlp *testLProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, 
error) {
+	return ld, tlp.retError
+}
diff --git a/internal/otel_collector/processor/queuedprocessor/README.md b/internal/otel_collector/processor/queuedprocessor/README.md
new file mode 100644
index 00000000000..9d50c03d04c
--- /dev/null
+++ b/internal/otel_collector/processor/queuedprocessor/README.md
@@ -0,0 +1,3 @@
+# Queued Retry Processor
+
+The QueuedRetry processor is deprecated. Use the exporter's queued retry config instead.
diff --git a/internal/otel_collector/processor/queuedprocessor/config.go b/internal/otel_collector/processor/queuedprocessor/config.go
new file mode 100644
index 00000000000..47dbb271aaa
--- /dev/null
+++ b/internal/otel_collector/processor/queuedprocessor/config.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package queuedprocessor
+
+import (
+	"time"
+
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config defines configuration for the queued_retry processor.
+type Config struct {
+	configmodels.ProcessorSettings `mapstructure:",squash"`
+
+	// NumWorkers is the number of queue workers that dequeue batches and send them out.
+	NumWorkers int `mapstructure:"num_workers"`
+	// QueueSize is the maximum number of batches allowed in queue at a given time.
+	QueueSize int `mapstructure:"queue_size"`
+	// RetryOnFailure indicates whether the queued processor should retry span batches in case of processing failure.
+	RetryOnFailure bool `mapstructure:"retry_on_failure"`
+	// BackoffDelay is the amount of time a worker waits after a failed send before retrying.
+	BackoffDelay time.Duration `mapstructure:"backoff_delay"`
+}
diff --git a/internal/otel_collector/processor/queuedprocessor/config_test.go b/internal/otel_collector/processor/queuedprocessor/config_test.go
new file mode 100644
index 00000000000..f07ee9fc898
--- /dev/null
+++ b/internal/otel_collector/processor/queuedprocessor/config_test.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
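The mapstructure tags on Config above define the YAML keys. The config_test.go that follows loads testdata/config.yaml, which is not included in this excerpt; judging from the values the test asserts, the relevant snippet presumably looks like this:

	processors:
	  queued_retry:
	  queued_retry/2:
	    num_workers: 2
	    queue_size: 10
	    retry_on_failure: true
	    backoff_delay: 5s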
+ +package queuedprocessor + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Processors[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.Nil(t, err) + require.NotNil(t, cfg) + + p0 := cfg.Processors["queued_retry"] + assert.Equal(t, p0, factory.CreateDefaultConfig()) + + p1 := cfg.Processors["queued_retry/2"] + assert.Equal(t, p1, + &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "queued_retry", + NameVal: "queued_retry/2", + }, + NumWorkers: 2, + QueueSize: 10, + RetryOnFailure: true, + BackoffDelay: time.Second * 5, + }) +} diff --git a/internal/otel_collector/processor/queuedprocessor/factory.go b/internal/otel_collector/processor/queuedprocessor/factory.go new file mode 100644 index 00000000000..482ecc57a54 --- /dev/null +++ b/internal/otel_collector/processor/queuedprocessor/factory.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package queuedprocessor + +import ( + "context" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "queued_retry" +) + +// NewFactory returns a new factory for the Queued processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithTraces(createTraceProcessor), + processorhelper.WithMetrics(createMetricsProcessor)) +} + +func createDefaultConfig() configmodels.Processor { + return &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + NumWorkers: 10, + QueueSize: 5000, + RetryOnFailure: true, + BackoffDelay: time.Second * 5, + } +} + +func createTraceProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, +) (component.TracesProcessor, error) { + params.Logger.Warn("QueuedRetry processor is deprecated. Use exporter's queued retry config.") + return newQueuedTracesProcessor(params, nextConsumer, cfg.(*Config)), nil +} + +func createMetricsProcessor( + _ context.Context, + params component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.MetricsConsumer, +) (component.MetricsProcessor, error) { + params.Logger.Warn("QueuedRetry processor is deprecated. 
Use exporter's queued retry config.") + return newQueuedMetricsProcessor(params, nextConsumer, cfg.(*Config)), nil +} diff --git a/internal/otel_collector/processor/queuedprocessor/factory_test.go b/internal/otel_collector/processor/queuedprocessor/factory_test.go new file mode 100644 index 00000000000..a4a69cc9f8b --- /dev/null +++ b/internal/otel_collector/processor/queuedprocessor/factory_test.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package queuedprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateProcessor(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + tp, err := factory.CreateTracesProcessor(context.Background(), creationParams, cfg, nil) + assert.NotNil(t, tp) + assert.NoError(t, err, "cannot create trace processor") + + mp, err := factory.CreateMetricsProcessor(context.Background(), creationParams, cfg, nil) + assert.NotNil(t, mp) + assert.NoError(t, err, "cannot create metrics processor") +} diff --git a/internal/otel_collector/processor/queuedprocessor/metrics.go b/internal/otel_collector/processor/queuedprocessor/metrics.go new file mode 100644 index 00000000000..ccb39e1b4b4 --- /dev/null +++ b/internal/otel_collector/processor/queuedprocessor/metrics.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package queuedprocessor + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/processor" +) + +// Variables related to metrics specific to queued processor. 
+var (
+	statInQueueLatencyMs = stats.Int64("queue_latency", "Latency (in milliseconds) that a batch stayed in queue", stats.UnitMilliseconds)
+	statSendLatencyMs    = stats.Int64("send_latency", "Latency (in milliseconds) to send a batch", stats.UnitMilliseconds)
+	statSuccessSendOps   = stats.Int64("success_send", "Number of successful send operations", stats.UnitDimensionless)
+	statFailedSendOps    = stats.Int64("fail_send", "Number of failed send operations", stats.UnitDimensionless)
+	statQueueLength      = stats.Int64("queue_length", "Current length of the queue (in batches)", stats.UnitDimensionless)
+
+	latencyDistributionAggregation = view.Distribution(10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000)
+
+	queueLengthView = &view.View{
+		Name:        statQueueLength.Name(),
+		Measure:     statQueueLength,
+		Description: "Current number of batches in the queue",
+		TagKeys:     []tag.Key{processor.TagProcessorNameKey},
+		Aggregation: view.LastValue(),
+	}
+	sendLatencyView = &view.View{
+		Name:        statSendLatencyMs.Name(),
+		Measure:     statSendLatencyMs,
+		Description: "The latency of the successful send operations.",
+		TagKeys:     []tag.Key{processor.TagProcessorNameKey},
+		Aggregation: latencyDistributionAggregation,
+	}
+	inQueueLatencyView = &view.View{
+		Name:        statInQueueLatencyMs.Name(),
+		Measure:     statInQueueLatencyMs,
+		Description: "The \"in queue\" latency of the successful send operations.",
+		TagKeys:     []tag.Key{processor.TagProcessorNameKey},
+		Aggregation: latencyDistributionAggregation,
+	}
+)
+
+// MetricViews returns the metric views according to the given telemetry level.
+func MetricViews() []*view.View {
+	tagKeys := processor.MetricTagKeys()
+
+	countSuccessSendView := &view.View{
+		Name:        statSuccessSendOps.Name(),
+		Measure:     statSuccessSendOps,
+		Description: "The number of successful send operations performed by queued_retry processor",
+		TagKeys:     tagKeys,
+		Aggregation: view.Sum(),
+	}
+	countFailuresSendView := &view.View{
+		Name:        statFailedSendOps.Name(),
+		Measure:     statFailedSendOps,
+		Description: "The number of failed send operations performed by queued_retry processor",
+		TagKeys:     tagKeys,
+		Aggregation: view.Sum(),
+	}
+
+	legacyViews := []*view.View{queueLengthView, countSuccessSendView, countFailuresSendView, sendLatencyView, inQueueLatencyView}
+
+	return obsreport.ProcessorMetricViews(typeStr, legacyViews)
+}
diff --git a/internal/otel_collector/processor/queuedprocessor/queued_processor.go b/internal/otel_collector/processor/queuedprocessor/queued_processor.go
new file mode 100644
index 00000000000..cbe52810084
--- /dev/null
+++ b/internal/otel_collector/processor/queuedprocessor/queued_processor.go
@@ -0,0 +1,345 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package queuedprocessor
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/jaegertracing/jaeger/pkg/queue"
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+	"go.opentelemetry.io/collector/processor"
+)
+
+type queuedProcessor struct {
+	name                     string
+	queue                    *queue.BoundedQueue
+	logger                   *zap.Logger
+	traceNext                consumer.TracesConsumer
+	metricNext               consumer.MetricsConsumer
+	numWorkers               int
+	retryOnProcessingFailure bool
+	backoffDelay             time.Duration
+	stopCh                   chan struct{}
+	stopOnce                 sync.Once
+	obsrep                   *obsreport.ProcessorObsReport
+}
+
+var _ consumer.TracesConsumer = (*queuedProcessor)(nil)
+var errorRefused = errors.New("failed to add to the queue")
+
+type queueItem interface {
+	context() context.Context
+	queuedTime() time.Time
+	export(sp *queuedProcessor) error
+	onAccepted()
+	// Returns a new queue item that contains the items left to be exported.
+	onPartialError(partialErr consumererror.PartialError) queueItem
+	onRefused(logger *zap.Logger, err error)
+	onDropped(logger *zap.Logger, err error)
+}
+
+type baseQueueItem struct {
+	ctx    context.Context
+	qt     time.Time
+	obsrep *obsreport.ProcessorObsReport
+}
+
+func (item *baseQueueItem) context() context.Context {
+	return item.ctx
+}
+
+func (item *baseQueueItem) queuedTime() time.Time {
+	return item.qt
+}
+
+type traceQueueItem struct {
+	baseQueueItem
+	td             pdata.Traces
+	spanCountStats *processor.SpanCountStats
+}
+
+func newTraceQueueItem(ctx context.Context, td pdata.Traces, obsrep *obsreport.ProcessorObsReport) queueItem {
+	return &traceQueueItem{
+		baseQueueItem:  baseQueueItem{ctx: ctx, qt: time.Now(), obsrep: obsrep},
+		td:             td,
+		spanCountStats: processor.NewSpanCountStats(td),
+	}
+}
+
+func (item *traceQueueItem) onAccepted() {
+	processor.RecordsSpanCountMetrics(item.ctx, item.spanCountStats, processor.StatReceivedSpanCount)
+	item.obsrep.TracesAccepted(item.ctx, item.spanCountStats.GetAllSpansCount())
+}
+
+func (item *traceQueueItem) onPartialError(partialErr consumererror.PartialError) queueItem {
+	return newTraceQueueItem(item.ctx, partialErr.GetTraces(), item.obsrep)
+}
+
+func (item *traceQueueItem) onRefused(logger *zap.Logger, err error) {
+	// Count the StatReceivedSpanCount even if items were refused.
+	processor.RecordsSpanCountMetrics(item.ctx, item.spanCountStats, processor.StatReceivedSpanCount)
+
+	item.obsrep.TracesRefused(item.ctx, item.spanCountStats.GetAllSpansCount())
+
+	// TODO: in principle this may not end in data loss because this can be
+	// in the same call stack as the receiver, i.e. the call from the receiver
+	// to here is synchronous. This means that actually it could be proper to
+	// record this as "refused" instead of "dropped".
+ stats.Record(item.ctx, processor.StatTraceBatchesDroppedCount.M(int64(1))) + processor.RecordsSpanCountMetrics(item.ctx, item.spanCountStats, processor.StatDroppedSpanCount) + + logger.Error("Failed to process batch, refused", zap.Int("#spans", item.spanCountStats.GetAllSpansCount()), zap.Error(err)) +} + +func (item *traceQueueItem) onDropped(logger *zap.Logger, err error) { + item.obsrep.TracesDropped(item.ctx, item.spanCountStats.GetAllSpansCount()) + + stats.Record(item.ctx, processor.StatTraceBatchesDroppedCount.M(int64(1))) + processor.RecordsSpanCountMetrics(item.ctx, item.spanCountStats, processor.StatDroppedSpanCount) + logger.Error("Failed to process batch, discarding", zap.Int("#spans", item.spanCountStats.GetAllSpansCount()), zap.Error(err)) +} + +func (item *traceQueueItem) export(sp *queuedProcessor) error { + return sp.traceNext.ConsumeTraces(item.ctx, item.td) +} + +type metricsQueueItem struct { + baseQueueItem + md pdata.Metrics + numPoints int +} + +func newMetricsQueueItem(ctx context.Context, md pdata.Metrics, obsrep *obsreport.ProcessorObsReport) queueItem { + _, numPoints := md.MetricAndDataPointCount() + return &metricsQueueItem{ + baseQueueItem: baseQueueItem{ctx: ctx, qt: time.Now(), obsrep: obsrep}, + md: md, + numPoints: numPoints, + } +} + +func (item *metricsQueueItem) onAccepted() { + item.obsrep.MetricsAccepted(item.ctx, item.numPoints) +} + +func (item *metricsQueueItem) onPartialError(consumererror.PartialError) queueItem { + // TODO: implement this. + return item +} + +func (item *metricsQueueItem) onRefused(logger *zap.Logger, err error) { + item.obsrep.MetricsRefused(item.ctx, item.numPoints) + + logger.Error("Failed to process batch, refused", zap.Int("#points", item.numPoints), zap.Error(err)) +} + +func (item *metricsQueueItem) onDropped(logger *zap.Logger, err error) { + stats.Record(item.ctx, processor.StatTraceBatchesDroppedCount.M(int64(1))) + item.obsrep.MetricsDropped(item.ctx, item.numPoints) + + logger.Error("Failed to process batch, discarding", zap.Int("#points", item.numPoints), zap.Error(err)) +} + +func (item *metricsQueueItem) export(sp *queuedProcessor) error { + return sp.metricNext.ConsumeMetrics(item.ctx, item.md) +} + +func newQueuedTracesProcessor( + params component.ProcessorCreateParams, + nextConsumer consumer.TracesConsumer, + cfg *Config, +) *queuedProcessor { + return &queuedProcessor{ + name: cfg.Name(), + queue: queue.NewBoundedQueue(cfg.QueueSize, func(item interface{}) {}), + logger: params.Logger, + numWorkers: cfg.NumWorkers, + traceNext: nextConsumer, + metricNext: nil, + retryOnProcessingFailure: cfg.RetryOnFailure, + backoffDelay: cfg.BackoffDelay, + stopCh: make(chan struct{}), + obsrep: obsreport.NewProcessorObsReport(configtelemetry.GetMetricsLevelFlagValue(), cfg.Name()), + } +} + +func newQueuedMetricsProcessor( + params component.ProcessorCreateParams, + nextConsumer consumer.MetricsConsumer, + cfg *Config, +) *queuedProcessor { + return &queuedProcessor{ + name: cfg.Name(), + queue: queue.NewBoundedQueue(cfg.QueueSize, func(item interface{}) {}), + logger: params.Logger, + numWorkers: cfg.NumWorkers, + traceNext: nil, + metricNext: nextConsumer, + retryOnProcessingFailure: cfg.RetryOnFailure, + backoffDelay: cfg.BackoffDelay, + stopCh: make(chan struct{}), + obsrep: obsreport.NewProcessorObsReport(configtelemetry.GetMetricsLevelFlagValue(), cfg.Name()), + } +} + +// Start is invoked during service startup. 
+func (sp *queuedProcessor) Start(ctx context.Context, _ component.Host) error {
+	// emit 0's so that the metric is present and reported, rather than absent
+	statsTags := []tag.Mutator{tag.Insert(processor.TagProcessorNameKey, sp.name)}
+	_ = stats.RecordWithTags(
+		ctx,
+		statsTags,
+		processor.StatTraceBatchesDroppedCount.M(int64(0)),
+		processor.StatDroppedSpanCount.M(int64(0)))
+
+	sp.queue.StartConsumers(sp.numWorkers, func(item interface{}) {
+		value := item.(queueItem)
+		sp.processItemFromQueue(value)
+	})
+
+	// Start a timer to report the queue length.
+	ticker := time.NewTicker(1 * time.Second)
+	go func() {
+		defer ticker.Stop()
+		for {
+			select {
+			case <-sp.stopCh:
+				return
+			case <-ticker.C:
+				_ = stats.RecordWithTags(
+					context.Background(),
+					statsTags,
+					statQueueLength.M(int64(sp.queue.Size())))
+			}
+		}
+	}()
+
+	return nil
+}
+
+// ConsumeTraces implements the TracesProcessor interface.
+func (sp *queuedProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	item := newTraceQueueItem(ctx, td, sp.obsrep)
+
+	addedToQueue := sp.queue.Produce(item)
+	if !addedToQueue {
+		item.onRefused(sp.logger, errorRefused)
+		return errorRefused
+	}
+
+	item.onAccepted()
+	return nil
+}
+
+// ConsumeMetrics implements the MetricsProcessor interface.
+func (sp *queuedProcessor) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error {
+	item := newMetricsQueueItem(ctx, md, sp.obsrep)
+
+	addedToQueue := sp.queue.Produce(item)
+	if !addedToQueue {
+		item.onRefused(sp.logger, errorRefused)
+		return errorRefused
+	}
+
+	item.onAccepted()
+	return nil
+}
+
+func (sp *queuedProcessor) GetCapabilities() component.ProcessorCapabilities {
+	return component.ProcessorCapabilities{MutatesConsumedData: false}
+}
+
+// Shutdown is invoked during service shutdown.
+func (sp *queuedProcessor) Shutdown(context.Context) error {
+	err := componenterror.ErrAlreadyStopped
+	sp.stopOnce.Do(func() {
+		err = nil
+		close(sp.stopCh)
+		sp.queue.Stop()
+	})
+	return err
+}
+
+func (sp *queuedProcessor) processItemFromQueue(item queueItem) {
+	startTime := time.Now()
+	err := item.export(sp)
+	if err == nil {
+		// Record latency metrics and return
+		sendLatencyMs := int64(time.Since(startTime) / time.Millisecond)
+		inQueueLatencyMs := int64(time.Since(item.queuedTime()) / time.Millisecond)
+		stats.Record(item.context(),
+			statSuccessSendOps.M(1),
+			statSendLatencyMs.M(sendLatencyMs),
+			statInQueueLatencyMs.M(inQueueLatencyMs))
+
+		return
+	}
+
+	// There was an error
+	stats.Record(item.context(), statFailedSendOps.M(1))
+
+	// Immediately drop data on permanent errors.
+	if consumererror.IsPermanent(err) {
+		// throw away the batch
+		item.onDropped(sp.logger, err)
+		return
+	}
+
+	// If partial error, update data and stats with non exported data.
+	if partialErr, isPartial := err.(consumererror.PartialError); isPartial {
+		item = item.onPartialError(partialErr)
+	}
+
+	// Immediately drop data if no retries are configured.
+	if !sp.retryOnProcessingFailure {
+		// throw away the batch
+		item.onDropped(sp.logger, fmt.Errorf("no retries configured: %w", err))
+		return
+	}
+
+	// TODO: (@pjanotti) do not put it back on the end of the queue, retry with it directly.
+	// This will have the benefit of keeping the batch closer to related ones in time.
+ if !sp.queue.Produce(item) { + item.onDropped(sp.logger, fmt.Errorf("failed to re-enqueue: %w", err)) + return + } + + // back-off for configured delay, but get interrupted when shutting down + if sp.backoffDelay > 0 { + sp.logger.Warn("Backing off before next attempt", zap.Duration("backoff_delay", sp.backoffDelay)) + select { + case <-sp.stopCh: + sp.logger.Info("Interrupted due to shutdown") + break + case <-time.After(sp.backoffDelay): + sp.logger.Info("Resume processing") + break + } + } +} diff --git a/internal/otel_collector/processor/queuedprocessor/queued_processor_test.go b/internal/otel_collector/processor/queuedprocessor/queued_processor_test.go new file mode 100644 index 00000000000..9dc712263b2 --- /dev/null +++ b/internal/otel_collector/processor/queuedprocessor/queued_processor_test.go @@ -0,0 +1,449 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package queuedprocessor + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport/obsreporttest" + "go.opentelemetry.io/collector/processor" +) + +func TestTraceQueueProcessor_NoEnqueueOnPermanentError(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + td := testdata.GenerateTraceDataOneSpan() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(consumererror.Permanent(errors.New("bad data"))) + + cfg := createDefaultConfig().(*Config) + cfg.RetryOnFailure = true + cfg.BackoffDelay = time.Hour + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + qp := newQueuedTracesProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. 
+ require.NoError(t, qp.ConsumeTraces(context.Background(), td)) + }) + mockP.awaitAsyncProcessing() + <-time.After(200 * time.Millisecond) + require.Zero(t, qp.queue.Size()) + obsreporttest.CheckProcessorTracesViews(t, cfg.Name(), 1, 0, 1) +} + +func TestTraceQueueProcessor_EnqueueOnNoRetry(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + td := testdata.GenerateTraceDataOneSpan() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(errors.New("transient error")) + + cfg := createDefaultConfig().(*Config) + cfg.RetryOnFailure = false + cfg.BackoffDelay = 0 + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + qp := newQueuedTracesProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + require.NoError(t, qp.ConsumeTraces(context.Background(), td)) + }) + mockP.awaitAsyncProcessing() + <-time.After(200 * time.Millisecond) + require.Zero(t, qp.queue.Size()) + obsreporttest.CheckProcessorTracesViews(t, cfg.Name(), 1, 0, 1) +} + +func TestTraceQueueProcessor_PartialError(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + partialErr := consumererror.PartialTracesError(errors.New("some error"), testdata.GenerateTraceDataOneSpan()) + td := testdata.GenerateTraceDataTwoSpansSameResource() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(partialErr) + + cfg := createDefaultConfig().(*Config) + cfg.NumWorkers = 1 + cfg.RetryOnFailure = true + cfg.BackoffDelay = time.Second + + qp := newQueuedTracesProcessor(component.ProcessorCreateParams{Logger: zap.NewNop()}, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + require.NoError(t, qp.ConsumeTraces(context.Background(), td)) + }) + mockP.awaitAsyncProcessing() + // There is a small race condition in this test, but expect to execute this in less than 1 second. + mockP.updateError(nil) + mockP.waitGroup.Add(1) + mockP.awaitAsyncProcessing() + + mockP.checkNumBatches(t, 2) + mockP.checkNumSpans(t, 2+1) + + obsreporttest.CheckProcessorTracesViews(t, cfg.Name(), 2, 0, 0) +} + +func TestTraceQueueProcessor_EnqueueOnError(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + td := testdata.GenerateTraceDataOneSpan() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(errors.New("transient error")) + + cfg := createDefaultConfig().(*Config) + cfg.NumWorkers = 1 + cfg.QueueSize = 1 + cfg.RetryOnFailure = true + cfg.BackoffDelay = time.Hour + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + qp := newQueuedTracesProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. 
+ require.NoError(t, qp.ConsumeTraces(context.Background(), td)) + }) + mockP.awaitAsyncProcessing() + <-time.After(200 * time.Millisecond) + require.Equal(t, 1, qp.queue.Size()) + + mockP.run(func() { + // The queue is full, cannot enqueue other item + require.Error(t, qp.ConsumeTraces(context.Background(), td)) + }) + obsreporttest.CheckProcessorTracesViews(t, cfg.Name(), 1, 1, 0) +} + +func TestMetricsQueueProcessor_NoEnqueueOnPermanentError(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + md := testdata.GenerateMetricsTwoMetrics() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(consumererror.Permanent(errors.New("bad data"))) + + cfg := createDefaultConfig().(*Config) + cfg.RetryOnFailure = true + cfg.BackoffDelay = time.Hour + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + qp := newQueuedMetricsProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + require.NoError(t, qp.ConsumeMetrics(context.Background(), md)) + }) + mockP.awaitAsyncProcessing() + <-time.After(200 * time.Millisecond) + require.Zero(t, qp.queue.Size()) + obsreporttest.CheckProcessorMetricsViews(t, cfg.Name(), 4, 0, 4) +} + +func TestMetricsQueueProcessor_NoEnqueueOnNoRetry(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + md := testdata.GenerateMetricsTwoMetrics() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(errors.New("transient error")) + + cfg := createDefaultConfig().(*Config) + cfg.RetryOnFailure = false + cfg.BackoffDelay = 0 + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + qp := newQueuedMetricsProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. + require.NoError(t, qp.ConsumeMetrics(context.Background(), md)) + }) + mockP.awaitAsyncProcessing() + <-time.After(200 * time.Millisecond) + require.Zero(t, qp.queue.Size()) + obsreporttest.CheckProcessorMetricsViews(t, cfg.Name(), 4, 0, 4) +} + +func TestMetricsQueueProcessor_EnqueueOnError(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + md := testdata.GenerateMetricsTwoMetrics() + + mockP := newMockConcurrentSpanProcessor() + mockP.updateError(errors.New("transient error")) + + cfg := createDefaultConfig().(*Config) + cfg.NumWorkers = 1 + cfg.QueueSize = 1 + cfg.RetryOnFailure = true + cfg.BackoffDelay = time.Hour + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + + qp := newQueuedMetricsProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + mockP.run(func() { + // This is asynchronous so it should just enqueue, no errors expected. 
+ require.NoError(t, qp.ConsumeMetrics(context.Background(), md)) + }) + mockP.awaitAsyncProcessing() + <-time.After(200 * time.Millisecond) + require.Equal(t, 1, qp.queue.Size()) + + mockP.run(func() { + // The queue is full, cannot enqueue other item + require.Error(t, qp.ConsumeMetrics(context.Background(), md)) + }) + obsreporttest.CheckProcessorMetricsViews(t, cfg.Name(), 4, 4, 0) +} + +func TestTraceQueueProcessorHappyPath(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + views := processor.MetricViews() + assert.NoError(t, view.Register(views...)) + defer view.Unregister(views...) + + mockP := newMockConcurrentSpanProcessor() + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + cfg := createDefaultConfig().(*Config) + qp := newQueuedTracesProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + mockP.stop() + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + wantBatches := 10 + wantSpans := 20 + for i := 0; i < wantBatches; i++ { + td := testdata.GenerateTraceDataTwoSpansSameResource() + mockP.run(func() { + require.NoError(t, qp.ConsumeTraces(context.Background(), td)) + }) + } + + // Wait until all batches received + mockP.awaitAsyncProcessing() + + mockP.checkNumBatches(t, wantBatches) + mockP.checkNumSpans(t, wantSpans) + + droppedView, err := findViewNamed(views, "processor/"+processor.StatDroppedSpanCount.Name()) + require.NoError(t, err) + + data, err := view.RetrieveData(droppedView.Name) + require.NoError(t, err) + require.Len(t, data, 1) + assert.Equal(t, 0.0, data[0].Data.(*view.SumData).Value) + + data, err = view.RetrieveData("processor/" + processor.StatTraceBatchesDroppedCount.Name()) + require.NoError(t, err) + assert.Equal(t, 0.0, data[0].Data.(*view.SumData).Value) + obsreporttest.CheckProcessorTracesViews(t, cfg.Name(), int64(wantSpans), 0, 0) +} + +func TestMetricsQueueProcessorHappyPath(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + mockP := newMockConcurrentSpanProcessor() + creationParams := component.ProcessorCreateParams{Logger: zap.NewNop()} + cfg := createDefaultConfig().(*Config) + qp := newQueuedMetricsProcessor(creationParams, mockP, cfg) + require.NoError(t, qp.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { + assert.NoError(t, qp.Shutdown(context.Background())) + }) + + wantBatches := 10 + wantMetricPoints := 2 * 20 + for i := 0; i < wantBatches; i++ { + md := testdata.GenerateMetricsTwoMetrics() + mockP.run(func() { + require.NoError(t, qp.ConsumeMetrics(context.Background(), md)) + }) + } + + // Wait until all batches received + mockP.awaitAsyncProcessing() + + mockP.checkNumBatches(t, wantBatches) + mockP.checkNumPoints(t, wantMetricPoints) + obsreporttest.CheckProcessorMetricsViews(t, cfg.Name(), int64(wantMetricPoints), 0, 0) +} + +type mockConcurrentSpanProcessor struct { + waitGroup *sync.WaitGroup + mu sync.Mutex + consumeError error + batchCount int64 + spanCount int64 + metricPointsCount int64 + stopped int32 +} + +var _ consumer.TracesConsumer = (*mockConcurrentSpanProcessor)(nil) +var _ consumer.MetricsConsumer = (*mockConcurrentSpanProcessor)(nil) + +func newMockConcurrentSpanProcessor() *mockConcurrentSpanProcessor { + return &mockConcurrentSpanProcessor{waitGroup: new(sync.WaitGroup)} +} + +func (p *mockConcurrentSpanProcessor) ConsumeTraces(_ 
context.Context, td pdata.Traces) error { + if atomic.LoadInt32(&p.stopped) == 1 { + return nil + } + atomic.AddInt64(&p.batchCount, 1) + atomic.AddInt64(&p.spanCount, int64(td.SpanCount())) + p.mu.Lock() + defer p.mu.Unlock() + defer p.waitGroup.Done() + return p.consumeError +} + +func (p *mockConcurrentSpanProcessor) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { + if atomic.LoadInt32(&p.stopped) == 1 { + return nil + } + atomic.AddInt64(&p.batchCount, 1) + _, mpc := md.MetricAndDataPointCount() + atomic.AddInt64(&p.metricPointsCount, int64(mpc)) + p.mu.Lock() + defer p.mu.Unlock() + defer p.waitGroup.Done() + return p.consumeError +} + +func (p *mockConcurrentSpanProcessor) GetCapabilities() component.ProcessorCapabilities { + return component.ProcessorCapabilities{MutatesConsumedData: false} +} + +func (p *mockConcurrentSpanProcessor) checkNumBatches(t *testing.T, want int) { + assert.EqualValues(t, want, atomic.LoadInt64(&p.batchCount)) +} + +func (p *mockConcurrentSpanProcessor) checkNumSpans(t *testing.T, want int) { + assert.EqualValues(t, want, atomic.LoadInt64(&p.spanCount)) +} + +func (p *mockConcurrentSpanProcessor) checkNumPoints(t *testing.T, want int) { + assert.EqualValues(t, want, atomic.LoadInt64(&p.metricPointsCount)) +} + +func (p *mockConcurrentSpanProcessor) updateError(err error) { + p.mu.Lock() + defer p.mu.Unlock() + p.consumeError = err +} + +func (p *mockConcurrentSpanProcessor) run(fn func()) { + p.waitGroup.Add(1) + fn() +} + +func (p *mockConcurrentSpanProcessor) awaitAsyncProcessing() { + p.waitGroup.Wait() +} + +func (p *mockConcurrentSpanProcessor) stop() { + atomic.StoreInt32(&p.stopped, 1) +} + +func findViewNamed(views []*view.View, name string) (*view.View, error) { + for _, v := range views { + if v.Name == name { + return v, nil + } + } + return nil, fmt.Errorf("view %s not found", name) +} diff --git a/internal/otel_collector/processor/queuedprocessor/testdata/config.yaml b/internal/otel_collector/processor/queuedprocessor/testdata/config.yaml new file mode 100644 index 00000000000..095948de2a3 --- /dev/null +++ b/internal/otel_collector/processor/queuedprocessor/testdata/config.yaml @@ -0,0 +1,20 @@ +receivers: + examplereceiver: + +processors: + queued_retry: + queued_retry/2: + num_workers: 2 + queue_size: 10 + retry_on_failure: true + backoff_delay: 5s + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [queued_retry/2] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/resourceprocessor/README.md b/internal/otel_collector/processor/resourceprocessor/README.md new file mode 100644 index 00000000000..222c79fa5ae --- /dev/null +++ b/internal/otel_collector/processor/resourceprocessor/README.md @@ -0,0 +1,28 @@ +# Resource Processor + +Supported pipeline types: metrics, traces, logs + +The resource processor can be used to apply changes on resource attributes. +Please refer to [config.go](./config.go) for the config spec. + +`attributes` represents actions that can be applied on resource attributes. +See processor/attributesprocessor/README.md for more details on supported attributes actions. + +Examples: + +```yaml +processors: + resource: + attributes: + - key: cloud.zone + value: "zone-1" + action: upsert + - key: k8s.cluster.name + from_attribute: k8s-cluster + action: insert + - key: redundant-attribute + action: delete +``` + +Refer to [config.yaml](./testdata/config.yaml) for detailed +examples on using the processor. 
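For readers wiring this up from Go rather than YAML, the same attribute actions can be built against the types this diff adds. Below is a minimal sketch only: the nop consumer, creation params, and `main` scaffolding are assumptions for illustration (mirroring the factory tests later in this diff), not part of this change.

```go
package main

import (
	"context"
	"log"

	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configmodels"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/processor/processorhelper"
	"go.opentelemetry.io/collector/processor/resourceprocessor"
)

func main() {
	// The programmatic equivalent of the YAML example above: upsert cloud.zone,
	// copy k8s-cluster into k8s.cluster.name, and delete redundant-attribute.
	cfg := &resourceprocessor.Config{
		ProcessorSettings: configmodels.ProcessorSettings{TypeVal: "resource", NameVal: "resource"},
		AttributesActions: []processorhelper.ActionKeyValue{
			{Key: "cloud.zone", Value: "zone-1", Action: processorhelper.UPSERT},
			{Key: "k8s.cluster.name", FromAttribute: "k8s-cluster", Action: processorhelper.INSERT},
			{Key: "redundant-attribute", Action: processorhelper.DELETE},
		},
	}

	factory := resourceprocessor.NewFactory()
	params := component.ProcessorCreateParams{Logger: zap.NewNop()}

	// consumertest.NewTracesNop() is a no-op sink; a real pipeline would pass the next consumer.
	tp, err := factory.CreateTracesProcessor(context.Background(), params, cfg, consumertest.NewTracesNop())
	if err != nil {
		log.Fatal(err)
	}
	_ = tp // tp.ConsumeTraces(...) would now rewrite resource attributes before forwarding.
}
```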
diff --git a/internal/otel_collector/processor/resourceprocessor/config.go b/internal/otel_collector/processor/resourceprocessor/config.go
new file mode 100644
index 00000000000..3085241f7d8
--- /dev/null
+++ b/internal/otel_collector/processor/resourceprocessor/config.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resourceprocessor
+
+import (
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/processor/processorhelper"
+)
+
+// Config defines configuration for Resource processor.
+type Config struct {
+	configmodels.ProcessorSettings `mapstructure:",squash"`
+
+	// AttributesActions specifies the list of actions to be applied on resource attributes.
+	// The supported actions are {INSERT, UPDATE, UPSERT, DELETE, HASH, EXTRACT}.
+	AttributesActions []processorhelper.ActionKeyValue `mapstructure:"attributes"`
+
+	// ResourceType is deprecated. Set the "opencensus.resourcetype" key via an upsert action in "attributes" instead.
+	ResourceType string `mapstructure:"type"`
+
+	// Deprecated: Use upsert actions in "attributes" instead.
+	Labels map[string]string `mapstructure:"labels"`
+}
diff --git a/internal/otel_collector/processor/resourceprocessor/config_test.go b/internal/otel_collector/processor/resourceprocessor/config_test.go
new file mode 100644
index 00000000000..68dfffd5c3a
--- /dev/null
+++ b/internal/otel_collector/processor/resourceprocessor/config_test.go
@@ -0,0 +1,57 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package resourceprocessor + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factories.Processors[typeStr] = NewFactory() + + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + assert.NoError(t, err) + assert.NotNil(t, cfg) + + assert.Equal(t, cfg.Processors["resource"], &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "resource", + NameVal: "resource", + }, + AttributesActions: []processorhelper.ActionKeyValue{ + {Key: "cloud.zone", Value: "zone-1", Action: processorhelper.UPSERT}, + {Key: "k8s.cluster.name", FromAttribute: "k8s-cluster", Action: processorhelper.INSERT}, + {Key: "redundant-attribute", Action: processorhelper.DELETE}, + }, + }) + + assert.Equal(t, cfg.Processors["resource/invalid"], &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "resource", + NameVal: "resource/invalid", + }, + }) +} diff --git a/internal/otel_collector/processor/resourceprocessor/doc.go b/internal/otel_collector/processor/resourceprocessor/doc.go new file mode 100644 index 00000000000..7e350de1f07 --- /dev/null +++ b/internal/otel_collector/processor/resourceprocessor/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resourceprocessor implements a processor for specifying resource +// labels to be added to OpenCensus trace data and metrics data. +package resourceprocessor diff --git a/internal/otel_collector/processor/resourceprocessor/factory.go b/internal/otel_collector/processor/resourceprocessor/factory.go new file mode 100644 index 00000000000..b76ef57398e --- /dev/null +++ b/internal/otel_collector/processor/resourceprocessor/factory.go @@ -0,0 +1,141 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package resourceprocessor
+
+import (
+	"context"
+	"fmt"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/processor/processorhelper"
+	"go.opentelemetry.io/collector/translator/conventions"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr = "resource"
+)
+
+var processorCapabilities = component.ProcessorCapabilities{MutatesConsumedData: true}
+
+// NewFactory returns a new factory for the Resource processor.
+func NewFactory() component.ProcessorFactory {
+	return processorhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		processorhelper.WithTraces(createTraceProcessor),
+		processorhelper.WithMetrics(createMetricsProcessor),
+		processorhelper.WithLogs(createLogsProcessor))
+}
+
+// Note: This isn't a valid configuration because the processor would do no work.
+func createDefaultConfig() configmodels.Processor {
+	return &Config{
+		ProcessorSettings: configmodels.ProcessorSettings{
+			TypeVal: typeStr,
+			NameVal: typeStr,
+		},
+	}
+}
+
+func createTraceProcessor(
+	_ context.Context,
+	params component.ProcessorCreateParams,
+	cfg configmodels.Processor,
+	nextConsumer consumer.TracesConsumer) (component.TracesProcessor, error) {
+	attrProc, err := createAttrProcessor(cfg.(*Config), params.Logger)
+	if err != nil {
+		return nil, err
+	}
+	return processorhelper.NewTraceProcessor(
+		cfg,
+		nextConsumer,
+		&resourceProcessor{attrProc: attrProc},
+		processorhelper.WithCapabilities(processorCapabilities))
+}
+
+func createMetricsProcessor(
+	_ context.Context,
+	params component.ProcessorCreateParams,
+	cfg configmodels.Processor,
+	nextConsumer consumer.MetricsConsumer) (component.MetricsProcessor, error) {
+	attrProc, err := createAttrProcessor(cfg.(*Config), params.Logger)
+	if err != nil {
+		return nil, err
+	}
+	return processorhelper.NewMetricsProcessor(
+		cfg,
+		nextConsumer,
+		&resourceProcessor{attrProc: attrProc},
+		processorhelper.WithCapabilities(processorCapabilities))
+}
+
+func createLogsProcessor(
+	_ context.Context,
+	params component.ProcessorCreateParams,
+	cfg configmodels.Processor,
+	nextConsumer consumer.LogsConsumer) (component.LogsProcessor, error) {
+	attrProc, err := createAttrProcessor(cfg.(*Config), params.Logger)
+	if err != nil {
+		return nil, err
+	}
+	return processorhelper.NewLogsProcessor(
+		cfg,
+		nextConsumer,
+		&resourceProcessor{attrProc: attrProc},
+		processorhelper.WithCapabilities(processorCapabilities))
+}
+
+func createAttrProcessor(cfg *Config, logger *zap.Logger) (*processorhelper.AttrProc, error) {
+	handleDeprecatedFields(cfg, logger)
+	if len(cfg.AttributesActions) == 0 {
+		return nil, fmt.Errorf("error creating %q processor due to missing required field \"attributes\"", cfg.Name())
+	}
+	attrProc, err := processorhelper.NewAttrProc(&processorhelper.Settings{Actions: cfg.AttributesActions})
+	if err != nil {
+		return nil, fmt.Errorf("error creating %q processor: %w", cfg.Name(), err)
+	}
+	return attrProc, nil
+}
+
+// handleDeprecatedFields converts the deprecated ResourceType and Labels fields into upsert attribute actions.
+func handleDeprecatedFields(cfg *Config, logger *zap.Logger) {
+
+	// Upsert value from deprecated ResourceType config to resource attributes with "opencensus.resourcetype" key
+	if cfg.ResourceType != "" {
+		logger.Warn("[DEPRECATED] The \"type\" field is deprecated and will be removed in a future release.
" + + "Please set the value to \"attributes\" with key=opencensus.resourcetype and action=upsert.") + upsertResourceType := processorhelper.ActionKeyValue{ + Action: processorhelper.UPSERT, + Key: conventions.OCAttributeResourceType, + Value: cfg.ResourceType, + } + cfg.AttributesActions = append(cfg.AttributesActions, upsertResourceType) + } + + // Upsert values from deprecated Labels config to resource attributes + if len(cfg.Labels) > 0 { + logger.Warn("[DEPRECATED] \"labels\" field is deprecated and will be removed in future release. " + + "Please use \"attributes\" field instead.") + for k, v := range cfg.Labels { + action := processorhelper.ActionKeyValue{Action: processorhelper.UPSERT, Key: k, Value: v} + cfg.AttributesActions = append(cfg.AttributesActions, action) + } + } +} diff --git a/internal/otel_collector/processor/resourceprocessor/factory_test.go b/internal/otel_collector/processor/resourceprocessor/factory_test.go new file mode 100644 index 00000000000..8fe08c12f78 --- /dev/null +++ b/internal/otel_collector/processor/resourceprocessor/factory_test.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resourceprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, configcheck.ValidateConfig(cfg)) + assert.NotNil(t, cfg) +} + +func TestCreateProcessor(t *testing.T) { + factory := NewFactory() + cfg := &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "resource", + NameVal: "resource", + }, + AttributesActions: []processorhelper.ActionKeyValue{ + {Key: "cloud.zone", Value: "zone-1", Action: processorhelper.UPSERT}, + }, + } + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + assert.NoError(t, err) + assert.NotNil(t, tp) + + mp, err := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewMetricsNop()) + assert.NoError(t, err) + assert.NotNil(t, mp) +} + +func TestInvalidEmptyActions(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + _, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop()) + assert.Error(t, err) + + _, err = factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewMetricsNop()) + assert.Error(t, err) +} + +func TestInvalidAttributeActions(t *testing.T) { + factory := NewFactory() + cfg := &Config{ + ProcessorSettings: 
configmodels.ProcessorSettings{ + TypeVal: "resource", + NameVal: "resource", + }, + AttributesActions: []processorhelper.ActionKeyValue{ + {Key: "k", Value: "v", Action: "invalid-action"}, + }, + } + + _, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, nil) + assert.Error(t, err) + + _, err = factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, nil) + assert.Error(t, err) +} + +func TestDeprecatedConfig(t *testing.T) { + cfg := &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "resource", + NameVal: "resource", + }, + ResourceType: "host", + Labels: map[string]string{ + "cloud.zone": "zone-1", + }, + } + + handleDeprecatedFields(cfg, zap.NewNop()) + + assert.EqualValues(t, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: "resource", + NameVal: "resource", + }, + ResourceType: "host", + Labels: map[string]string{ + "cloud.zone": "zone-1", + }, + AttributesActions: []processorhelper.ActionKeyValue{ + {Key: "opencensus.resourcetype", Value: "host", Action: processorhelper.UPSERT}, + {Key: "cloud.zone", Value: "zone-1", Action: processorhelper.UPSERT}, + }, + }, cfg) +} diff --git a/internal/otel_collector/processor/resourceprocessor/resource_processor.go b/internal/otel_collector/processor/resourceprocessor/resource_processor.go new file mode 100644 index 00000000000..a9595f0b243 --- /dev/null +++ b/internal/otel_collector/processor/resourceprocessor/resource_processor.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resourceprocessor + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +type resourceProcessor struct { + attrProc *processorhelper.AttrProc +} + +// ProcessTraces implements the TProcessor interface +func (rp *resourceProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + resource := rss.At(i).Resource() + attrs := resource.Attributes() + rp.attrProc.Process(attrs) + } + return td, nil +} + +// ProcessMetrics implements the MProcessor interface +func (rp *resourceProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + resource := rms.At(i).Resource() + if resource.Attributes().Len() == 0 { + resource.Attributes().InitEmptyWithCapacity(1) + } + rp.attrProc.Process(resource.Attributes()) + } + return md, nil +} + +// ProcessLogs implements the LProcessor interface +func (rp *resourceProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { + rls := ld.ResourceLogs() + for i := 0; i < rls.Len(); i++ { + resource := rls.At(i).Resource() + attrs := resource.Attributes() + rp.attrProc.Process(attrs) + } + return ld, nil +} diff --git a/internal/otel_collector/processor/resourceprocessor/resource_processor_test.go b/internal/otel_collector/processor/resourceprocessor/resource_processor_test.go new file mode 100644 index 00000000000..bc9b14901ec --- /dev/null +++ b/internal/otel_collector/processor/resourceprocessor/resource_processor_test.go @@ -0,0 +1,247 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package resourceprocessor
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal/testdata"
+	"go.opentelemetry.io/collector/processor/processorhelper"
+)
+
+var (
+	processorSettings = configmodels.ProcessorSettings{
+		TypeVal: "resource",
+		NameVal: "resource",
+	}
+
+	cfg = &Config{
+		ProcessorSettings: processorSettings,
+		AttributesActions: []processorhelper.ActionKeyValue{
+			{Key: "cloud.zone", Value: "zone-1", Action: processorhelper.UPSERT},
+			{Key: "k8s.cluster.name", FromAttribute: "k8s-cluster", Action: processorhelper.INSERT},
+			{Key: "redundant-attribute", Action: processorhelper.DELETE},
+		},
+	}
+)
+
+func TestResourceProcessorAttributesUpsert(t *testing.T) {
+	tests := []struct {
+		name             string
+		config           *Config
+		sourceAttributes map[string]string
+		wantAttributes   map[string]string
+	}{
+		{
+			name:             "config_with_attributes_applied_on_nil_resource",
+			config:           cfg,
+			sourceAttributes: nil,
+			wantAttributes: map[string]string{
+				"cloud.zone": "zone-1",
+			},
+		},
+		{
+			name:             "config_with_attributes_applied_on_empty_resource",
+			config:           cfg,
+			sourceAttributes: map[string]string{},
+			wantAttributes: map[string]string{
+				"cloud.zone": "zone-1",
+			},
+		},
+		{
+			name:   "config_attributes_applied_on_existing_resource_attributes",
+			config: cfg,
+			sourceAttributes: map[string]string{
+				"cloud.zone":          "to-be-replaced",
+				"k8s-cluster":         "test-cluster",
+				"redundant-attribute": "to-be-removed",
+			},
+			wantAttributes: map[string]string{
+				"cloud.zone":       "zone-1",
+				"k8s-cluster":      "test-cluster",
+				"k8s.cluster.name": "test-cluster",
+			},
+		},
+		{
+			name: "config_attributes_replacement",
+			config: &Config{
+				ProcessorSettings: processorSettings,
+				AttributesActions: []processorhelper.ActionKeyValue{
+					{Key: "k8s.cluster.name", FromAttribute: "k8s-cluster", Action: processorhelper.INSERT},
+					{Key: "k8s-cluster", Action: processorhelper.DELETE},
+				},
+			},
+			sourceAttributes: map[string]string{
+				"k8s-cluster": "test-cluster",
+			},
+			wantAttributes: map[string]string{
+				"k8s.cluster.name": "test-cluster",
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test trace consumer
+			ttn := &testTraceConsumer{}
+
+			factory := NewFactory()
+			rtp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, tt.config, ttn)
+			require.NoError(t, err)
+			assert.True(t, rtp.GetCapabilities().MutatesConsumedData)
+
+			sourceTraceData := generateTraceData(tt.sourceAttributes)
+			wantTraceData := generateTraceData(tt.wantAttributes)
+			err = rtp.ConsumeTraces(context.Background(), sourceTraceData)
+			require.NoError(t, err)
+			assert.EqualValues(t, wantTraceData, ttn.td)
+
+			// Test metrics consumer
+			tmn := &testMetricsConsumer{}
+			rmp, err := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, tt.config, tmn)
+			require.NoError(t, err)
+			assert.True(t, rmp.GetCapabilities().MutatesConsumedData)
+
+			sourceMetricData := generateMetricData(tt.sourceAttributes)
+			wantMetricData := generateMetricData(tt.wantAttributes)
+			err = rmp.ConsumeMetrics(context.Background(), sourceMetricData)
+			require.NoError(t, err)
+			assert.EqualValues(t, wantMetricData, tmn.md)
+
+			// Test logs consumer
+			tln := &testLogsConsumer{}
+			rlp, err :=
factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, tt.config, tln)
+			require.NoError(t, err)
+			assert.True(t, rlp.GetCapabilities().MutatesConsumedData)
+
+			sourceLogData := generateLogData(tt.sourceAttributes)
+			wantLogData := generateLogData(tt.wantAttributes)
+			err = rlp.ConsumeLogs(context.Background(), sourceLogData)
+			require.NoError(t, err)
+			assert.EqualValues(t, wantLogData, tln.ld)
+		})
+	}
+}
+
+func TestResourceProcessorError(t *testing.T) {
+	ttn := &testTraceConsumer{}
+
+	badCfg := &Config{
+		ProcessorSettings: processorSettings,
+		AttributesActions: nil,
+	}
+
+	factory := NewFactory()
+	rtp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, badCfg, ttn)
+	require.Error(t, err)
+	require.Nil(t, rtp)
+
+	// Test metrics consumer
+	tmn := &testMetricsConsumer{}
+	rmp, err := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{}, badCfg, tmn)
+	require.Error(t, err)
+	require.Nil(t, rmp)
+
+	// Test logs consumer
+	tln := &testLogsConsumer{}
+	rlp, err := factory.CreateLogsProcessor(context.Background(), component.ProcessorCreateParams{}, badCfg, tln)
+	require.Error(t, err)
+	require.Nil(t, rlp)
+}
+
+func generateTraceData(attributes map[string]string) pdata.Traces {
+	td := testdata.GenerateTraceDataOneSpanNoResource()
+	if attributes == nil {
+		return td
+	}
+	resource := td.ResourceSpans().At(0).Resource()
+	for k, v := range attributes {
+		resource.Attributes().InsertString(k, v)
+	}
+	resource.Attributes().Sort()
+	return td
+}
+
+func generateMetricData(attributes map[string]string) pdata.Metrics {
+	md := testdata.GenerateMetricsOneMetricNoResource()
+	if attributes == nil {
+		return md
+	}
+	resource := md.ResourceMetrics().At(0).Resource()
+	for k, v := range attributes {
+		resource.Attributes().InsertString(k, v)
+	}
+	resource.Attributes().Sort()
+	return md
+}
+
+func generateLogData(attributes map[string]string) pdata.Logs {
+	ld := testdata.GenerateLogDataOneLogNoResource()
+	if attributes == nil {
+		return ld
+	}
+	resource := ld.ResourceLogs().At(0).Resource()
+	for k, v := range attributes {
+		resource.Attributes().InsertString(k, v)
+	}
+	resource.Attributes().Sort()
+	return ld
+}
+
+type testTraceConsumer struct {
+	td pdata.Traces
+}
+
+func (ttn *testTraceConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error {
+	// sort attributes to be able to compare traces
+	for i := 0; i < td.ResourceSpans().Len(); i++ {
+		td.ResourceSpans().At(i).Resource().Attributes().Sort()
+	}
+	ttn.td = td
+	return nil
+}
+
+type testMetricsConsumer struct {
+	md pdata.Metrics
+}
+
+func (tmn *testMetricsConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error {
+	// sort attributes to be able to compare metrics
+	for i := 0; i < md.ResourceMetrics().Len(); i++ {
+		md.ResourceMetrics().At(i).Resource().Attributes().Sort()
+	}
+	tmn.md = md
+	return nil
+}
+
+type testLogsConsumer struct {
+	ld pdata.Logs
+}
+
+func (tln *testLogsConsumer) ConsumeLogs(_ context.Context, ld pdata.Logs) error {
+	// sort attributes to be able to compare logs
+	for i := 0; i < ld.ResourceLogs().Len(); i++ {
+		ld.ResourceLogs().At(i).Resource().Attributes().Sort()
+	}
+	tln.ld = ld
+	return nil
+}
diff --git a/internal/otel_collector/processor/resourceprocessor/testdata/config.yaml b/internal/otel_collector/processor/resourceprocessor/testdata/config.yaml
new file mode 100644
index 00000000000..30cff1fddd8
--- /dev/null
+++ 
b/internal/otel_collector/processor/resourceprocessor/testdata/config.yaml @@ -0,0 +1,41 @@ +receivers: + examplereceiver: + +processors: + # The following specifies a resource configuration doing the changes on resource attributes: + # 1. Set "cloud.zone" attributes with "zone-1" value ignoring existing values. + # 2. Copy "k8s-cluster" attribute value to "k8s.cluster.name" attribute, nothing happens if "k8s-cluster" not found. + # 3. Remove "redundant-attribute" attribute. + # There are many more attribute modification actions supported, + # check processor/attributesprocessor/testdata/config.yaml for reference. + resource: + attributes: + - key: cloud.zone + value: zone-1 + action: upsert + - key: k8s.cluster.name + from_attribute: k8s-cluster + action: insert + - key: redundant-attribute + action: delete + # The following specifies an invalid resource configuration, it has to have at least one action set in attributes field. + resource/invalid: + + +exporters: + exampleexporter: + +service: + pipelines: + logs: + receivers: [examplereceiver] + processors: [resource] + exporters: [exampleexporter] + metrics: + receivers: [examplereceiver] + processors: [resource] + exporters: [exampleexporter] + traces: + receivers: [examplereceiver] + processors: [resource] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/README.md b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/README.md new file mode 100644 index 00000000000..31e593a1fb7 --- /dev/null +++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/README.md @@ -0,0 +1,33 @@ +# Probabilistic Sampling Processor + +Supported pipeline types: traces + +The probabilistic sampler supports two types of sampling: + +1. `sampling.priority` [semantic +convention](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table) +as defined by OpenTracing +2. Trace ID hashing + +The `sampling.priority` semantic convention takes priority over trace ID hashing. As the name +implies, trace ID hashing samples based on hash values determined by trace IDs. In order for +trace ID hashing to work, all collectors for a given tier (e.g. behind the same load balancer) +must have the same `hash_seed`. It is also possible to leverage a different `hash_seed` at +different collector tiers to support additional sampling requirements. Please refer to +[config.go](./config.go) for the config spec. + +The following configuration options can be modified: +- `hash_seed` (no default): An integer used to compute the hash algorithm. Note that all collectors for a given tier (e.g. behind the same load balancer) should have the same hash_seed. +- `sampling_percentage` (default = 0): Percentage at which traces are sampled; >= 100 samples all traces + +Examples: + +```yaml +processors: + probabilistic_sampler: + hash_seed: 22 + sampling_percentage: 15.3 +``` + +Refer to [config.yaml](./testdata/config.yaml) for detailed +examples on using the processor. 
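+
+For intuition, the percentage-to-threshold mapping can be sketched as a tiny
+standalone Go program. This is an illustration only, not part of the processor;
+the `sampled` helper is invented for this example, while the constants mirror
+the ones in probabilisticsampler.go:
+
+```go
+package main
+
+import "fmt"
+
+// The collector uses 0x4000 buckets so the modulo reduces to a bitwise AND.
+const (
+	numHashBuckets        = 0x4000
+	bitMaskHashBuckets    = numHashBuckets - 1
+	percentageScaleFactor = numHashBuckets / 100.0
+)
+
+// sampled reports whether a span whose trace ID hashes to traceIDHash passes a
+// sampler configured with the given percentage.
+func sampled(traceIDHash uint32, samplingPercentage float32) bool {
+	scaled := uint32(samplingPercentage * percentageScaleFactor)
+	return traceIDHash&bitMaskHashBuckets < scaled
+}
+
+func main() {
+	// 15.3% maps to a threshold of 2506 out of 16384 buckets (~15.295%).
+	fmt.Println(sampled(2505, 15.3)) // true: bucket 2505 is below the threshold
+	fmt.Println(sampled(2506, 15.3)) // false: bucket 2506 is at the threshold
+}
+```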
diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/config.go b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/config.go
new file mode 100644
index 00000000000..12a2407b0af
--- /dev/null
+++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/config.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package probabilisticsamplerprocessor
+
+import "go.opentelemetry.io/collector/config/configmodels"
+
+// Config has the configuration guiding the trace sampler processor.
+type Config struct {
+	configmodels.ProcessorSettings `mapstructure:",squash"`
+	// SamplingPercentage is the percentage rate at which traces are going to be sampled. Defaults to zero, i.e. no
+	// traces are sampled. Values greater than or equal to 100 are treated as "sample all traces".
+	SamplingPercentage float32 `mapstructure:"sampling_percentage"`
+	// HashSeed allows one to configure the hashing seed. This is important in scenarios where multiple layers of
+	// collectors have different sampling rates: if they all used the same seed, a trace passing one layer would also
+	// pass the others despite their different rates. Configuring different seeds avoids that.
+	HashSeed uint32 `mapstructure:"hash_seed"`
+}
diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/config_test.go b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/config_test.go
new file mode 100644
index 00000000000..6a09b4bf4f5
--- /dev/null
+++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/config_test.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package probabilisticsamplerprocessor
+
+import (
+	"path"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtest"
+)
+
+func TestLoadConfig(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	factory := NewFactory()
+	factories.Processors[typeStr] = factory
+	cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+
+	require.NoError(t, err)
+	require.NotNil(t, cfg)
+
+	p0 := cfg.Processors["probabilistic_sampler"]
+	assert.Equal(t, p0,
+		&Config{
+			ProcessorSettings: configmodels.ProcessorSettings{
+				TypeVal: "probabilistic_sampler",
+				NameVal: "probabilistic_sampler",
+			},
+			SamplingPercentage: 15.3,
+			HashSeed:           22,
+		})
+}
+
+func TestLoadConfigEmpty(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	require.NoError(t, err)
+	factories.Processors, err = component.MakeProcessorFactoryMap(NewFactory())
+	require.NotNil(t, factories.Processors)
+	require.NoError(t, err)
+
+	config, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "empty.yaml"), factories)
+
+	require.NoError(t, err)
+	require.NotNil(t, config)
+	p0 := config.Processors["probabilistic_sampler"]
+	assert.Equal(t, p0, createDefaultConfig())
+}
diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/factory.go b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/factory.go
new file mode 100644
index 00000000000..f8af5fc3447
--- /dev/null
+++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/factory.go
@@ -0,0 +1,57 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package probabilisticsamplerprocessor
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/processor/processorhelper"
+)
+
+const (
+	// typeStr is the value of the "type" key that selects this trace sampler in configuration.
+	typeStr = "probabilistic_sampler"
+)
+
+// NewFactory returns a new factory for the Probabilistic sampler processor.
+func NewFactory() component.ProcessorFactory {
+	return processorhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		processorhelper.WithTraces(createTraceProcessor))
+}
+
+func createDefaultConfig() configmodels.Processor {
+	return &Config{
+		ProcessorSettings: configmodels.ProcessorSettings{
+			TypeVal: typeStr,
+			NameVal: typeStr,
+		},
+	}
+}
+
+// createTraceProcessor creates a trace processor based on this config.
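+// The factory registers this function via processorhelper.WithTraces, so cfg is
+// always the *Config produced by createDefaultConfig (possibly overridden from
+// YAML), which is what makes the unchecked type assertion below safe.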
+func createTraceProcessor( + _ context.Context, + _ component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, +) (component.TracesProcessor, error) { + oCfg := cfg.(*Config) + return newTraceProcessor(nextConsumer, *oCfg) +} diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/factory_test.go b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/factory_test.go new file mode 100644 index 00000000000..3f39318e456 --- /dev/null +++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/factory_test.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package probabilisticsamplerprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateProcessor(t *testing.T) { + cfg := createDefaultConfig() + params := component.ProcessorCreateParams{Logger: zap.NewNop()} + tp, err := createTraceProcessor(context.Background(), params, cfg, consumertest.NewTracesNop()) + assert.NotNil(t, tp) + assert.NoError(t, err, "cannot create trace processor") +} diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/probabilisticsampler.go b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/probabilisticsampler.go new file mode 100644 index 00000000000..2f97912fe77 --- /dev/null +++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/probabilisticsampler.go @@ -0,0 +1,234 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package probabilisticsamplerprocessor + +import ( + "context" + "strconv" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// samplingPriority has the semantic result of parsing the "sampling.priority" +// attribute per OpenTracing semantic conventions. 
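+// A priority of zero vetoes sampling, any positive value forces the span to be
+// sampled, and anything else (attribute absent, negative, or unparsable) defers
+// the decision to trace ID hashing.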
+type samplingPriority int
+
+const (
+	// deferDecision means that the decision whether a span will be "sampled" (i.e.
+	// forwarded by the collector) is made by hashing the trace ID according
+	// to the configured sampling rate.
+	deferDecision samplingPriority = iota
+	// mustSampleSpan indicates that the span had a "sampling.priority" attribute
+	// greater than zero and it is going to be sampled, i.e. forwarded by the
+	// collector.
+	mustSampleSpan
+	// doNotSampleSpan indicates that the span had a "sampling.priority" attribute
+	// equal to zero and it is NOT going to be sampled, i.e. it won't be forwarded
+	// by the collector.
+	doNotSampleSpan
+
+	// These constants help translate user-friendly percentages to the numbers used
+	// directly in sampling.
+	numHashBuckets        = 0x4000 // Using a power of 2 to avoid division.
+	bitMaskHashBuckets    = numHashBuckets - 1
+	percentageScaleFactor = numHashBuckets / 100.0
+)
+
+type tracesamplerprocessor struct {
+	nextConsumer       consumer.TracesConsumer
+	scaledSamplingRate uint32
+	hashSeed           uint32
+}
+
+// newTraceProcessor returns a component.TracesProcessor that will perform head sampling according to the given
+// configuration.
+func newTraceProcessor(nextConsumer consumer.TracesConsumer, cfg Config) (component.TracesProcessor, error) {
+	if nextConsumer == nil {
+		return nil, componenterror.ErrNilNextConsumer
+	}
+
+	return &tracesamplerprocessor{
+		nextConsumer: nextConsumer,
+		// Scale the sampling percentage once, at construction time, so it does not
+		// have to be recalculated for every span.
+		scaledSamplingRate: uint32(cfg.SamplingPercentage * percentageScaleFactor),
+		hashSeed:           cfg.HashSeed,
+	}, nil
+}
+
+func (tsp *tracesamplerprocessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	rspans := td.ResourceSpans()
+	sampledTraceData := pdata.NewTraces()
+	for i := 0; i < rspans.Len(); i++ {
+		tsp.processTraces(rspans.At(i), sampledTraceData)
+	}
+	return tsp.nextConsumer.ConsumeTraces(ctx, sampledTraceData)
+}
+
+func (tsp *tracesamplerprocessor) processTraces(resourceSpans pdata.ResourceSpans, sampledTraceData pdata.Traces) {
+	scaledSamplingRate := tsp.scaledSamplingRate
+
+	sampledTraceData.ResourceSpans().Resize(sampledTraceData.ResourceSpans().Len() + 1)
+	rs := sampledTraceData.ResourceSpans().At(sampledTraceData.ResourceSpans().Len() - 1)
+	resourceSpans.Resource().CopyTo(rs.Resource())
+	rs.InstrumentationLibrarySpans().Resize(1)
+	spns := rs.InstrumentationLibrarySpans().At(0).Spans()
+
+	ilss := resourceSpans.InstrumentationLibrarySpans()
+	for j := 0; j < ilss.Len(); j++ {
+		ils := ilss.At(j)
+		for k := 0; k < ils.Spans().Len(); k++ {
+			span := ils.Spans().At(k)
+			sp := parseSpanSamplingPriority(span)
+			if sp == doNotSampleSpan {
+				// The OpenTelemetry spec treats this as a "hint"; we take a stronger
+				// approach and do not sample the span, since some callers use it to
+				// remove specific spans from traces.
+				continue
+			}
+
+			// If one assumes random trace IDs, hashing may seem avoidable. However, traces can come from
+			// sources with very different ways of generating trace IDs, and may already have been sampled
+			// without hashing. Hashing here prevents bias introduced by such systems.
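+			// The masked hash selects one of numHashBuckets (16384) buckets; comparing
+			// the bucket against scaledSamplingRate turns the configured percentage
+			// into a division-free threshold test.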
+ tidBytes := span.TraceID().Bytes() + sampled := sp == mustSampleSpan || + hash(tidBytes[:], tsp.hashSeed)&bitMaskHashBuckets < scaledSamplingRate + + if sampled { + spns.Append(span) + } + } + } +} + +func (tsp *tracesamplerprocessor) GetCapabilities() component.ProcessorCapabilities { + return component.ProcessorCapabilities{MutatesConsumedData: false} +} + +// Start is invoked during service startup. +func (tsp *tracesamplerprocessor) Start(context.Context, component.Host) error { + return nil +} + +// Shutdown is invoked during service shutdown. +func (tsp *tracesamplerprocessor) Shutdown(context.Context) error { + return nil +} + +// parseSpanSamplingPriority checks if the span has the "sampling.priority" tag to +// decide if the span should be sampled or not. The usage of the tag follows the +// OpenTracing semantic tags: +// https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table +func parseSpanSamplingPriority(span pdata.Span) samplingPriority { + attribMap := span.Attributes() + if attribMap.Len() <= 0 { + return deferDecision + } + + samplingPriorityAttrib, ok := attribMap.Get("sampling.priority") + if !ok { + return deferDecision + } + + // By default defer the decision. + decision := deferDecision + + // Try check for different types since there are various client libraries + // using different conventions regarding "sampling.priority". Besides the + // client libraries it is also possible that the type was lost in translation + // between different formats. + switch samplingPriorityAttrib.Type() { + case pdata.AttributeValueINT: + value := samplingPriorityAttrib.IntVal() + if value == 0 { + decision = doNotSampleSpan + } else if value > 0 { + decision = mustSampleSpan + } + case pdata.AttributeValueDOUBLE: + value := samplingPriorityAttrib.DoubleVal() + if value == 0.0 { + decision = doNotSampleSpan + } else if value > 0.0 { + decision = mustSampleSpan + } + case pdata.AttributeValueSTRING: + attribVal := samplingPriorityAttrib.StringVal() + if value, err := strconv.ParseFloat(attribVal, 64); err == nil { + if value == 0.0 { + decision = doNotSampleSpan + } else if value > 0.0 { + decision = mustSampleSpan + } + } + } + + return decision +} + +// hash is a murmur3 hash function, see http://en.wikipedia.org/wiki/MurmurHash +func hash(key []byte, seed uint32) (hash uint32) { + const ( + c1 = 0xcc9e2d51 + c2 = 0x1b873593 + c3 = 0x85ebca6b + c4 = 0xc2b2ae35 + r1 = 15 + r2 = 13 + m = 5 + n = 0xe6546b64 + ) + + hash = seed + iByte := 0 + for ; iByte+4 <= len(key); iByte += 4 { + k := uint32(key[iByte]) | uint32(key[iByte+1])<<8 | uint32(key[iByte+2])<<16 | uint32(key[iByte+3])<<24 + k *= c1 + k = (k << r1) | (k >> (32 - r1)) + k *= c2 + hash ^= k + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash*m + n + } + + // TraceId and SpanId have lengths that are multiple of 4 so the code below is never expected to + // be hit when sampling traces. However, it is preserved here to keep it as a correct murmur3 implementation. + // This is enforced via tests. 
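+	// Standard murmur3 tail handling: the switch below folds the 1-3 leftover bytes
+	// that do not fill a 4-byte block into the hash before the final avalanche mixing.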
+ var remainingBytes uint32 + switch len(key) - iByte { + case 3: + remainingBytes += uint32(key[iByte+2]) << 16 + fallthrough + case 2: + remainingBytes += uint32(key[iByte+1]) << 8 + fallthrough + case 1: + remainingBytes += uint32(key[iByte]) + remainingBytes *= c1 + remainingBytes = (remainingBytes << r1) | (remainingBytes >> (32 - r1)) + remainingBytes *= c2 + hash ^= remainingBytes + } + + hash ^= uint32(len(key)) + hash ^= hash >> 16 + hash *= c3 + hash ^= hash >> 13 + hash *= c4 + hash ^= hash >> 16 + + return +} diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/probabilisticsampler_test.go b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/probabilisticsampler_test.go new file mode 100644 index 00000000000..e84ac94249d --- /dev/null +++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/probabilisticsampler_test.go @@ -0,0 +1,506 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package probabilisticsamplerprocessor + +import ( + "context" + "math" + "math/rand" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func TestNewTraceProcessor(t *testing.T) { + tests := []struct { + name string + nextConsumer consumer.TracesConsumer + cfg Config + want component.TracesProcessor + wantErr bool + }{ + { + name: "nil_nextConsumer", + wantErr: true, + }, + { + name: "happy_path", + nextConsumer: consumertest.NewTracesNop(), + cfg: Config{ + SamplingPercentage: 15.5, + }, + want: &tracesamplerprocessor{ + nextConsumer: consumertest.NewTracesNop(), + }, + }, + { + name: "happy_path_hash_seed", + nextConsumer: consumertest.NewTracesNop(), + cfg: Config{ + SamplingPercentage: 13.33, + HashSeed: 4321, + }, + want: &tracesamplerprocessor{ + nextConsumer: consumertest.NewTracesNop(), + hashSeed: 4321, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if !tt.wantErr { + // The truncation below with uint32 cannot be defined at initialization (compiler error), performing it at runtime. + tt.want.(*tracesamplerprocessor).scaledSamplingRate = uint32(tt.cfg.SamplingPercentage * percentageScaleFactor) + } + got, err := newTraceProcessor(tt.nextConsumer, tt.cfg) + if (err != nil) != tt.wantErr { + t.Errorf("newTraceProcessor() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("newTraceProcessor() = %v, want %v", got, tt.want) + } + }) + } +} + +// Test_tracesamplerprocessor_SamplingPercentageRange checks for different sampling rates and ensures +// that they are within acceptable deltas. 
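+// The tolerated delta grows with the configured percentage, since the absolute
+// variance of the sampled share is larger at higher rates; 100% sampling is
+// expected to be exact.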
+func Test_tracesamplerprocessor_SamplingPercentageRange(t *testing.T) {
+	tests := []struct {
+		name              string
+		cfg               Config
+		numBatches        int
+		numTracesPerBatch int
+		acceptableDelta   float64
+	}{
+		{
+			name: "random_sampling_tiny",
+			cfg: Config{
+				SamplingPercentage: 0.03,
+			},
+			numBatches:        1e5,
+			numTracesPerBatch: 2,
+			acceptableDelta:   0.01,
+		},
+		{
+			name: "random_sampling_small",
+			cfg: Config{
+				SamplingPercentage: 5,
+			},
+			numBatches:        1e5,
+			numTracesPerBatch: 2,
+			acceptableDelta:   0.01,
+		},
+		{
+			name: "random_sampling_medium",
+			cfg: Config{
+				SamplingPercentage: 50.0,
+			},
+			numBatches:        1e5,
+			numTracesPerBatch: 4,
+			acceptableDelta:   0.1,
+		},
+		{
+			name: "random_sampling_high",
+			cfg: Config{
+				SamplingPercentage: 90.0,
+			},
+			numBatches:        1e5,
+			numTracesPerBatch: 1,
+			acceptableDelta:   0.2,
+		},
+		{
+			name: "random_sampling_all",
+			cfg: Config{
+				SamplingPercentage: 100.0,
+			},
+			numBatches:        1e5,
+			numTracesPerBatch: 1,
+			acceptableDelta:   0.0,
+		},
+	}
+	const testSvcName = "test-svc"
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			sink := new(consumertest.TracesSink)
+			tsp, err := newTraceProcessor(sink, tt.cfg)
+			if err != nil {
+				t.Errorf("error when creating tracesamplerprocessor: %v", err)
+				return
+			}
+			for _, td := range genRandomTestData(tt.numBatches, tt.numTracesPerBatch, testSvcName, 1) {
+				if err := tsp.ConsumeTraces(context.Background(), td); err != nil {
+					t.Errorf("tracesamplerprocessor.ConsumeTraceData() error = %v", err)
+					return
+				}
+			}
+			_, sampled := assertSampledData(t, sink.AllTraces(), testSvcName)
+			actualSamplingPercentage := float32(sampled) / float32(tt.numBatches*tt.numTracesPerBatch) * 100.0
+			delta := math.Abs(float64(actualSamplingPercentage - tt.cfg.SamplingPercentage))
+			if delta > tt.acceptableDelta {
+				t.Errorf(
+					"got %f percentage sampling rate, want %f (allowed delta is %f but got %f)",
+					actualSamplingPercentage,
+					tt.cfg.SamplingPercentage,
+					tt.acceptableDelta,
+					delta,
+				)
+			}
+		})
+	}
+}
+
+// Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans checks the number of spans sent to the next consumer. 
This is to avoid duplicate spans +func Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans(t *testing.T) { + tests := []struct { + name string + cfg Config + numBatches int + numTracesPerBatch int + acceptableDelta float64 + resourceSpanPerTrace int + }{ + { + name: "single_batch_single_trace_two_resource_spans", + cfg: Config{ + SamplingPercentage: 100.0, + }, + numBatches: 1, + numTracesPerBatch: 1, + acceptableDelta: 0.0, + resourceSpanPerTrace: 2, + }, + { + name: "single_batch_two_traces_two_resource_spans", + cfg: Config{ + SamplingPercentage: 100.0, + }, + numBatches: 1, + numTracesPerBatch: 2, + acceptableDelta: 0.0, + resourceSpanPerTrace: 2, + }, + } + const testSvcName = "test-svc" + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sink := new(consumertest.TracesSink) + tsp, err := newTraceProcessor(sink, tt.cfg) + if err != nil { + t.Errorf("error when creating tracesamplerprocessor: %v", err) + return + } + + for _, td := range genRandomTestData(tt.numBatches, tt.numTracesPerBatch, testSvcName, tt.resourceSpanPerTrace) { + if err := tsp.ConsumeTraces(context.Background(), td); err != nil { + t.Errorf("tracesamplerprocessor.ConsumeTraceData() error = %v", err) + return + } + assert.Equal(t, tt.resourceSpanPerTrace*tt.numTracesPerBatch, sink.SpansCount()) + sink.Reset() + } + + }) + } +} + +// Test_tracesamplerprocessor_SpanSamplingPriority checks if handling of "sampling.priority" is correct. +func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { + singleSpanWithAttrib := func(key string, attribValue pdata.AttributeValue) pdata.Traces { + traces := pdata.NewTraces() + traces.ResourceSpans().Resize(1) + rs := traces.ResourceSpans().At(0) + rs.InstrumentationLibrarySpans().Resize(1) + instrLibrarySpans := rs.InstrumentationLibrarySpans().At(0) + instrLibrarySpans.Spans().Append(getSpanWithAttributes(key, attribValue)) + return traces + } + tests := []struct { + name string + cfg Config + td pdata.Traces + sampled bool + }{ + { + name: "must_sample", + cfg: Config{ + SamplingPercentage: 0.0, + }, + td: singleSpanWithAttrib( + "sampling.priority", + pdata.NewAttributeValueInt(2)), + sampled: true, + }, + { + name: "must_sample_double", + cfg: Config{ + SamplingPercentage: 0.0, + }, + td: singleSpanWithAttrib( + "sampling.priority", + pdata.NewAttributeValueDouble(1)), + sampled: true, + }, + { + name: "must_sample_string", + cfg: Config{ + SamplingPercentage: 0.0, + }, + td: singleSpanWithAttrib( + "sampling.priority", + pdata.NewAttributeValueString("1")), + sampled: true, + }, + { + name: "must_not_sample", + cfg: Config{ + SamplingPercentage: 100.0, + }, + td: singleSpanWithAttrib( + "sampling.priority", + pdata.NewAttributeValueInt(0)), + }, + { + name: "must_not_sample_double", + cfg: Config{ + SamplingPercentage: 100.0, + }, + td: singleSpanWithAttrib( + "sampling.priority", + pdata.NewAttributeValueDouble(0)), + }, + { + name: "must_not_sample_string", + cfg: Config{ + SamplingPercentage: 100.0, + }, + td: singleSpanWithAttrib( + "sampling.priority", + pdata.NewAttributeValueString("0")), + }, + { + name: "defer_sample_expect_not_sampled", + cfg: Config{ + SamplingPercentage: 0.0, + }, + td: singleSpanWithAttrib( + "no.sampling.priority", + pdata.NewAttributeValueInt(2)), + }, + { + name: "defer_sample_expect_sampled", + cfg: Config{ + SamplingPercentage: 100.0, + }, + td: singleSpanWithAttrib( + "no.sampling.priority", + pdata.NewAttributeValueInt(2)), + sampled: true, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + sink := new(consumertest.TracesSink) + tsp, err := newTraceProcessor(sink, tt.cfg) + require.NoError(t, err) + + err = tsp.ConsumeTraces(context.Background(), tt.td) + require.NoError(t, err) + + sampledData := sink.AllTraces() + require.Equal(t, 1, len(sampledData)) + assert.Equal(t, tt.sampled, sink.SpansCount() == 1) + }) + } +} + +// Test_parseSpanSamplingPriority ensures that the function parsing the attributes is taking "sampling.priority" +// attribute correctly. +func Test_parseSpanSamplingPriority(t *testing.T) { + tests := []struct { + name string + span pdata.Span + want samplingPriority + }{ + { + name: "nil_span", + span: pdata.NewSpan(), + want: deferDecision, + }, + { + name: "nil_attributes", + span: pdata.NewSpan(), + want: deferDecision, + }, + { + name: "no_sampling_priority", + span: getSpanWithAttributes("key", pdata.NewAttributeValueBool(true)), + want: deferDecision, + }, + { + name: "sampling_priority_int_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueInt(0)), + want: doNotSampleSpan, + }, + { + name: "sampling_priority_int_gt_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueInt(1)), + want: mustSampleSpan, + }, + { + name: "sampling_priority_int_lt_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueInt(-1)), + want: deferDecision, + }, + { + name: "sampling_priority_double_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueDouble(0)), + want: doNotSampleSpan, + }, + { + name: "sampling_priority_double_gt_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueDouble(1)), + want: mustSampleSpan, + }, + { + name: "sampling_priority_double_lt_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueDouble(-1)), + want: deferDecision, + }, + { + name: "sampling_priority_string_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueString("0.0")), + want: doNotSampleSpan, + }, + { + name: "sampling_priority_string_gt_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueString("0.5")), + want: mustSampleSpan, + }, + { + name: "sampling_priority_string_lt_zero", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueString("-0.5")), + want: deferDecision, + }, + { + name: "sampling_priority_string_NaN", + span: getSpanWithAttributes("sampling.priority", pdata.NewAttributeValueString("NaN")), + want: deferDecision, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, parseSpanSamplingPriority(tt.span)) + }) + } +} + +func getSpanWithAttributes(key string, value pdata.AttributeValue) pdata.Span { + span := pdata.NewSpan() + span.SetName("spanName") + span.Attributes().InitFromMap(map[string]pdata.AttributeValue{key: value}) + return span +} + +// Test_hash ensures that the hash function supports different key lengths even if in +// practice it is only expected to receive keys with length 16 (trace id length in OC proto). +func Test_hash(t *testing.T) { + // Statistically a random selection of such small number of keys should not result in + // collisions, but, of course it is possible that they happen, a different random source + // should avoid that. 
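+	// Hash every prefix of a single random 16-byte trace ID, from 1 byte up to the
+	// full ID, and require all resulting hash values to be distinct.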
+ r := rand.New(rand.NewSource(1)) + fullKey := tracetranslator.UInt64ToByteTraceID(r.Uint64(), r.Uint64()) + seen := make(map[uint32]bool) + for i := 1; i <= len(fullKey); i++ { + key := fullKey[:i] + hash := hash(key, 1) + require.False(t, seen[hash], "Unexpected duplicated hash") + seen[hash] = true + } +} + +// genRandomTestData generates a slice of consumerdata.TraceData with the numBatches elements which one with +// numTracesPerBatch spans (ie.: each span has a different trace ID). All spans belong to the specified +// serviceName. +func genRandomTestData(numBatches, numTracesPerBatch int, serviceName string, resourceSpanCount int) (tdd []pdata.Traces) { + r := rand.New(rand.NewSource(1)) + var traceBatches []pdata.Traces + for i := 0; i < numBatches; i++ { + traces := pdata.NewTraces() + traces.ResourceSpans().Resize(resourceSpanCount) + for j := 0; j < resourceSpanCount; j++ { + rs := traces.ResourceSpans().At(j) + rs.Resource().Attributes().InsertString("service.name", serviceName) + rs.Resource().Attributes().InsertBool("bool", true) + rs.Resource().Attributes().InsertString("string", "yes") + rs.Resource().Attributes().InsertInt("int64", 10000000) + rs.InstrumentationLibrarySpans().Resize(1) + ils := rs.InstrumentationLibrarySpans().At(0) + ils.Spans().Resize(numTracesPerBatch) + + for k := 0; k < numTracesPerBatch; k++ { + span := ils.Spans().At(k) + span.SetTraceID(tracetranslator.UInt64ToTraceID(r.Uint64(), r.Uint64())) + span.SetSpanID(tracetranslator.UInt64ToSpanID(r.Uint64())) + attributes := make(map[string]pdata.AttributeValue) + attributes[tracetranslator.TagHTTPStatusCode] = pdata.NewAttributeValueInt(404) + attributes[tracetranslator.TagHTTPStatusMsg] = pdata.NewAttributeValueString("Not Found") + span.Attributes().InitFromMap(attributes) + } + } + traceBatches = append(traceBatches, traces) + } + + return traceBatches +} + +// assertSampledData checks for no repeated traceIDs and counts the number of spans on the sampled data for +// the given service. +func assertSampledData(t *testing.T, sampled []pdata.Traces, serviceName string) (traceIDs map[[16]byte]bool, spanCount int) { + traceIDs = make(map[[16]byte]bool) + for _, td := range sampled { + rspans := td.ResourceSpans() + for i := 0; i < rspans.Len(); i++ { + rspan := rspans.At(i) + ilss := rspan.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + ils := ilss.At(j) + if svcNameAttr, _ := rspan.Resource().Attributes().Get("service.name"); svcNameAttr.StringVal() != serviceName { + continue + } + for k := 0; k < ils.Spans().Len(); k++ { + spanCount++ + span := ils.Spans().At(k) + key := span.TraceID().Bytes() + if traceIDs[key] { + t.Errorf("same traceID used more than once %q", key) + return + } + traceIDs[key] = true + } + } + } + } + return +} diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/testdata/config.yaml b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/testdata/config.yaml new file mode 100644 index 00000000000..4ab26d911d0 --- /dev/null +++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/testdata/config.yaml @@ -0,0 +1,35 @@ +receivers: + examplereceiver: + +processors: + # The probabilistic_sampler sets trace sampling by hashing the trace id of + # each span and making the sampling decision based on the hashed value. It + # also implements the "sampling.priority" semantic convention as defined by + # OpenTracing. 
See + # https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table + # The "sampling.priority" semantics have priority over trace id hashing and + # can be used to control if given spans are sampled, ie.: forwarded, or not. + probabilistic_sampler: + # the percentage rate at which traces are going to be sampled. Defaults to + # zero, i.e.: no sample. Values greater or equal 100 are treated as + # "sample all traces". + sampling_percentage: 15.3 + # hash_seed allows one to configure the hashing seed. This is important in + # scenarios where multiple layers of collectors are used to achieve the + # desired sampling rate, eg.: 10% on first layer and 10% on the + # second, resulting in an overall sampling rate of 1% (10% x 10%). + # If all layers use the same seed, all data passing one layer will also pass + # the next one, independent of the configured sampling rate. Having different + # seeds at different layers ensures that sampling rate in each layer work as + # intended. + hash_seed: 22 + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [probabilistic_sampler] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/testdata/empty.yaml b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/testdata/empty.yaml new file mode 100644 index 00000000000..0123346ee2f --- /dev/null +++ b/internal/otel_collector/processor/samplingprocessor/probabilisticsamplerprocessor/testdata/empty.yaml @@ -0,0 +1,16 @@ +receivers: + examplereceiver: + +processors: + probabilistic_sampler: + + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [probabilistic_sampler] + exporters: [exampleexporter] diff --git a/internal/otel_collector/processor/spanprocessor/README.md b/internal/otel_collector/processor/spanprocessor/README.md new file mode 100644 index 00000000000..21fceacb579 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/README.md @@ -0,0 +1,101 @@ +# Span Processor + +Supported pipeline types: traces + +The span processor modifies either the span name or attributes of a span based +on the span name. Please refer to +[config.go](./config.go) for the config spec. + +It optionally supports the ability to [include/exclude spans](../README.md#includeexclude-spans). + +The following actions are supported: + +- `name`: Modify the name of attributes within a span + +### Name a span + +The following settings are required: + +- `from_attributes`: The attribute value for the keys are used to create a +new name in the order specified in the configuration. + +The following settings can be optionally configured: + +- `separator`: A string, which is specified will be used to split values + +Note: If renaming is dependent on attributes being modified by the `attributes` +processor, ensure the `span` processor is specified after the `attributes` +processor in the `pipeline` specification. + +```yaml +span: + name: + # from_attributes represents the attribute keys to pull the values from to generate the + # new span name. + from_attributes: [, , ...] + # Separator is the string used to concatenate various parts of the span name. + separator: +``` + +Example: + +```yaml +span: + name: + from_attributes: ["db.svc", "operation"] + separator: "::" +``` + +Refer to [config.yaml](./testdata/config.yaml) for detailed +examples on using the processor. 
+ +### Extract attributes from span name + +Takes a list of regular expressions to match span name against and extract +attributes from it based on subexpressions. Must be specified under the +`to_attributes` section. + +The following settings are required: + +- `rules`: A list of rules to extract attribute values from span name. The values +in the span name are replaced by extracted attribute names. Each rule in the list +is regex pattern string. Span name is checked against the regex and if the regex +matches then all named subexpressions of the regex are extracted as attributes +and are added to the span. Each subexpression name becomes an attribute name and +subexpression matched portion becomes the attribute value. The matched portion +in the span name is replaced by extracted attribute name. If the attributes +already exist in the span then they will be overwritten. The process is repeated +for all rules in the order they are specified. Each subsequent rule works on the +span name that is the output after processing the previous rule. +- `break_after_match` (default = false): specifies if processing of rules should stop after the first +match. If it is false rule processing will continue to be performed over the +modified span name. + +```yaml +span/to_attributes: + name: + to_attributes: + rules: + - regexp-rule1 + - regexp-rule2 + - regexp-rule3 + ... + break_after_match: + +``` + +Example: + +```yaml +# Let's assume input span name is /api/v1/document/12345678/update +# Applying the following results in output span name /api/v1/document/{documentId}/update +# and will add a new attribute "documentId"="12345678" to the span. +span/to_attributes: + name: + to_attributes: + rules: + - ^\/api\/v1\/document\/(?P.*)\/update$ +``` + +Refer to [config.yaml](./testdata/config.yaml) for detailed +examples on using the processor. diff --git a/internal/otel_collector/processor/spanprocessor/config.go b/internal/otel_collector/processor/spanprocessor/config.go new file mode 100644 index 00000000000..eadd9f6f904 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/config.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spanprocessor + +import ( + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/internal/processor/filterconfig" +) + +// Config is the configuration for the span processor. +// Prior to any actions being applied, each span is compared against +// the include properties and then the exclude properties if they are specified. +// This determines if a span is to be processed or not. +type Config struct { + configmodels.ProcessorSettings `mapstructure:",squash"` + + filterconfig.MatchConfig `mapstructure:",squash"` + + // Rename specifies the components required to re-name a span. + // The `from_attributes` field needs to be set for this processor to be properly + // configured. 
+ // Note: The field name is `Rename` to avoid collision with the Name() method + // from configmodels.ProcessorSettings.NamedEntity + Rename Name `mapstructure:"name"` +} + +// Name specifies the attributes to use to re-name a span. +type Name struct { + // Specifies transformations of span name to and from attributes. + // First FromAttributes rules are applied, then ToAttributes are applied. + // At least one of these 2 fields must be set. + + // FromAttributes represents the attribute keys to pull the values from to + // generate the new span name. All attribute keys are required in the span + // to re-name a span. If any attribute is missing from the span, no re-name + // will occur. + // Note: The new span name is constructed in order of the `from_attributes` + // specified in the configuration. This field is required and cannot be empty. + FromAttributes []string `mapstructure:"from_attributes"` + + // Separator is the string used to separate attributes values in the new + // span name. If no value is set, no separator is used between attribute + // values. Used with FromAttributes only. + Separator string `mapstructure:"separator"` + + // ToAttributes specifies a configuration to extract attributes from span name. + ToAttributes *ToAttributes `mapstructure:"to_attributes"` +} + +type ToAttributes struct { + // Rules is a list of rules to extract attribute values from span name. The values + // in the span name are replaced by extracted attribute names. Each rule in the list + // is a regex pattern string. Span name is checked against the regex. If it matches + // then all named subexpressions of the regex are extracted as attributes + // and are added to the span. Each subexpression name becomes an attribute name and + // subexpression matched portion becomes the attribute value. The matched portion + // in the span name is replaced by extracted attribute name. If the attributes + // already exist in the span then they will be overwritten. The process is repeated + // for all rules in the order they are specified. Each subsequent rule works on the + // span name that is the output after processing the previous rule. + Rules []string `mapstructure:"rules"` + + // BreakAfterMatch specifies if processing of rules should stop after the first + // match. If it is false rule processing will continue to be performed over the + // modified span name. + BreakAfterMatch bool `mapstructure:"break_after_match"` +} diff --git a/internal/otel_collector/processor/spanprocessor/config_test.go b/internal/otel_collector/processor/spanprocessor/config_test.go new file mode 100644 index 00000000000..b0fe7cd6566 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/config_test.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spanprocessor + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/internal/processor/filterconfig" + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Processors[typeStr] = factory + + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + assert.NoError(t, err) + assert.NotNil(t, cfg) + + p0 := cfg.Processors["span/custom"] + assert.Equal(t, p0, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: "span/custom", + }, + Rename: Name{ + FromAttributes: []string{"db.svc", "operation", "id"}, + Separator: "::", + }, + }) + + p1 := cfg.Processors["span/no-separator"] + assert.Equal(t, p1, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: "span/no-separator", + }, + Rename: Name{ + FromAttributes: []string{"db.svc", "operation", "id"}, + Separator: "", + }, + }) + + p2 := cfg.Processors["span/to_attributes"] + assert.Equal(t, p2, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: "span/to_attributes", + }, + Rename: Name{ + ToAttributes: &ToAttributes{ + Rules: []string{`^\/api\/v1\/document\/(?P.*)\/update$`}, + }, + }, + }) + + p3 := cfg.Processors["span/includeexclude"] + assert.Equal(t, p3, &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: "span/includeexclude", + }, + MatchConfig: filterconfig.MatchConfig{ + Include: &filterconfig.MatchProperties{ + Config: *createMatchConfig(filterset.Regexp), + Services: []string{`banks`}, + SpanNames: []string{"^(.*?)/(.*?)$"}, + }, + Exclude: &filterconfig.MatchProperties{ + Config: *createMatchConfig(filterset.Strict), + SpanNames: []string{`donot/change`}, + }, + }, + Rename: Name{ + ToAttributes: &ToAttributes{ + Rules: []string{`(?P.*?)$`}, + }, + }, + }) +} + +func createMatchConfig(matchType filterset.MatchType) *filterset.Config { + return &filterset.Config{ + MatchType: matchType, + } +} diff --git a/internal/otel_collector/processor/spanprocessor/doc.go b/internal/otel_collector/processor/spanprocessor/doc.go new file mode 100644 index 00000000000..2086444e613 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package spanprocessor contains logic to modify top level settings of a span, such +// as its name. 
+package spanprocessor diff --git a/internal/otel_collector/processor/spanprocessor/factory.go b/internal/otel_collector/processor/spanprocessor/factory.go new file mode 100644 index 00000000000..0d835953216 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/factory.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spanprocessor + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // typeStr is the value of "type" Span processor in the configuration. + typeStr = "span" +) + +var processorCapabilities = component.ProcessorCapabilities{MutatesConsumedData: true} + +// errMissingRequiredField is returned when a required field in the config +// is not specified. +// TODO https://github.com/open-telemetry/opentelemetry-collector/issues/215 +// Move this to the error package that allows for span name and field to be specified. +var errMissingRequiredField = errors.New("error creating \"span\" processor: either \"from_attributes\" or \"to_attributes\" must be specified in \"name:\"") + +// NewFactory returns a new factory for the Span processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithTraces(createTraceProcessor)) +} + +func createDefaultConfig() configmodels.Processor { + return &Config{ + ProcessorSettings: configmodels.ProcessorSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + } +} + +func createTraceProcessor( + _ context.Context, + _ component.ProcessorCreateParams, + cfg configmodels.Processor, + nextConsumer consumer.TracesConsumer, +) (component.TracesProcessor, error) { + + // 'from_attributes' or 'to_attributes' under 'name' has to be set for the span + // processor to be valid. If not set and not enforced, the processor would do no work. + oCfg := cfg.(*Config) + if len(oCfg.Rename.FromAttributes) == 0 && + (oCfg.Rename.ToAttributes == nil || len(oCfg.Rename.ToAttributes.Rules) == 0) { + return nil, errMissingRequiredField + } + + sp, err := newSpanProcessor(*oCfg) + if err != nil { + return nil, err + } + return processorhelper.NewTraceProcessor( + cfg, + nextConsumer, + sp, + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/internal/otel_collector/processor/spanprocessor/factory_test.go b/internal/otel_collector/processor/spanprocessor/factory_test.go new file mode 100644 index 00000000000..692795dd359 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/factory_test.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spanprocessor + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +func TestFactory_Type(t *testing.T) { + factory := NewFactory() + assert.Equal(t, factory.Type(), configmodels.Type(typeStr)) +} + +func TestFactory_CreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, configcheck.ValidateConfig(cfg)) + + // Check the values of the default configuration. + assert.NotNil(t, cfg) + assert.Equal(t, configmodels.Type(typeStr), cfg.Type()) + assert.Equal(t, typeStr, cfg.Name()) +} + +func TestFactory_CreateTraceProcessor(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + + // Name.FromAttributes field needs to be set for the configuration to be valid. + oCfg.Rename.FromAttributes = []string{"test-key"} + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + + require.Nil(t, err) + assert.NotNil(t, tp) +} + +// TestFactory_CreateTraceProcessor_InvalidConfig ensures the default configuration +// returns an error. +func TestFactory_CreateTraceProcessor_InvalidConfig(t *testing.T) { + factory := NewFactory() + + testcases := []struct { + name string + cfg Name + err error + }{ + { + name: "missing_config", + err: errMissingRequiredField, + }, + + { + name: "invalid_regexp", + cfg: Name{ + ToAttributes: &ToAttributes{ + Rules: []string{"\\"}, + }, + }, + err: fmt.Errorf("invalid regexp pattern \\"), + }, + } + + for _, test := range testcases { + t.Run(test.name, func(t *testing.T) { + cfg := factory.CreateDefaultConfig().(*Config) + cfg.Rename = test.cfg + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewTracesNop()) + require.Nil(t, tp) + assert.EqualValues(t, err, test.err) + }) + } +} + +func TestFactory_CreateMetricProcessor(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + mp, err := factory.CreateMetricsProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, cfg, nil) + require.Nil(t, mp) + assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) +} diff --git a/internal/otel_collector/processor/spanprocessor/span.go b/internal/otel_collector/processor/spanprocessor/span.go new file mode 100644 index 00000000000..340b4211934 --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/span.go @@ -0,0 +1,221 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanprocessor
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal/processor/filterspan"
+)
+
+type spanProcessor struct {
+	config           Config
+	toAttributeRules []toAttributeRule
+	include          filterspan.Matcher
+	exclude          filterspan.Matcher
+}
+
+// toAttributeRule is the compiled equivalent of config.ToAttributes field.
+type toAttributeRule struct {
+	// Compiled regexp.
+	re *regexp.Regexp
+
+	// Attribute names extracted from the regexp's subexpressions.
+	attrNames []string
+}
+
+// newSpanProcessor returns the span processor.
+func newSpanProcessor(config Config) (*spanProcessor, error) {
+	include, err := filterspan.NewMatcher(config.Include)
+	if err != nil {
+		return nil, err
+	}
+	exclude, err := filterspan.NewMatcher(config.Exclude)
+	if err != nil {
+		return nil, err
+	}
+
+	sp := &spanProcessor{
+		config:  config,
+		include: include,
+		exclude: exclude,
+	}
+
+	// Compile ToAttributes regexps and extract attribute names.
+	if config.Rename.ToAttributes != nil {
+		for _, pattern := range config.Rename.ToAttributes.Rules {
+			re, err := regexp.Compile(pattern)
+			if err != nil {
+				return nil, fmt.Errorf("invalid regexp pattern %s", pattern)
+			}
+
+			rule := toAttributeRule{
+				re: re,
+				// Subexpression names will become attribute names during extraction.
+				attrNames: re.SubexpNames(),
+			}
+
+			sp.toAttributeRules = append(sp.toAttributeRules, rule)
+		}
+	}
+
+	return sp, nil
+}
+
+func (sp *spanProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) {
+	rss := td.ResourceSpans()
+	for i := 0; i < rss.Len(); i++ {
+		rs := rss.At(i)
+		ilss := rs.InstrumentationLibrarySpans()
+		resource := rs.Resource()
+		for j := 0; j < ilss.Len(); j++ {
+			ils := ilss.At(j)
+			spans := ils.Spans()
+			library := ils.InstrumentationLibrary()
+			for k := 0; k < spans.Len(); k++ {
+				s := spans.At(k)
+				if filterspan.SkipSpan(sp.include, sp.exclude, s, resource, library) {
+					continue
+				}
+				sp.processFromAttributes(s)
+				sp.processToAttributes(s)
+			}
+		}
+	}
+	return td, nil
+}
+
+func (sp *spanProcessor) processFromAttributes(span pdata.Span) {
+	if len(sp.config.Rename.FromAttributes) == 0 {
+		// There is no FromAttributes rule.
+		return
+	}
+
+	attrs := span.Attributes()
+	if attrs.Len() == 0 {
+		// There are no attributes to create the span name from.
+		return
+	}
+
+	// Note: There was a separate proposal for creating the string.
+	// With benchmarking, strings.Builder is faster than the proposal.
+	// For full context, refer to this PR comment:
+	// https://go.opentelemetry.io/collector/pull/301#discussion_r318357678
+	var sb strings.Builder
+	for i, key := range sp.config.Rename.FromAttributes {
+		attr, found := attrs.Get(key)
+
+		// If one of the keys isn't found, the span name is not updated.
+		if !found {
+			return
+		}
+
+		// Note: WriteString() always returns a nil error, so there is no error checking
+		// for this method call.
diff --git a/internal/otel_collector/processor/spanprocessor/span_test.go b/internal/otel_collector/processor/spanprocessor/span_test.go
new file mode 100644
index 00000000000..df4fe1b5b27
--- /dev/null
+++ b/internal/otel_collector/processor/spanprocessor/span_test.go
@@ -0,0 +1,599 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanprocessor
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/component/componenterror"
+ "go.opentelemetry.io/collector/consumer/consumertest"
+ "go.opentelemetry.io/collector/consumer/pdata"
+ "go.opentelemetry.io/collector/internal/processor/filterconfig"
+ "go.opentelemetry.io/collector/internal/processor/filterset"
+ "go.opentelemetry.io/collector/internal/testdata"
+ "go.opentelemetry.io/collector/translator/conventions"
+)
+
+func TestNewTraceProcessor(t *testing.T) {
+ factory := NewFactory()
+ cfg := factory.CreateDefaultConfig()
+ oCfg := cfg.(*Config)
+ oCfg.Rename.FromAttributes = []string{"foo"}
+ tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, nil)
+ require.Equal(t, componenterror.ErrNilNextConsumer, err)
+ require.Nil(t, tp)
+
+ tp, err = factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{}, cfg, consumertest.NewTracesNop())
+ require.Nil(t, err)
+ require.NotNil(t, tp)
+}
+
+// Common structure for the test cases.
+type testCase struct {
+ serviceName string
+ inputName string
+ inputAttributes map[string]pdata.AttributeValue
+ outputName string
+ outputAttributes map[string]pdata.AttributeValue
+}
+
+// runIndividualTestCase is the common logic of passing trace data through a configured span processor.
+func runIndividualTestCase(t *testing.T, tt testCase, tp component.TracesProcessor) {
+ t.Run(tt.inputName, func(t *testing.T) {
+ td := generateTraceData(tt.serviceName, tt.inputName, tt.inputAttributes)
+
+ assert.NoError(t, tp.ConsumeTraces(context.Background(), td))
+ // Ensure that the modified `td` has the attributes sorted:
+ rss := td.ResourceSpans()
+ for i := 0; i < rss.Len(); i++ {
+ rs := rss.At(i)
+ rs.Resource().Attributes().Sort()
+ ilss := rs.InstrumentationLibrarySpans()
+ for j := 0; j < ilss.Len(); j++ {
+ spans := ilss.At(j).Spans()
+ for k := 0; k < spans.Len(); k++ {
+ spans.At(k).Attributes().Sort()
+ }
+ }
+ }
+ assert.EqualValues(t, generateTraceData(tt.serviceName, tt.outputName, tt.outputAttributes), td)
+ })
+}
+
+func generateTraceData(serviceName, inputName string, attrs map[string]pdata.AttributeValue) pdata.Traces {
+ td := pdata.NewTraces()
+ td.ResourceSpans().Resize(1)
+ rs := td.ResourceSpans().At(0)
+ if serviceName != "" {
+ rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, serviceName)
+ }
+ rs.InstrumentationLibrarySpans().Resize(1)
+ ils := rs.InstrumentationLibrarySpans().At(0)
+ spans := ils.Spans()
+ spans.Resize(1)
+ spans.At(0).SetName(inputName)
+ spans.At(0).Attributes().InitFromMap(attrs).Sort()
+ return td
+}
+
+// TestSpanProcessor_NilEmptyData tests that nil and empty trace data pass through unchanged.
+func TestSpanProcessor_NilEmptyData(t *testing.T) {
+ type nilEmptyTestCase struct {
+ name string
+ input pdata.Traces
+ output pdata.Traces
+ }
+ // TODO: Add test for "nil" Span. This needs support from data slices to allow to construct that.
+ testCases := []nilEmptyTestCase{ + { + name: "empty", + input: testdata.GenerateTraceDataEmpty(), + output: testdata.GenerateTraceDataEmpty(), + }, + { + name: "one-empty-resource-spans", + input: testdata.GenerateTraceDataOneEmptyResourceSpans(), + output: testdata.GenerateTraceDataOneEmptyResourceSpans(), + }, + { + name: "no-libraries", + input: testdata.GenerateTraceDataNoLibraries(), + output: testdata.GenerateTraceDataNoLibraries(), + }, + { + name: "one-empty-instrumentation-library", + input: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + output: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + }, + } + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Include = &filterconfig.MatchProperties{ + Config: *createMatchConfig(filterset.Strict), + Services: []string{"service"}, + } + oCfg.Rename.FromAttributes = []string{"key"} + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, tp.ConsumeTraces(context.Background(), tt.input)) + assert.EqualValues(t, tt.output, tt.input) + }) + } +} + +// TestSpanProcessor_Values tests all possible value types. +func TestSpanProcessor_Values(t *testing.T) { + // TODO: Add test for "nil" Span. This needs support from data slices to allow to construct that. + testCases := []testCase{ + { + inputName: "", + inputAttributes: nil, + outputName: "", + outputAttributes: nil, + }, + { + inputName: "nil-attributes", + inputAttributes: nil, + outputName: "nil-attributes", + outputAttributes: nil, + }, + { + inputName: "empty-attributes", + inputAttributes: map[string]pdata.AttributeValue{}, + outputName: "empty-attributes", + outputAttributes: map[string]pdata.AttributeValue{}, + }, + { + inputName: "string-type", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + }, + outputName: "bob", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + }, + }, + { + inputName: "int-type", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueInt(123), + }, + outputName: "123", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueInt(123), + }, + }, + { + inputName: "double-type", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueDouble(234.129312), + }, + outputName: "234.129312", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueDouble(234.129312), + }, + }, + { + inputName: "bool-type", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueBool(true), + }, + outputName: "true", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueBool(true), + }, + }, + // TODO: What do we do when AttributeMap contains a nil entry? Is that possible? + // TODO: In the new protocol do we want to support unknown type as 0 instead of string? + // TODO: Do we want to allow constructing entries with unknown type? 
+ /*{ + inputName: "nil-type", + inputAttributes: map[string]data.AttributeValue{ + "key1": data.NewAttributeValue(), + }, + outputName: "", + outputAttributes: map[string]data.AttributeValue{ + "key1": data.NewAttributeValue(), + }, + }, + { + inputName: "unknown-type", + inputAttributes: map[string]data.AttributeValue{ + "key1": {}, + }, + outputName: "", + outputAttributes: map[string]data.AttributeValue{ + "key1": {}, + }, + },*/ + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Rename.FromAttributes = []string{"key1"} + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + for _, tc := range testCases { + runIndividualTestCase(t, tc, tp) + } +} + +// TestSpanProcessor_MissingKeys tests that missing a key in an attribute map results in no span name changes. +func TestSpanProcessor_MissingKeys(t *testing.T) { + testCases := []testCase{ + { + inputName: "first-keys-missing", + inputAttributes: map[string]pdata.AttributeValue{ + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + "key4": pdata.NewAttributeValueBool(true), + }, + outputName: "first-keys-missing", + outputAttributes: map[string]pdata.AttributeValue{ + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + "key4": pdata.NewAttributeValueBool(true), + }, + }, + { + inputName: "middle-key-missing", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key4": pdata.NewAttributeValueBool(true), + }, + outputName: "middle-key-missing", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key4": pdata.NewAttributeValueBool(true), + }, + }, + { + inputName: "last-key-missing", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + }, + outputName: "last-key-missing", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + }, + }, + { + inputName: "all-keys-exists", + inputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + "key4": pdata.NewAttributeValueBool(true), + }, + outputName: "bob::123::234.129312::true", + outputAttributes: map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + "key4": pdata.NewAttributeValueBool(true), + }, + }, + } + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Rename.FromAttributes = []string{"key1", "key2", "key3", "key4"} + oCfg.Rename.Separator = "::" + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + for _, tc := range testCases { + runIndividualTestCase(t, tc, tp) + } +} + +// 
TestSpanProcessor_Separator ensures naming a span with a single key and separator will only contain the value from +// the single key. +func TestSpanProcessor_Separator(t *testing.T) { + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Rename.FromAttributes = []string{"key1"} + oCfg.Rename.Separator = "::" + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + traceData := generateTraceData( + "", + "ensure no separator in the rename with one key", + map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + }) + assert.NoError(t, tp.ConsumeTraces(context.Background(), traceData)) + + assert.Equal(t, generateTraceData( + "", + "bob", + map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + }), traceData) +} + +// TestSpanProcessor_NoSeparatorMultipleKeys tests naming a span using multiple keys and no separator. +func TestSpanProcessor_NoSeparatorMultipleKeys(t *testing.T) { + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Rename.FromAttributes = []string{"key1", "key2"} + oCfg.Rename.Separator = "" + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + traceData := generateTraceData( + "", + "ensure no separator in the rename with two keys", map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + }) + assert.NoError(t, tp.ConsumeTraces(context.Background(), traceData)) + + assert.Equal(t, generateTraceData( + "", + "bob123", + map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + }), traceData) +} + +// TestSpanProcessor_SeparatorMultipleKeys tests naming a span with multiple keys and a separator. +func TestSpanProcessor_SeparatorMultipleKeys(t *testing.T) { + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Rename.FromAttributes = []string{"key1", "key2", "key3", "key4"} + oCfg.Rename.Separator = "::" + + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + traceData := generateTraceData( + "", + "rename with separators and multiple keys", + map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + "key4": pdata.NewAttributeValueBool(true), + }) + assert.NoError(t, tp.ConsumeTraces(context.Background(), traceData)) + + assert.Equal(t, generateTraceData( + "", + "bob::123::234.129312::true", + map[string]pdata.AttributeValue{ + "key1": pdata.NewAttributeValueString("bob"), + "key2": pdata.NewAttributeValueInt(123), + "key3": pdata.NewAttributeValueDouble(234.129312), + "key4": pdata.NewAttributeValueBool(true), + }), traceData) +} + +// TestSpanProcessor_NilName tests naming a span when the input span had no name. 
+func TestSpanProcessor_NilName(t *testing.T) {
+
+ factory := NewFactory()
+ cfg := factory.CreateDefaultConfig()
+ oCfg := cfg.(*Config)
+ oCfg.Rename.FromAttributes = []string{"key1"}
+ oCfg.Rename.Separator = "::"
+
+ tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop())
+ require.Nil(t, err)
+ require.NotNil(t, tp)
+
+ traceData := generateTraceData(
+ "",
+ "",
+ map[string]pdata.AttributeValue{
+ "key1": pdata.NewAttributeValueString("bob"),
+ })
+ assert.NoError(t, tp.ConsumeTraces(context.Background(), traceData))
+
+ assert.Equal(t, generateTraceData(
+ "",
+ "bob",
+ map[string]pdata.AttributeValue{
+ "key1": pdata.NewAttributeValueString("bob"),
+ }), traceData)
+}
+
+// TestSpanProcessor_ToAttributes tests the to_attributes rename rules, including break_after_match.
+func TestSpanProcessor_ToAttributes(t *testing.T) {
+
+ testCases := []struct {
+ rules []string
+ breakAfterMatch bool
+ testCase
+ }{
+ {
+ rules: []string{`^\/api\/v1\/document\/(?P<documentId>.*)\/update\/1$`},
+ testCase: testCase{
+ inputName: "/api/v1/document/321083210/update/1",
+ inputAttributes: map[string]pdata.AttributeValue{},
+ outputName: "/api/v1/document/{documentId}/update/1",
+ outputAttributes: map[string]pdata.AttributeValue{
+ "documentId": pdata.NewAttributeValueString("321083210"),
+ },
+ },
+ },
+
+ {
+ rules: []string{`^\/api\/(?P<version>.*)\/document\/(?P<documentId>.*)\/update\/2$`},
+ testCase: testCase{
+ inputName: "/api/v1/document/321083210/update/2",
+ outputName: "/api/{version}/document/{documentId}/update/2",
+ outputAttributes: map[string]pdata.AttributeValue{
+ "documentId": pdata.NewAttributeValueString("321083210"),
+ "version": pdata.NewAttributeValueString("v1"),
+ },
+ },
+ },
+
+ {
+ rules: []string{`^\/api\/.*\/document\/(?P<documentId>.*)\/update\/3$`,
+ `^\/api\/(?P<version>.*)\/document\/.*\/update\/3$`},
+ testCase: testCase{
+ inputName: "/api/v1/document/321083210/update/3",
+ outputName: "/api/{version}/document/{documentId}/update/3",
+ outputAttributes: map[string]pdata.AttributeValue{
+ "documentId": pdata.NewAttributeValueString("321083210"),
+ "version": pdata.NewAttributeValueString("v1"),
+ },
+ },
+ breakAfterMatch: false,
+ },
+
+ {
+ rules: []string{`^\/api\/v1\/document\/(?P<documentId>.*)\/update\/4$`,
+ `^\/api\/(?P<version>.*)\/document\/(?P<documentId>.*)\/update\/4$`},
+ testCase: testCase{
+ inputName: "/api/v1/document/321083210/update/4",
+ outputName: "/api/v1/document/{documentId}/update/4",
+ outputAttributes: map[string]pdata.AttributeValue{
+ "documentId": pdata.NewAttributeValueString("321083210"),
+ },
+ },
+ breakAfterMatch: true,
+ },
+
+ {
+ rules: []string{"rule"},
+ testCase: testCase{
+ inputName: "",
+ outputName: "",
+ outputAttributes: nil,
+ },
+ },
+ }
+
+ factory := NewFactory()
+ cfg := factory.CreateDefaultConfig()
+ oCfg := cfg.(*Config)
+ oCfg.Rename.ToAttributes = &ToAttributes{}
+
+ for _, tc := range testCases {
+ oCfg.Rename.ToAttributes.Rules = tc.rules
+ oCfg.Rename.ToAttributes.BreakAfterMatch = tc.breakAfterMatch
+ tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop())
+ require.Nil(t, err)
+ require.NotNil(t, tp)
+
+ runIndividualTestCase(t, tc.testCase, tp)
+ }
+}
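The third and fourth groups of rules above differ only in `break_after_match`. When it is false, every rule runs in order against the output of the previous rule; when true, processing stops at the first rule that matches. A hedged, self-contained sketch of that control flow, simplified to single-group rules and omitting the attribute bookkeeping the real processor performs:

```go
package main

import (
	"fmt"
	"regexp"
)

// substitute replaces the first named subexpression match with "{name}".
func substitute(name string, re *regexp.Regexp) (string, bool) {
	idx := re.FindStringSubmatchIndex(name)
	if idx == nil {
		return name, false
	}
	attr := re.SubexpNames()[1]
	return name[:idx[2]] + "{" + attr + "}" + name[idx[3]:], true
}

func main() {
	rules := []*regexp.Regexp{
		regexp.MustCompile(`^\/api\/.*\/document\/(?P<documentId>.*)\/update\/3$`),
		regexp.MustCompile(`^\/api\/(?P<version>.*)\/document\/.*\/update\/3$`),
	}
	for _, breakAfterMatch := range []bool{false, true} {
		name := "/api/v1/document/321083210/update/3"
		for _, re := range rules {
			var matched bool
			name, matched = substitute(name, re)
			if matched && breakAfterMatch {
				break // stop at the first matching rule
			}
		}
		fmt.Printf("break_after_match=%v => %s\n", breakAfterMatch, name)
	}
}
```

With `break_after_match=false` both rules fire and the name becomes `/api/{version}/document/{documentId}/update/3`; with `true` only the first rule applies, matching the test expectations above.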
"www.test.com/code", + outputName: "{operation_website}", + outputAttributes: map[string]pdata.AttributeValue{ + "operation_website": pdata.NewAttributeValueString("www.test.com/code"), + }, + }, + { + serviceName: "banks", + inputName: "donot/", + inputAttributes: map[string]pdata.AttributeValue{ + "operation_website": pdata.NewAttributeValueString("www.test.com/code"), + }, + outputName: "{operation_website}", + outputAttributes: map[string]pdata.AttributeValue{ + "operation_website": pdata.NewAttributeValueString("donot/"), + }, + }, + { + serviceName: "banks", + inputName: "donot/change", + inputAttributes: map[string]pdata.AttributeValue{ + "operation_website": pdata.NewAttributeValueString("www.test.com/code"), + }, + outputName: "donot/change", + outputAttributes: map[string]pdata.AttributeValue{ + "operation_website": pdata.NewAttributeValueString("www.test.com/code"), + }, + }, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.Include = &filterconfig.MatchProperties{ + Config: *createMatchConfig(filterset.Regexp), + Services: []string{`^banks$`}, + SpanNames: []string{"/"}, + } + oCfg.Exclude = &filterconfig.MatchProperties{ + Config: *createMatchConfig(filterset.Strict), + SpanNames: []string{`donot/change`}, + } + oCfg.Rename.ToAttributes = &ToAttributes{ + Rules: []string{`(?P.*?)$`}, + } + tp, err := factory.CreateTracesProcessor(context.Background(), component.ProcessorCreateParams{Logger: zap.NewNop()}, oCfg, consumertest.NewTracesNop()) + require.Nil(t, err) + require.NotNil(t, tp) + + for _, tc := range testCases { + runIndividualTestCase(t, tc, tp) + } +} diff --git a/internal/otel_collector/processor/spanprocessor/testdata/config.yaml b/internal/otel_collector/processor/spanprocessor/testdata/config.yaml new file mode 100644 index 00000000000..67d75e61c5f --- /dev/null +++ b/internal/otel_collector/processor/spanprocessor/testdata/config.yaml @@ -0,0 +1,95 @@ +receivers: + examplereceiver: + +processors: + # The following specifies the values of attribute `db.svc`, `operation`, + # and `id` will form the new name of the span, in that order, separated by the + # value `::`. All attribute keys needs to be specified in the span for + # the processor to rename it. + # Note: There is no default configuration for the span processor. For 'name', + # the field `from_attributes` is required. + # + # Example 1 - All keys are in the span: + # Span name before processor: + # "Span.Name": "serviceA" + # Attributes Key/Value pair for a span + # { "db.svc": "location", "operation": "get", "id": "1234"} + # Separator: "::" + # Results in the following new span name: + # "Span.Name": "location::get::1234" + # + # Example 2 - Some keys are missing from the span. + # Span name(before processor): + # "Span.Name": "serviceA" + # Attributes Key/Value pair for a span + # { "db.svc": "location", "id": "1234"} + # Separator: "::" + # Results in no new name because the attribute key `operation` isn't set. + # Span name after processor: + # "Span.Name": "serviceA" + span/custom: + name: + separator: "::" + from_attributes: [db.svc, operation, id] + + # The following specifies generating a span name with no separator. 
diff --git a/internal/otel_collector/processor/spanprocessor/testdata/config.yaml b/internal/otel_collector/processor/spanprocessor/testdata/config.yaml
new file mode 100644
index 00000000000..67d75e61c5f
--- /dev/null
+++ b/internal/otel_collector/processor/spanprocessor/testdata/config.yaml
@@ -0,0 +1,95 @@
+receivers:
+ examplereceiver:
+
+processors:
+ # The following specifies that the values of attributes `db.svc`, `operation`,
+ # and `id` will form the new name of the span, in that order, separated by the
+ # value `::`. All attribute keys need to be specified in the span for
+ # the processor to rename it.
+ # Note: There is no default configuration for the span processor. For 'name',
+ # the field `from_attributes` is required.
+ #
+ # Example 1 - All keys are in the span:
+ # Span name before processor:
+ # "Span.Name": "serviceA"
+ # Attributes Key/Value pair for a span
+ # { "db.svc": "location", "operation": "get", "id": "1234"}
+ # Separator: "::"
+ # Results in the following new span name:
+ # "Span.Name": "location::get::1234"
+ #
+ # Example 2 - Some keys are missing from the span.
+ # Span name (before processor):
+ # "Span.Name": "serviceA"
+ # Attributes Key/Value pair for a span
+ # { "db.svc": "location", "id": "1234"}
+ # Separator: "::"
+ # Results in no new name because the attribute key `operation` isn't set.
+ # Span name after processor:
+ # "Span.Name": "serviceA"
+ span/custom:
+ name:
+ separator: "::"
+ from_attributes: [db.svc, operation, id]
+
+ # The following specifies generating a span name with no separator.
+ # Example:
+ # Attributes Key/Value pair
+ # { "db.svc": "location", "operation": "get", "id": "1234"}
+ # Separator: ""
+ # Results in the following new span name:
+ # "locationget1234"
+ span/no-separator:
+ name:
+ from_attributes: [db.svc, operation, id]
+
+ # The following extracts attributes from the span name and replaces the
+ # extracted parts with attribute names.
+ # to_attributes is a list of rules that extract attribute values from the span
+ # name and replace them with attribute names in the span name. Each rule in the
+ # list is a regex pattern string. The span name is checked against the regex; if
+ # it matches, all named subexpressions of the regex are extracted as attributes
+ # and added to the span. Each subexpression name becomes an attribute name and
+ # the subexpression's matched portion becomes the attribute value. The matched
+ # portion in the span name is replaced by the extracted attribute name. If the
+ # attributes already exist they will be overwritten. Checks are performed for
+ # elements in this array in the order they are specified.
+ #
+ # Example:
+ # Let's assume the input span name is /api/v1/document/12345678/update
+ # Applying the following results in the output span name /api/v1/document/{documentId}/update
+ # and will add a new attribute "documentId"="12345678" to the span.
+ span/to_attributes:
+ name:
+ to_attributes:
+ rules:
+ - ^\/api\/v1\/document\/(?P<documentId>.*)\/update$
+
+ # The following demonstrates renaming the span name to `{operation_website}`
+ # and adding the attribute {Key: operation_website, Value: <old span name>}
+ # when the span has the following properties
+ # - Service names that contain the word `banks`.
+ # - The span name contains '/' anywhere in the string.
+ # - The span name is not 'donot/change'.
+ span/includeexclude:
+ include:
+ match_type: regexp
+ services: ["banks"]
+ span_names: ["^(.*?)/(.*?)$"]
+ exclude:
+ match_type: strict
+ span_names: ["donot/change"]
+ name:
+ to_attributes:
+ rules:
+ - "(?P<operation_website>.*?)$"
+
+exporters:
+ exampleexporter:
+
+service:
+ pipelines:
+ traces:
+ receivers: [examplereceiver]
+ processors: [span/custom]
+ exporters: [exampleexporter]
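Example 1 and Example 2 in the comments above condense to a simple rule: attribute values are joined in rule order with the separator, and a single missing key leaves the span name untouched. A hedged sketch of just that from_attributes naming rule (not the processor itself):

```go
package main

import (
	"fmt"
	"strings"
)

// renameFromAttributes joins attribute values in rule order, separated by
// sep. If any key is missing, the original name is kept (Example 2).
func renameFromAttributes(name string, attrs map[string]string, keys []string, sep string) string {
	values := make([]string, 0, len(keys))
	for _, k := range keys {
		v, ok := attrs[k]
		if !ok {
			return name // a key is missing: leave the span name unchanged
		}
		values = append(values, v)
	}
	return strings.Join(values, sep)
}

func main() {
	attrs := map[string]string{"db.svc": "location", "operation": "get", "id": "1234"}
	keys := []string{"db.svc", "operation", "id"}
	fmt.Println(renameFromAttributes("serviceA", attrs, keys, "::")) // location::get::1234

	delete(attrs, "operation")
	fmt.Println(renameFromAttributes("serviceA", attrs, keys, "::")) // serviceA
}
```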
diff --git a/internal/otel_collector/proto_patch.sed b/internal/otel_collector/proto_patch.sed
new file mode 100644
index 00000000000..c83bfd00ba2
--- /dev/null
+++ b/internal/otel_collector/proto_patch.sed
@@ -0,0 +1,40 @@
+s+github.com/open-telemetry/opentelemetry-proto/gen/go/+go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/+g
+
+s+package opentelemetry.proto.\(.*\).v1;+package opentelemetry.proto.\1.v1;\
+\
+import "gogoproto/gogo.proto";+g
+
+s+bytes trace_id = \(.*\);+bytes trace_id = \1\
+ [\
+ // Use custom TraceId data type for this field.\
+ (gogoproto.nullable) = false,\
+ (gogoproto.customtype) = "go.opentelemetry.io/collector/internal/data.TraceID"\
+ ];+g
+
+s+bytes \(.*span_id\) = \(.*\);+bytes \1 = \2\
+ [\
+ // Use custom SpanId data type for this field.\
+ (gogoproto.nullable) = false,\
+ (gogoproto.customtype) = "go.opentelemetry.io/collector/internal/data.SpanID"\
+ ];+g
+
+s+repeated opentelemetry.proto.common.v1.KeyValue \(.*\);+repeated opentelemetry.proto.common.v1.KeyValue \1\
+ [ (gogoproto.nullable) = false ];+g
+
+s+repeated KeyValue \(.*\);+repeated KeyValue \1\
+ [ (gogoproto.nullable) = false ];+g
+
+s+AnyValue \(.*\);+AnyValue \1\
+ [ (gogoproto.nullable) = false ];+g
+
+s+repeated opentelemetry.proto.common.v1.StringKeyValue \(.*\);+repeated opentelemetry.proto.common.v1.StringKeyValue \1\
+ [ (gogoproto.nullable) = false ];+g
+
+s+opentelemetry.proto.resource.v1.Resource resource = \(.*\);+repeated opentelemetry.proto.resource.v1.Resource resource = \1\
+ [ (gogoproto.nullable) = false ];+g
+
+s+opentelemetry.proto.common.v1.InstrumentationLibrary instrumentation_library = \(.*\);+opentelemetry.proto.common.v1.InstrumentationLibrary instrumentation_library = \1\
+ [ (gogoproto.nullable) = false ];+g
+
+s+Status \(.*\);+Status \1\
+ [ (gogoproto.nullable) = false ];+g
diff --git a/internal/otel_collector/receiver/README.md b/internal/otel_collector/receiver/README.md
new file mode 100644
index 00000000000..b3fcf3eb010
--- /dev/null
+++ b/internal/otel_collector/receiver/README.md
@@ -0,0 +1,83 @@
+# General Information
+
+A receiver is how data gets into the OpenTelemetry Collector. Generally, a
+receiver accepts data in a specified format, translates it into the internal
+format and passes it to [processors](../processor/README.md) and
+[exporters](../exporter/README.md) defined in the applicable
+pipelines.
+
+Available trace receivers (sorted alphabetically):
+
+- [Jaeger Receiver](jaegerreceiver/README.md)
+- [Kafka Receiver](kafkareceiver/README.md)
+- [OpenCensus Receiver](opencensusreceiver/README.md)
+- [OTLP Receiver](otlpreceiver/README.md)
+- [Zipkin Receiver](zipkinreceiver/README.md)
+
+Available metric receivers (sorted alphabetically):
+
+- [Host Metrics Receiver](hostmetricsreceiver/README.md)
+- [OpenCensus Receiver](opencensusreceiver/README.md)
+- [OTLP Receiver](otlpreceiver/README.md)
+- [Prometheus Receiver](prometheusreceiver/README.md)
+
+Available log receivers (sorted alphabetically):
+
+- [Fluent Forward Receiver](fluentforwardreceiver/README.md)
+- [OTLP Receiver](otlpreceiver/README.md)
+
+The [contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib)
+has more receivers that can be added to custom builds of the collector.
+
+## Configuring Receivers
+
+Receivers are configured via YAML under the top-level `receivers` tag. There
+must be at least one enabled receiver for a configuration to be considered
+valid.
+
+The following is a sample configuration for the `examplereceiver`.
+
+```yaml
+receivers:
+ # Receiver 1.
+ # <receiver type>:
+ examplereceiver:
+ # <setting>: <value>
+ endpoint: 1.2.3.4:8080
+ # ...
+ # Receiver 2.
+ # <receiver type>/<name>:
+ examplereceiver/settings:
+ # <setting>: <value>
+ endpoint: 0.0.0.0:9211
+```
+
+A receiver instance is referenced by its full name in other parts of the config,
+such as in pipelines. A full name consists of the receiver type, '/' and the
+name appended to the receiver type in the configuration. All receiver full names
+must be unique.
+
+For the example above:
+
+- Receiver 1 has full name `examplereceiver`.
+- Receiver 2 has full name `examplereceiver/settings`.
+
+Receivers are enabled upon being added to a pipeline. For example:
+
+```yaml
+service:
+ pipelines:
+ # Valid pipelines are: traces, metrics or logs
+ # Trace pipeline 1.
+ traces:
+ receivers: [examplereceiver, examplereceiver/settings]
+ processors: []
+ exporters: [exampleexporter]
+ # Trace pipeline 2.
+ traces/another:
+ receivers: [examplereceiver, examplereceiver/settings]
+ processors: []
+ exporters: [exampleexporter]
+```
+
+> At least one receiver must be enabled per pipeline to be a valid configuration.
\ No newline at end of file
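The `type[/name]` full-name convention described above is easy to get wrong, so here is a small illustrative helper (ours, not a collector API) that decomposes a receiver full name:

```go
package main

import (
	"fmt"
	"strings"
)

// splitFullName decomposes a receiver full name into its type and optional
// name, per the "type[/name]" convention described in the README above.
func splitFullName(fullName string) (typ, name string) {
	parts := strings.SplitN(fullName, "/", 2)
	typ = parts[0]
	if len(parts) == 2 {
		name = parts[1]
	}
	return typ, name
}

func main() {
	for _, fn := range []string{"examplereceiver", "examplereceiver/settings"} {
		typ, name := splitFullName(fn)
		fmt.Printf("full name %q => type %q, name %q\n", fn, typ, name)
	}
}
```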
diff --git a/internal/otel_collector/receiver/doc.go b/internal/otel_collector/receiver/doc.go
new file mode 100644
index 00000000000..f9d6d670038
--- /dev/null
+++ b/internal/otel_collector/receiver/doc.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package receiver contains implementations of Receiver components.
+//
+// To implement a custom receiver you will need to implement the component.ReceiverFactory
+// interface and the component.Receiver interface.
+//
+// To make the custom receiver part of the Collector build, the factory must be added
+// to the defaultcomponents.Components() function.
+package receiver
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/README.md b/internal/otel_collector/receiver/fluentforwardreceiver/README.md
new file mode 100644
index 00000000000..51549cb34dd
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/README.md
@@ -0,0 +1,33 @@
+# Fluent Forward Receiver
+
+This receiver runs a TCP server that accepts events via the [Fluent Forward
+protocol](https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1).
+
+This receiver:
+
+ - Does **not** support TLS or the handshake portion of the Forward protocol.
+ - Does support acknowledgments of events that have the `chunk` option, as per the spec.
+ - Supports all three event types (message, forward, packed forward, including
+ compressed packed forward).
+ - Supports listening on a Unix domain socket by making the `listenAddress`
+ option of the form `unix://<path>`.
+ - If using TCP, it will start a UDP server on the same port to deliver
+ heartbeat echoes, as per the spec.
+
+Here is a basic example config that makes the receiver listen on all interfaces
+on port 8006:
+
+```yaml
+receivers:
+ fluentforward:
+ endpoint: 0.0.0.0:8006
+```
+
+## Development
+
+If you are working on this receiver and need to regenerate any of the message
+pack autogenerated code, just run `go generate` on this package and its
+subpackages. You can get the `msgp` binary by running `go get -u -t
+github.com/tinylib/msgp`, and make sure the Go binary path is on your shell's
+PATH.
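For a feel of the wire format this receiver decodes, here is a hedged sketch of a client that sends a single message-mode event (a MessagePack array of [tag, time, record]) over TCP, using the same msgp library the receiver uses; the endpoint, tag, and record fields are placeholders:

```go
package main

import (
	"log"
	"net"
	"time"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Assumes a fluentforward receiver listening locally; adjust as needed.
	conn, err := net.Dial("tcp", "127.0.0.1:8006")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Message mode: a 3-element array of [tag, time, record].
	var b []byte
	b = msgp.AppendArrayHeader(b, 3)
	b = msgp.AppendString(b, "app.logs")       // tag
	b = msgp.AppendInt64(b, time.Now().Unix()) // event time in seconds
	b = msgp.AppendMapHeader(b, 2)             // record map
	b = msgp.AppendString(b, "log")
	b = msgp.AppendString(b, "hello from fluent forward")
	b = msgp.AppendString(b, "source")
	b = msgp.AppendString(b, "stdout")

	if _, err := conn.Write(b); err != nil {
		log.Fatal(err)
	}
}
```

The `log` key becomes the record body and the remaining keys become attributes, per the conversion code later in this patch.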
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/ack.go b/internal/otel_collector/receiver/fluentforwardreceiver/ack.go
new file mode 100644
index 00000000000..dba73f616b7
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/ack.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentforwardreceiver
+
+import "github.com/tinylib/msgp/msgp"
+
+type AckResponse struct {
+ Ack string `msg:"ack"`
+}
+
+func (z AckResponse) EncodeMsg(en *msgp.Writer) error {
+ // map header, size 1
+ // write "ack"
+ err := en.Append(0x81, 0xa3, 0x61, 0x63, 0x6b)
+ if err != nil {
+ return err
+ }
+
+ err = en.WriteString(z.Ack)
+ if err != nil {
+ return msgp.WrapError(err, "Ack")
+ }
+ return nil
+}
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/ack_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/ack_test.go
new file mode 100644
index 00000000000..f08146872e8
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/ack_test.go
@@ -0,0 +1,52 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentforwardreceiver
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/tinylib/msgp/msgp"
+
+ "go.opentelemetry.io/collector/testutil"
+)
+
+func msgpWriterWithLimit(t *testing.T, l int) *msgp.Writer {
+ // NewWriterSize forces size to be at least 18 bytes so just use that as
+ // the floor and write nulls to those first 18 bytes to make the limit
+ // truly l.
+ w := msgp.NewWriterSize(&testutil.LimitedWriter{
+ MaxLen: l,
+ }, 18+l)
+ _, err := w.Write(bytes.Repeat([]byte{0x00}, 18))
+ require.NoError(t, err)
+ return w
+}
+
+func TestAckEncoding(t *testing.T) {
+ a := &AckResponse{
+ Ack: "test",
+ }
+
+ err := a.EncodeMsg(msgpWriterWithLimit(t, 1000))
+ require.Nil(t, err)
+
+ err = a.EncodeMsg(msgpWriterWithLimit(t, 4))
+ require.NotNil(t, err)
+
+ err = a.EncodeMsg(msgpWriterWithLimit(t, 7))
+ require.NotNil(t, err)
+}
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/collector.go b/internal/otel_collector/receiver/fluentforwardreceiver/collector.go
new file mode 100644
index 00000000000..1a54c2d45c2
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/collector.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentforwardreceiver
+
+import (
+ "context"
+
+ "go.opencensus.io/stats"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/pdata"
+ "go.opentelemetry.io/collector/receiver/fluentforwardreceiver/observ"
+)
+
+// Collector acts as an aggregator of LogRecords so that we don't have to
+// generate as many pdata.Logs instances; we can pre-batch the LogRecord
+// instances from several Forward events into one to hopefully reduce
+// allocations and GC overhead.
+type Collector struct {
+ nextConsumer consumer.LogsConsumer
+ eventCh <-chan Event
+ logger *zap.Logger
+}
+
+func newCollector(eventCh <-chan Event, next consumer.LogsConsumer, logger *zap.Logger) *Collector {
+ return &Collector{
+ nextConsumer: next,
+ eventCh: eventCh,
+ logger: logger,
+ }
+}
+
+func (c *Collector) Start(ctx context.Context) {
+ go c.processEvents(ctx)
+}
+
+func (c *Collector) processEvents(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case e := <-c.eventCh:
+ buffered := []Event{e}
+ // Pull out anything waiting on the eventCh to get better
+ // efficiency on LogResource allocations.
+ buffered = fillBufferUntilChanEmpty(c.eventCh, buffered)
+
+ logs := collectLogRecords(buffered)
+ c.nextConsumer.ConsumeLogs(ctx, logs)
+ }
+ }
+}
+
+func fillBufferUntilChanEmpty(eventCh <-chan Event, buf []Event) []Event {
+ for {
+ select {
+ case e2 := <-eventCh:
+ buf = append(buf, e2)
+ default:
+ return buf
+ }
+ }
+}
+
+func collectLogRecords(events []Event) pdata.Logs {
+ out := pdata.NewLogs()
+
+ logs := out.ResourceLogs()
+
+ logs.Resize(1)
+ rls := logs.At(0)
+
+ rls.InstrumentationLibraryLogs().Resize(1)
+ logSlice := rls.InstrumentationLibraryLogs().At(0).Logs()
+
+ for i := range events {
+ events[i].LogRecords().MoveAndAppendTo(logSlice)
+ }
+
+ stats.Record(context.Background(), observ.RecordsGenerated.M(int64(out.LogRecordCount())))
+
+ return out
+}
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/config.go b/internal/otel_collector/receiver/fluentforwardreceiver/config.go
new file mode 100644
index 00000000000..37af82ae14f
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/config.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentforwardreceiver
+
+import (
+ "go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config defines configuration for the Fluent Forward receiver.
+type Config struct {
+ configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+ // The address to listen on for incoming Fluent Forward events. Should be
+ // of the form `<host>:<port>` (TCP) or `unix://<path>` (Unix
+ // domain socket).
+ ListenAddress string `mapstructure:"endpoint"` +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/config_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/config_test.go new file mode 100644 index 00000000000..eb90f7ad7ca --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/config_test.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.Nil(t, err) + + factory := NewFactory() + factories.Receivers[configmodels.Type(typeStr)] = factory + cfg, err := configtest.LoadConfigFile( + t, path.Join(".", "testdata", "config.yaml"), factories, + ) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 1) + + r0 := cfg.Receivers["fluentforward"] + assert.Equal(t, r0, factory.CreateDefaultConfig()) + +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/conversion.go b/internal/otel_collector/receiver/fluentforwardreceiver/conversion.go new file mode 100644 index 00000000000..74e3b0febe2 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/conversion.go @@ -0,0 +1,412 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/tinylib/msgp/msgp" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const tagAttributeKey = "fluent.tag" + +// Most of this logic is derived directly from +// https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1, +// which describes the fields in much greater detail. + +type Event interface { + DecodeMsg(dc *msgp.Reader) error + LogRecords() pdata.LogSlice + Chunk() string + Compressed() string +} + +type OptionsMap map[string]interface{} + +// Chunk returns the `chunk` option or blank string if it was not set. 
+func (om OptionsMap) Chunk() string { + c, _ := om["chunk"].(string) + return c +} + +func (om OptionsMap) Compressed() string { + compressed, _ := om["compressed"].(string) + return compressed +} + +type EventMode int + +type Peeker interface { + Peek(n int) ([]byte, error) +} + +const ( + UnknownMode EventMode = iota + MessageMode + ForwardMode + PackedForwardMode +) + +func (em EventMode) String() string { + switch em { + case UnknownMode: + return "unknown" + case MessageMode: + return "message" + case ForwardMode: + return "forward" + case PackedForwardMode: + return "packedforward" + default: + panic("programmer bug") + } +} + +func insertToAttributeMap(key string, val interface{}, dest *pdata.AttributeMap) { + // See https://github.com/tinylib/msgp/wiki/Type-Mapping-Rules + switch r := val.(type) { + case bool: + dest.InsertBool(key, r) + case string: + dest.InsertString(key, r) + case uint64: + dest.InsertInt(key, int64(r)) + case int64: + dest.InsertInt(key, r) + case []byte: + dest.InsertString(key, string(r)) + case map[string]interface{}, []interface{}: + encoded, err := json.Marshal(r) + if err != nil { + dest.InsertString(key, err.Error()) + } + dest.InsertString(key, string(encoded)) + case float32: + dest.InsertDouble(key, float64(r)) + case float64: + dest.InsertDouble(key, r) + default: + dest.InsertString(key, fmt.Sprintf("%v", r)) + } +} + +func timeFromTimestamp(ts interface{}) (time.Time, error) { + switch v := ts.(type) { + case int64: + return time.Unix(v, 0), nil + case *EventTimeExt: + return time.Time(*v), nil + default: + return time.Time{}, fmt.Errorf("unknown type of value: %v", ts) + } +} + +func decodeTimestampToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { + tsIntf, err := dc.ReadIntf() + if err != nil { + return msgp.WrapError(err, "Time") + } + + ts, err := timeFromTimestamp(tsIntf) + if err != nil { + return msgp.WrapError(err, "Time") + } + + lr.SetTimestamp(pdata.TimestampUnixNano(ts.UnixNano())) + return nil +} + +func parseRecordToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { + attrs := lr.Attributes() + + recordLen, err := dc.ReadMapHeader() + if err != nil { + return msgp.WrapError(err, "Record") + } + + for recordLen > 0 { + recordLen-- + key, err := dc.ReadString() + if err != nil { + return msgp.WrapError(err, "Record") + } + val, err := dc.ReadIntf() + if err != nil { + return msgp.WrapError(err, "Record", key) + } + + if s, ok := val.(string); ok && key == "log" { + lr.Body().SetStringVal(s) + } else { + insertToAttributeMap(key, val, &attrs) + } + } + + return nil +} + +type MessageEventLogRecord struct { + pdata.LogSlice + OptionsMap +} + +func (melr *MessageEventLogRecord) LogRecords() pdata.LogSlice { + return melr.LogSlice +} + +func (melr *MessageEventLogRecord) DecodeMsg(dc *msgp.Reader) error { + melr.LogSlice = pdata.NewLogSlice() + melr.LogSlice.Resize(1) + + var arrLen uint32 + var err error + + arrLen, err = dc.ReadArrayHeader() + if err != nil { + return msgp.WrapError(err) + } + if arrLen > 4 || arrLen < 3 { + return msgp.ArrayError{Wanted: 3, Got: arrLen} + } + + tag, err := dc.ReadString() + if err != nil { + return msgp.WrapError(err, "Tag") + } + + attrs := melr.LogSlice.At(0).Attributes() + attrs.InsertString(tagAttributeKey, tag) + + err = decodeTimestampToLogRecord(dc, melr.LogSlice.At(0)) + if err != nil { + return msgp.WrapError(err, "Time") + } + + err = parseRecordToLogRecord(dc, melr.LogSlice.At(0)) + if err != nil { + return err + } + + if arrLen == 4 { + melr.OptionsMap, err = parseOptions(dc) + 
if err != nil { + return err + } + } + return nil +} + +func parseOptions(dc *msgp.Reader) (OptionsMap, error) { + var optionLen uint32 + optionLen, err := dc.ReadMapHeader() + if err != nil { + return nil, msgp.WrapError(err, "Option") + } + out := make(OptionsMap, optionLen) + + for optionLen > 0 { + optionLen-- + key, err := dc.ReadString() + if err != nil { + return nil, msgp.WrapError(err, "Option") + } + val, err := dc.ReadIntf() + if err != nil { + return nil, msgp.WrapError(err, "Option", key) + } + out[key] = val + } + return out, nil +} + +type ForwardEventLogRecords struct { + pdata.LogSlice + OptionsMap +} + +func (fe *ForwardEventLogRecords) LogRecords() pdata.LogSlice { + return fe.LogSlice +} + +func (fe *ForwardEventLogRecords) DecodeMsg(dc *msgp.Reader) (err error) { + fe.LogSlice = pdata.NewLogSlice() + + var arrLen uint32 + arrLen, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if arrLen < 2 || arrLen > 3 { + err = msgp.ArrayError{Wanted: 2, Got: arrLen} + return + } + + tag, err := dc.ReadString() + if err != nil { + return msgp.WrapError(err, "Tag") + } + + entryLen, err := dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Record") + return + } + + fe.LogSlice.Resize(int(entryLen)) + for i := 0; i < int(entryLen); i++ { + lr := fe.LogSlice.At(i) + + err = parseEntryToLogRecord(dc, lr) + if err != nil { + return msgp.WrapError(err, "Entries", i) + } + fe.LogSlice.At(i).Attributes().InsertString(tagAttributeKey, tag) + } + + if arrLen == 3 { + fe.OptionsMap, err = parseOptions(dc) + if err != nil { + return err + } + } + + return +} + +func parseEntryToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { + arrLen, err := dc.ReadArrayHeader() + if err != nil { + return msgp.WrapError(err) + } + if arrLen != 2 { + return msgp.ArrayError{Wanted: 2, Got: arrLen} + } + + err = decodeTimestampToLogRecord(dc, lr) + if err != nil { + return msgp.WrapError(err, "Time") + } + + return parseRecordToLogRecord(dc, lr) +} + +type PackedForwardEventLogRecords struct { + pdata.LogSlice + OptionsMap +} + +func (pfe *PackedForwardEventLogRecords) LogRecords() pdata.LogSlice { + return pfe.LogSlice +} + +// DecodeMsg implements msgp.Decodable. This was originally code generated but +// then manually copied here in order to handle the optional Options field. +func (pfe *PackedForwardEventLogRecords) DecodeMsg(dc *msgp.Reader) error { + pfe.LogSlice = pdata.NewLogSlice() + + arrLen, err := dc.ReadArrayHeader() + if err != nil { + return msgp.WrapError(err) + } + if arrLen < 2 || arrLen > 3 { + return msgp.ArrayError{Wanted: 2, Got: arrLen} + } + + tag, err := dc.ReadString() + if err != nil { + return msgp.WrapError(err, "Tag") + } + + entriesFirstByte, err := dc.R.Peek(1) + if err != nil { + return msgp.WrapError(err, "EntriesRaw") + } + + entriesType := msgp.NextType(entriesFirstByte) + // We have to read out the entries raw all the way first because we don't + // know whether it is compressed or not until we read the options map which + // comes after. I guess we could use some kind of detection logic to + // determine if it is gzipped by peeking and just ignoring options, but + // this seems simpler for now. 
+ var entriesRaw []byte + switch entriesType { + case msgp.StrType: + var entriesStr string + entriesStr, err = dc.ReadString() + if err != nil { + return msgp.WrapError(err, "EntriesRaw") + } + entriesRaw = []byte(entriesStr) + case msgp.BinType: + entriesRaw, err = dc.ReadBytes(nil) + if err != nil { + return msgp.WrapError(err, "EntriesRaw") + } + default: + return msgp.WrapError(fmt.Errorf("invalid type %d", entriesType), "EntriesRaw") + } + + if arrLen == 3 { + pfe.OptionsMap, err = parseOptions(dc) + if err != nil { + return err + } + } + + err = pfe.parseEntries(entriesRaw, pfe.Compressed() == "gzip", tag) + if err != nil { + return err + } + + return nil +} + +func (pfe *PackedForwardEventLogRecords) parseEntries(entriesRaw []byte, isGzipped bool, tag string) error { + var reader io.Reader + reader = bytes.NewReader(entriesRaw) + + if isGzipped { + var err error + reader, err = gzip.NewReader(reader) + if err != nil { + return err + } + defer reader.(*gzip.Reader).Close() + } + + msgpReader := msgp.NewReader(reader) + for { + lr := pdata.NewLogRecord() + err := parseEntryToLogRecord(msgpReader, lr) + if err != nil { + if msgp.Cause(err) == io.EOF { + return nil + } + return err + } + + lr.Attributes().InsertString(tagAttributeKey, tag) + + pfe.LogSlice.Append(lr) + } +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/conversion_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/conversion_test.go new file mode 100644 index 00000000000..5681895c272 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/conversion_test.go @@ -0,0 +1,213 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fluentforwardreceiver + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tinylib/msgp/msgp" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/testutil/logstest" +) + +func TestMessageEventConversion(t *testing.T) { + eventBytes := parseHexDump("testdata/message-event") + reader := msgp.NewReader(bytes.NewReader(eventBytes)) + + var event MessageEventLogRecord + err := event.DecodeMsg(reader) + require.Nil(t, err) + + le := event.LogRecords().At(0) + le.Attributes().Sort() + + expected := logstest.Logs( + logstest.Log{ + Timestamp: 1593031012000000000, + Body: pdata.NewAttributeValueString("..."), + Attributes: map[string]pdata.AttributeValue{ + "container_id": pdata.NewAttributeValueString("b00a67eb645849d6ab38ff8beb4aad035cc7e917bf123c3e9057c7e89fc73d2d"), + "container_name": pdata.NewAttributeValueString("/unruffled_cannon"), + "fluent.tag": pdata.NewAttributeValueString("b00a67eb6458"), + "source": pdata.NewAttributeValueString("stdout"), + }, + }, + ) + require.EqualValues(t, expected.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0), le) +} + +func TestAttributeTypeConversion(t *testing.T) { + + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, "my-tag") + b = msgp.AppendInt(b, 5000) + b = msgp.AppendMapHeader(b, 14) + b = msgp.AppendString(b, "a") + b = msgp.AppendFloat64(b, 5.0) + b = msgp.AppendString(b, "b") + b = msgp.AppendFloat32(b, 6.0) + b = msgp.AppendString(b, "c") + b = msgp.AppendBool(b, true) + b = msgp.AppendString(b, "d") + b = msgp.AppendInt8(b, 1) + b = msgp.AppendString(b, "e") + b = msgp.AppendInt16(b, 2) + b = msgp.AppendString(b, "f") + b = msgp.AppendInt32(b, 3) + b = msgp.AppendString(b, "g") + b = msgp.AppendInt64(b, 4) + b = msgp.AppendString(b, "h") + b = msgp.AppendUint8(b, ^uint8(0)) + b = msgp.AppendString(b, "i") + b = msgp.AppendUint16(b, ^uint16(0)) + b = msgp.AppendString(b, "j") + b = msgp.AppendUint32(b, ^uint32(0)) + b = msgp.AppendString(b, "k") + b = msgp.AppendUint64(b, ^uint64(0)) + b = msgp.AppendString(b, "l") + b = msgp.AppendComplex64(b, complex64(0)) + b = msgp.AppendString(b, "m") + b = msgp.AppendBytes(b, []byte{0x1, 0x65, 0x2}) + b = msgp.AppendString(b, "n") + b = msgp.AppendArrayHeader(b, 2) + b = msgp.AppendString(b, "first") + b = msgp.AppendString(b, "second") + + reader := msgp.NewReader(bytes.NewReader(b)) + + var event MessageEventLogRecord + err := event.DecodeMsg(reader) + require.Nil(t, err) + + le := event.LogRecords().At(0) + le.Attributes().Sort() + require.EqualValues(t, logstest.Logs( + logstest.Log{ + Timestamp: 5000000000000, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "a": pdata.NewAttributeValueDouble(5.0), + "b": pdata.NewAttributeValueDouble(6.0), + "c": pdata.NewAttributeValueBool(true), + "d": pdata.NewAttributeValueInt(1), + "e": pdata.NewAttributeValueInt(2), + "f": pdata.NewAttributeValueInt(3), + "fluent.tag": pdata.NewAttributeValueString("my-tag"), + "g": pdata.NewAttributeValueInt(4), + "h": pdata.NewAttributeValueInt(255), + "i": pdata.NewAttributeValueInt(65535), + "j": pdata.NewAttributeValueInt(4294967295), + "k": pdata.NewAttributeValueInt(-1), + "l": pdata.NewAttributeValueString("(0+0i)"), + "m": pdata.NewAttributeValueString("\001e\002"), + "n": pdata.NewAttributeValueString(`["first","second"]`), + }, + }, + ).ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0), le) +} + +func TestEventMode(t 
*testing.T) {
+ require.Equal(t, "unknown", UnknownMode.String())
+ require.Equal(t, "message", MessageMode.String())
+ require.Equal(t, "forward", ForwardMode.String())
+ require.Equal(t, "packedforward", PackedForwardMode.String())
+
+ const TestMode EventMode = 6
+ require.Panics(t, func() { _ = TestMode.String() })
+}
+
+func TestTimeFromTimestampBadType(t *testing.T) {
+ _, err := timeFromTimestamp("bad")
+ require.NotNil(t, err)
+}
+
+func TestMessageEventConversionWithErrors(t *testing.T) {
+ var b []byte
+
+ b = msgp.AppendArrayHeader(b, 3)
+ b = msgp.AppendString(b, "my-tag")
+ b = msgp.AppendInt(b, 5000)
+ b = msgp.AppendMapHeader(b, 1)
+ b = msgp.AppendString(b, "a")
+ b = msgp.AppendFloat64(b, 5.0)
+
+ for i := 0; i < len(b)-1; i++ {
+ t.Run(fmt.Sprintf("EOF at byte %d", i), func(t *testing.T) {
+ reader := msgp.NewReader(bytes.NewReader(b[:i]))
+
+ var event MessageEventLogRecord
+ err := event.DecodeMsg(reader)
+ require.NotNil(t, err)
+ })
+ }
+
+ t.Run("Invalid timestamp type uint", func(t *testing.T) {
+ in := make([]byte, len(b))
+ copy(in, b)
+ in[8] = 0xcd
+ reader := msgp.NewReader(bytes.NewReader(in))
+
+ var event MessageEventLogRecord
+ err := event.DecodeMsg(reader)
+ require.NotNil(t, err)
+ })
+}
+
+func TestForwardEventConversionWithErrors(t *testing.T) {
+ b := parseHexDump("testdata/forward-event")
+
+ for i := 0; i < len(b)-1; i++ {
+ t.Run(fmt.Sprintf("EOF at byte %d", i), func(t *testing.T) {
+ reader := msgp.NewReader(bytes.NewReader(b[:i]))
+
+ var event ForwardEventLogRecords
+ err := event.DecodeMsg(reader)
+ require.NotNil(t, err)
+ })
+ }
+}
+
+func TestPackedForwardEventConversionWithErrors(t *testing.T) {
+ b := parseHexDump("testdata/forward-packed-compressed")
+
+ for i := 0; i < len(b)-1; i++ {
+ t.Run(fmt.Sprintf("EOF at byte %d", i), func(t *testing.T) {
+ reader := msgp.NewReader(bytes.NewReader(b[:i]))
+
+ var event PackedForwardEventLogRecords
+ err := event.DecodeMsg(reader)
+ require.NotNil(t, err)
+ })
+ }
+
+ t.Run("Invalid gzip header", func(t *testing.T) {
+ in := make([]byte, len(b))
+ copy(in, b)
+ in[0x71] = 0xff
+ reader := msgp.NewReader(bytes.NewReader(in))
+
+ var event PackedForwardEventLogRecords
+ err := event.DecodeMsg(reader)
+ require.NotNil(t, err)
+ require.Contains(t, err.Error(), "gzip")
+ })
+}
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/factory.go b/internal/otel_collector/receiver/fluentforwardreceiver/factory.go
new file mode 100644
index 00000000000..5061ad64112
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/factory.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fluentforwardreceiver
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configmodels"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/receiver/receiverhelper"
+)
+
+// This file implements the factory for the Fluent Forward receiver.
+ +const ( + // The value of "type" key in configuration. + typeStr = "fluentforward" +) + +func NewFactory() component.ReceiverFactory { + return receiverhelper.NewFactory( + typeStr, + createDefaultConfig, + receiverhelper.WithLogs(createLogsReceiver)) +} + +func createDefaultConfig() configmodels.Receiver { + return &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + } +} + +func createLogsReceiver( + _ context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + consumer consumer.LogsConsumer, +) (component.LogsReceiver, error) { + + rCfg := cfg.(*Config) + return newFluentReceiver(params.Logger, rCfg, consumer) +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/factory_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/factory_test.go new file mode 100644 index 00000000000..9b6581465c0 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/factory_test.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ListenAddress = "localhost:0" // Endpoint is required, not going to be used here. + + require.Equal(t, configmodels.Type("fluentforward"), factory.Type()) + + tReceiver, err := factory.CreateLogsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewLogsNop()) + assert.Nil(t, err, "receiver creation failed") + assert.NotNil(t, tReceiver, "receiver creation failed") +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/heartbeat.go b/internal/otel_collector/receiver/fluentforwardreceiver/heartbeat.go new file mode 100644 index 00000000000..c9cb68d2c7a --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/heartbeat.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "context" + "net" + "syscall" + + "go.uber.org/zap" +) + +// See https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#heartbeat-message +func respondToHeartbeats(ctx context.Context, udpSock net.PacketConn, logger *zap.Logger) { + go func() { + <-ctx.Done() + udpSock.Close() + }() + + buf := make([]byte, 1) + for { + n, addr, err := udpSock.ReadFrom(buf) + if err != nil || n == 0 { + if ctx.Err() != nil || err == syscall.EINVAL { + return + } + continue + } + // Technically the heartbeat should be a byte 0x00 but just echo back + // whatever the client sent and move on. + _, err = udpSock.WriteTo(buf, addr) + if err != nil { + logger.Debug("Failed to write back heartbeat packet", zap.String("addr", addr.String()), zap.Error(err)) + } + } +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/heartbeat_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/heartbeat_test.go new file mode 100644 index 00000000000..8d73803a1ea --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/heartbeat_test.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "context" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestUDPHeartbeat(t *testing.T) { + udpSock, err := net.ListenPacket("udp", "127.0.0.1:0") + require.Nil(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go respondToHeartbeats(ctx, udpSock, zap.NewNop()) + + conn, err := net.Dial("udp", udpSock.LocalAddr().String()) + require.Nil(t, err) + + n, err := conn.Write([]byte{0x00}) + require.Nil(t, err) + require.Equal(t, 1, n) + + buf := make([]byte, 1) + require.NoError(t, conn.SetReadDeadline(time.Now().Add(5*time.Second))) + n, err = conn.Read(buf) + require.Nil(t, err) + require.Equal(t, 1, n) + require.Equal(t, uint8(0x00), buf[0]) +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/observ/metrics.go b/internal/otel_collector/receiver/fluentforwardreceiver/observ/metrics.go new file mode 100644 index 00000000000..40dad5e1fec --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/observ/metrics.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// package observ contains logic pertaining to the internal observation +// of the fluent forward receiver. +package observ + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + ConnectionsOpened = stats.Int64( + "fluent_opened_connections", + "Number of connections opened to the fluentforward receiver", + stats.UnitDimensionless) + connectionsOpenedView = &view.View{ + Name: ConnectionsOpened.Name(), + Measure: ConnectionsOpened, + Description: ConnectionsOpened.Description(), + Aggregation: view.Sum(), + } + + ConnectionsClosed = stats.Int64( + "fluent_closed_connections", + "Number of connections closed to the fluentforward receiver", + stats.UnitDimensionless) + connectionsClosedView = &view.View{ + Name: ConnectionsClosed.Name(), + Measure: ConnectionsClosed, + Description: ConnectionsClosed.Description(), + Aggregation: view.Sum(), + } + + EventsParsed = stats.Int64( + "fluent_events_parsed", + "Number of Fluent events parsed successfully", + stats.UnitDimensionless) + eventsParsedView = &view.View{ + Name: EventsParsed.Name(), + Measure: EventsParsed, + Description: EventsParsed.Description(), + Aggregation: view.Sum(), + } + + FailedToParse = stats.Int64( + "fluent_parse_failures", + "Number of times Fluent messages failed to be decoded", + stats.UnitDimensionless) + failedToParseView = &view.View{ + Name: FailedToParse.Name(), + Measure: FailedToParse, + Description: FailedToParse.Description(), + Aggregation: view.Sum(), + } + + RecordsGenerated = stats.Int64( + "fluent_records_generated", + "Number of log records generated from Fluent forward input", + stats.UnitDimensionless) + recordsGeneratedView = &view.View{ + Name: RecordsGenerated.Name(), + Measure: RecordsGenerated, + Description: RecordsGenerated.Description(), + Aggregation: view.Sum(), + } +) + +func MetricViews() []*view.View { + return []*view.View{ + connectionsOpenedView, + connectionsClosedView, + eventsParsedView, + failedToParseView, + recordsGeneratedView, + } +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/observ/metrics_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/observ/metrics_test.go new file mode 100644 index 00000000000..11d1ce9e454 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/observ/metrics_test.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package observ + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestViews(t *testing.T) { + require.Equal(t, len(MetricViews()), 5) +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/parse_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/parse_test.go new file mode 100644 index 00000000000..04afcd39b65 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/parse_test.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "encoding/hex" + "io/ioutil" + "path/filepath" + "runtime" + "strings" +) + +func parseHexDump(name string) []byte { + _, file, _, _ := runtime.Caller(0) + dir, err := filepath.Abs(filepath.Dir(file)) + if err != nil { + panic("Failed to find absolute path of hex dump: " + err.Error()) + } + + path := filepath.Join(dir, name+".hexdump") + dump, err := ioutil.ReadFile(path) + if err != nil { + panic("failed to read hex dump file " + path + ": " + err.Error()) + } + + var hexStr string + for _, line := range strings.Split(string(dump), "\n") { + if len(line) == 0 { + continue + } + line = strings.Split(line, "|")[0] + hexStr += strings.Join(strings.Fields(line)[1:], "") + } + + bytes, err := hex.DecodeString(hexStr) + if err != nil { + panic("failed to parse hex bytes: " + err.Error()) + } + + return bytes +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/receiver.go b/internal/otel_collector/receiver/fluentforwardreceiver/receiver.go new file mode 100644 index 00000000000..3b110baac8d --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/receiver.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "context" + "net" + "strings" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" +) + +// Give the event channel a bit of buffer to help reduce backpressure on +// FluentBit and increase throughput. 
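+// The server goroutine performs a blocking send on this channel for each
+// decoded event, so once the buffer fills the read loop stalls and
+// backpressure propagates to the TCP connection itself.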
+const eventChannelLength = 100 + +type fluentReceiver struct { + collector *Collector + listener net.Listener + conf *Config + logger *zap.Logger + server *server + cancel context.CancelFunc +} + +func newFluentReceiver(logger *zap.Logger, conf *Config, next consumer.LogsConsumer) (component.LogsReceiver, error) { + eventCh := make(chan Event, eventChannelLength) + + collector := newCollector(eventCh, next, logger) + + server := newServer(eventCh, logger) + + return &fluentReceiver{ + collector: collector, + server: server, + conf: conf, + logger: logger, + }, nil +} + +func (r *fluentReceiver) Start(ctx context.Context, _ component.Host) error { + receiverCtx, cancel := context.WithCancel(ctx) + r.cancel = cancel + + r.collector.Start(receiverCtx) + + listenAddr := r.conf.ListenAddress + + var listener net.Listener + var udpListener net.PacketConn + var err error + if strings.HasPrefix(listenAddr, "/") || strings.HasPrefix(listenAddr, "unix://") { + listener, err = net.Listen("unix", strings.TrimPrefix(listenAddr, "unix://")) + } else { + listener, err = net.Listen("tcp", listenAddr) + if err == nil { + udpListener, err = net.ListenPacket("udp", listenAddr) + } + } + + if err != nil { + return err + } + + r.listener = listener + + r.server.Start(receiverCtx, listener) + + if udpListener != nil { + go respondToHeartbeats(receiverCtx, udpListener, r.logger) + } + + return nil +} + +func (r *fluentReceiver) Shutdown(context.Context) error { + r.cancel() + return nil +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/receiver_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/receiver_test.go new file mode 100644 index 00000000000..a7122825f1f --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/receiver_test.go @@ -0,0 +1,438 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fluentforwardreceiver + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tinylib/msgp/msgp" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/testutil/logstest" +) + +func setupServer(t *testing.T) (func() net.Conn, *consumertest.LogsSink, *observer.ObservedLogs, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + next := new(consumertest.LogsSink) + logCore, logObserver := observer.New(zap.DebugLevel) + logger := zap.New(logCore) + + conf := &Config{ + ListenAddress: "127.0.0.1:0", + } + + receiver, err := newFluentReceiver(logger, conf, next) + require.NoError(t, err) + require.NoError(t, receiver.Start(ctx, nil)) + + connect := func() net.Conn { + conn, err := net.Dial("tcp", receiver.(*fluentReceiver).listener.Addr().String()) + require.Nil(t, err) + return conn + } + + go func() { + <-ctx.Done() + require.NoError(t, receiver.Shutdown(ctx)) + }() + + return connect, next, logObserver, cancel +} + +func waitForConnectionClose(t *testing.T, conn net.Conn) { + one := make([]byte, 1) + require.NoError(t, conn.SetReadDeadline(time.Now().Add(5*time.Second))) + _, err := conn.Read(one) + // If this is a timeout, then the connection didn't actually close like + // expected. + require.Equal(t, io.EOF, err) +} + +// Make sure malformed events don't cause panics. +func TestMessageEventConversionMalformed(t *testing.T) { + connect, _, observedLogs, cancel := setupServer(t) + defer cancel() + + eventBytes := parseHexDump("testdata/message-event") + + vulnerableBits := []int{0, 1, 14, 59} + + for _, pos := range vulnerableBits { + eventBytes[pos]++ + + conn := connect() + n, err := conn.Write(eventBytes) + require.NoError(t, err) + require.Len(t, eventBytes, n) + + waitForConnectionClose(t, conn) + + require.Len(t, observedLogs.FilterMessageSnippet("Unexpected").All(), 1) + _ = observedLogs.TakeAll() + } +} + +func TestMessageEvent(t *testing.T) { + connect, next, _, cancel := setupServer(t) + defer cancel() + + eventBytes := parseHexDump("testdata/message-event") + + conn := connect() + n, err := conn.Write(eventBytes) + require.NoError(t, err) + require.Equal(t, len(eventBytes), n) + require.NoError(t, conn.Close()) + + var converted []pdata.Logs + require.Eventually(t, func() bool { + converted = next.AllLogs() + return len(converted) == 1 + }, 5*time.Second, 10*time.Millisecond) + + converted[0].ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0).Attributes().Sort() + require.EqualValues(t, logstest.Logs(logstest.Log{ + Timestamp: 1593031012000000000, + Body: pdata.NewAttributeValueString("..."), + Attributes: map[string]pdata.AttributeValue{ + "container_id": pdata.NewAttributeValueString("b00a67eb645849d6ab38ff8beb4aad035cc7e917bf123c3e9057c7e89fc73d2d"), + "container_name": pdata.NewAttributeValueString("/unruffled_cannon"), + "fluent.tag": pdata.NewAttributeValueString("b00a67eb6458"), + "source": pdata.NewAttributeValueString("stdout"), + }, + }, + ), converted[0]) +} + +func TestForwardEvent(t *testing.T) { + connect, next, _, cancel := setupServer(t) + defer cancel() + + eventBytes := parseHexDump("testdata/forward-event") + + conn := connect() + n, err := conn.Write(eventBytes) + require.NoError(t, err) + require.Equal(t, len(eventBytes), n) + require.NoError(t, 
conn.Close()) + + var converted []pdata.Logs + require.Eventually(t, func() bool { + converted = next.AllLogs() + return len(converted) == 1 + }, 5*time.Second, 10*time.Millisecond) + + ls := converted[0].ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + ls.At(0).Attributes().Sort() + ls.At(1).Attributes().Sort() + require.EqualValues(t, logstest.Logs( + logstest.Log{ + Timestamp: 1593032377776693638, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "Mem.free": pdata.NewAttributeValueInt(848908), + "Mem.total": pdata.NewAttributeValueInt(7155496), + "Mem.used": pdata.NewAttributeValueInt(6306588), + "Swap.free": pdata.NewAttributeValueInt(0), + "Swap.total": pdata.NewAttributeValueInt(0), + "Swap.used": pdata.NewAttributeValueInt(0), + "fluent.tag": pdata.NewAttributeValueString("mem.0"), + }, + }, + logstest.Log{ + Timestamp: 1593032378756829346, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "Mem.free": pdata.NewAttributeValueInt(848908), + "Mem.total": pdata.NewAttributeValueInt(7155496), + "Mem.used": pdata.NewAttributeValueInt(6306588), + "Swap.free": pdata.NewAttributeValueInt(0), + "Swap.total": pdata.NewAttributeValueInt(0), + "Swap.used": pdata.NewAttributeValueInt(0), + "fluent.tag": pdata.NewAttributeValueString("mem.0"), + }, + }, + ), converted[0]) +} + +func TestEventAcknowledgment(t *testing.T) { + connect, _, logs, cancel := setupServer(t) + defer func() { fmt.Printf("%v", logs.All()) }() + defer cancel() + + const chunkValue = "abcdef01234576789" + + var b []byte + + // Make a message event with the chunk option + b = msgp.AppendArrayHeader(b, 4) + b = msgp.AppendString(b, "my-tag") + b = msgp.AppendInt(b, 5000) + b = msgp.AppendMapHeader(b, 1) + b = msgp.AppendString(b, "a") + b = msgp.AppendFloat64(b, 5.0) + b = msgp.AppendMapStrStr(b, map[string]string{"chunk": chunkValue}) + + conn := connect() + n, err := conn.Write(b) + require.NoError(t, err) + require.Equal(t, len(b), n) + + require.NoError(t, conn.SetReadDeadline(time.Now().Add(5*time.Second))) + resp := map[string]interface{}{} + err = msgp.NewReader(conn).ReadMapStrIntf(resp) + require.NoError(t, err) + + require.Equal(t, chunkValue, resp["ack"]) +} + +func TestForwardPackedEvent(t *testing.T) { + connect, next, _, cancel := setupServer(t) + defer cancel() + + eventBytes := parseHexDump("testdata/forward-packed") + + conn := connect() + n, err := conn.Write(eventBytes) + require.NoError(t, err) + require.Equal(t, len(eventBytes), n) + require.NoError(t, conn.Close()) + + var converted []pdata.Logs + require.Eventually(t, func() bool { + converted = next.AllLogs() + return len(converted) == 1 + }, 5*time.Second, 10*time.Millisecond) + + ls := converted[0].ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + for i := 0; i < ls.Len(); i++ { + ls.At(i).Attributes().Sort() + } + require.EqualValues(t, logstest.Logs( + logstest.Log{ + Timestamp: 1593032517024597622, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("starting fluentd worker pid=17 ppid=7 worker=0"), + "pid": pdata.NewAttributeValueInt(17), + "ppid": pdata.NewAttributeValueInt(7), + "worker": pdata.NewAttributeValueInt(0), + }, + }, + logstest.Log{ + Timestamp: 1593032517028573686, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": 
pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("delayed_commit_timeout is overwritten by ack_response_timeout"), + }, + }, + logstest.Log{ + Timestamp: 1593032517028815948, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("following tail of /var/log/kern.log"), + }, + }, + logstest.Log{ + Timestamp: 1593032517031174229, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("fluentd worker is now running worker=0"), + "worker": pdata.NewAttributeValueInt(0), + }, + }, + logstest.Log{ + Timestamp: 1593032522187382822, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("fluentd worker is now stopping worker=0"), + "worker": pdata.NewAttributeValueInt(0), + }, + }, + ), converted[0]) +} + +func TestForwardPackedCompressedEvent(t *testing.T) { + connect, next, _, cancel := setupServer(t) + defer cancel() + + eventBytes := parseHexDump("testdata/forward-packed-compressed") + + conn := connect() + n, err := conn.Write(eventBytes) + require.NoError(t, err) + require.Equal(t, len(eventBytes), n) + require.NoError(t, conn.Close()) + + var converted []pdata.Logs + require.Eventually(t, func() bool { + converted = next.AllLogs() + return len(converted) == 1 + }, 5*time.Second, 10*time.Millisecond) + + ls := converted[0].ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + for i := 0; i < ls.Len(); i++ { + ls.At(i).Attributes().Sort() + } + require.EqualValues(t, logstest.Logs( + logstest.Log{ + Timestamp: 1593032426012197420, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("starting fluentd worker pid=17 ppid=7 worker=0"), + "pid": pdata.NewAttributeValueInt(17), + "ppid": pdata.NewAttributeValueInt(7), + "worker": pdata.NewAttributeValueInt(0), + }, + }, + logstest.Log{ + Timestamp: 1593032426013724933, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("delayed_commit_timeout is overwritten by ack_response_timeout"), + }, + }, + logstest.Log{ + Timestamp: 1593032426020510455, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("following tail of /var/log/kern.log"), + }, + }, + logstest.Log{ + Timestamp: 1593032426024346580, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("fluentd worker is now running worker=0"), + "worker": pdata.NewAttributeValueInt(0), + }, + }, + logstest.Log{ + Timestamp: 1593032434346935532, + Body: pdata.NewAttributeValueNull(), + Attributes: map[string]pdata.AttributeValue{ + "fluent.tag": pdata.NewAttributeValueString("fluent.info"), + "message": pdata.NewAttributeValueString("fluentd worker is now stopping worker=0"), + "worker": 
pdata.NewAttributeValueInt(0), + }, + }, + ), converted[0]) +} + +func TestUnixEndpoint(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + next := new(consumertest.LogsSink) + + tmpdir, err := ioutil.TempDir("", "fluent-socket") + require.NoError(t, err) + + defer os.RemoveAll(tmpdir) + + conf := &Config{ + ListenAddress: "unix://" + filepath.Join(tmpdir, "fluent.sock"), + } + + receiver, err := newFluentReceiver(zap.NewNop(), conf, next) + require.NoError(t, err) + require.NoError(t, receiver.Start(ctx, nil)) + + conn, err := net.Dial("unix", receiver.(*fluentReceiver).listener.Addr().String()) + require.NoError(t, err) + + n, err := conn.Write(parseHexDump("testdata/message-event")) + require.NoError(t, err) + require.Greater(t, n, 0) + + var converted []pdata.Logs + require.Eventually(t, func() bool { + converted = next.AllLogs() + return len(converted) == 1 + }, 5*time.Second, 10*time.Millisecond) +} + +func makeSampleEvent(tag string) []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, tag) + b = msgp.AppendInt(b, 5000) + b = msgp.AppendMapHeader(b, 1) + b = msgp.AppendString(b, "a") + b = msgp.AppendFloat64(b, 5.0) + return b +} + +func TestHighVolume(t *testing.T) { + connect, next, _, cancel := setupServer(t) + defer cancel() + + const totalRoutines = 8 + const totalMessagesPerRoutine = 1000 + + var wg sync.WaitGroup + for i := 0; i < totalRoutines; i++ { + wg.Add(1) + go func(num int) { + conn := connect() + for j := 0; j < totalMessagesPerRoutine; j++ { + eventBytes := makeSampleEvent(fmt.Sprintf("tag-%d-%d", num, j)) + n, err := conn.Write(eventBytes) + require.NoError(t, err) + require.Equal(t, len(eventBytes), n) + } + require.NoError(t, conn.Close()) + wg.Done() + }(i) + } + + wg.Wait() + + var converted []pdata.Logs + require.Eventually(t, func() bool { + converted = next.AllLogs() + + var total int + for i := range converted { + total += converted[i].LogRecordCount() + } + + return total == totalRoutines*totalMessagesPerRoutine + }, 10*time.Second, 100*time.Millisecond) +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/server.go b/internal/otel_collector/receiver/fluentforwardreceiver/server.go new file mode 100644 index 00000000000..508e8f20f8e --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/server.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fluentforwardreceiver + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "time" + + "github.com/tinylib/msgp/msgp" + "go.opencensus.io/stats" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/fluentforwardreceiver/observ" +) + +// The initial size of the read buffer. Messages can come in that are bigger +// than this, but this serves as a starting point. 
+const readBufferSize = 10 * 1024 + +type server struct { + outCh chan<- Event + logger *zap.Logger +} + +func newServer(outCh chan<- Event, logger *zap.Logger) *server { + return &server{ + outCh: outCh, + logger: logger, + } +} + +func (s *server) Start(ctx context.Context, listener net.Listener) { + go func() { + s.handleConnections(ctx, listener) + if ctx.Err() == nil { + panic("logic error in receiver, connections should always be listened for while receiver is running") + } + }() +} + +func (s *server) handleConnections(ctx context.Context, listener net.Listener) { + for { + conn, err := listener.Accept() + if ctx.Err() != nil { + return + } + // If there is an error and the receiver isn't shutdown, we need to + // keep trying to accept connections if at all possible. Put in a sleep + // to prevent hot loops in case the error persists. + if err != nil { + time.Sleep(10 * time.Second) + continue + } + stats.Record(ctx, observ.ConnectionsOpened.M(1)) + + s.logger.Debug("Got connection", zap.String("remoteAddr", conn.RemoteAddr().String())) + + go func() { + defer stats.Record(ctx, observ.ConnectionsClosed.M(1)) + + err := s.handleConn(ctx, conn) + if err != nil { + if err == io.EOF { + s.logger.Debug("Closing connection", zap.String("remoteAddr", conn.RemoteAddr().String()), zap.Error(err)) + } else { + s.logger.Debug("Unexpected error handling connection", zap.String("remoteAddr", conn.RemoteAddr().String()), zap.Error(err)) + } + } + conn.Close() + }() + } +} + +func (s *server) handleConn(ctx context.Context, conn net.Conn) error { + reader := msgp.NewReaderSize(conn, readBufferSize) + + for { + mode, err := DetermineNextEventMode(reader.R) + if err != nil { + return err + } + + var event Event + switch mode { + case UnknownMode: + return errors.New("could not determine event mode") + case MessageMode: + event = &MessageEventLogRecord{} + case ForwardMode: + event = &ForwardEventLogRecords{} + case PackedForwardMode: + event = &PackedForwardEventLogRecords{} + default: + panic("programmer bug in mode handling") + } + + err = event.DecodeMsg(reader) + if err != nil { + if err != io.EOF { + stats.Record(ctx, observ.FailedToParse.M(1)) + } + return fmt.Errorf("failed to parse %s mode event: %v", mode.String(), err) + } + + stats.Record(ctx, observ.EventsParsed.M(1)) + + s.outCh <- event + + // We must acknowledge the 'chunk' option if given. We could do this in + // another goroutine if it is too much of a bottleneck to reading + // messages -- this is the only thing that sends data back to the + // client. + if event.Chunk() != "" { + err := msgp.Encode(conn, AckResponse{Ack: event.Chunk()}) + if err != nil { + return fmt.Errorf("failed to acknowledge chunk %s: %v", event.Chunk(), err) + } + } + } +} + +// DetermineNextEventMode inspects the next bit of data from the given peeker +// reader to determine which type of event mode it is. According to the +// forward protocol spec: "Server MUST detect the carrier mode by inspecting +// the second element of the array." It is assumed that peeker is aligned at +// the start of a new event, otherwise the result is undefined and will +// probably error. +func DetermineNextEventMode(peeker Peeker) (EventMode, error) { + var chunk []byte + var err error + chunk, err = peeker.Peek(2) + if err != nil { + return UnknownMode, err + } + + // The first byte is the array header, which will always be 1 byte since no + // message modes have more than 4 entries. So skip to the second byte which + // is the tag string header. 
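+	//
+	// As a worked example (a sketch; these are the bytes produced by the
+	// msgp appends used in the "basic" case of server_test.go), the event
+	// ["my-tag", 5000, ...] begins:
+	//
+	//	0x93                          array header, 3 elements
+	//	0xa6 'm' 'y' '-' 't' 'a' 'g'  fixstr header (0xa0|6) + 6-byte tag
+	//	0xd1 0x13 0x88                int16 5000 -- an IntType second
+	//	                              element, hence MessageMode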
+	tagType := chunk[1]
+	// The tag's type byte has already been examined, so the tag length
+	// starts at 1.
+	tagLen := 1
+
+	// fixstr is 0b101xxxxx, with the length in the low five bits.
+	isFixStr := tagType&0b11100000 == 0b10100000
+	if isFixStr {
+		tagLen += int(tagType & 0b00011111)
+	} else {
+		switch tagType {
+		case 0xd9:
+			chunk, err = peeker.Peek(3)
+			if err != nil {
+				return UnknownMode, err
+			}
+			tagLen += 1 + int(chunk[2])
+		case 0xda:
+			chunk, err = peeker.Peek(4)
+			if err != nil {
+				return UnknownMode, err
+			}
+			tagLen += 2 + int(binary.BigEndian.Uint16(chunk[2:]))
+		case 0xdb:
+			chunk, err = peeker.Peek(6)
+			if err != nil {
+				return UnknownMode, err
+			}
+			tagLen += 4 + int(binary.BigEndian.Uint32(chunk[2:]))
+		default:
+			return UnknownMode, errors.New("malformed tag field")
+		}
+	}
+
+	// Skip past the first byte (array header) and the entire tag and then get
+	// one byte into the second field -- that is enough to know its type.
+	chunk, err = peeker.Peek(1 + tagLen + 1)
+	if err != nil {
+		return UnknownMode, err
+	}
+
+	secondElmType := msgp.NextType(chunk[1+tagLen:])
+
+	switch secondElmType {
+	case msgp.IntType, msgp.UintType, msgp.ExtensionType:
+		return MessageMode, nil
+	case msgp.ArrayType:
+		return ForwardMode, nil
+	case msgp.BinType, msgp.StrType:
+		return PackedForwardMode, nil
+	default:
+		return UnknownMode, fmt.Errorf("unable to determine next event mode for type %v", secondElmType)
+	}
+}
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/server_test.go b/internal/otel_collector/receiver/fluentforwardreceiver/server_test.go
new file mode 100644
index 00000000000..e406d0502a9
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/server_test.go
@@ -0,0 +1,127 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package fluentforwardreceiver + +import ( + "bufio" + "bytes" + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tinylib/msgp/msgp" +) + +func TestDetermineNextEventMode(t *testing.T) { + cases := []struct { + name string + event func() []byte + expectedMode EventMode + expectedError error + }{ + { + "basic", + func() []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, "my-tag") + b = msgp.AppendInt(b, 5000) + return b + }, + MessageMode, + nil, + }, + { + "str8-tag", + func() []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, strings.Repeat("a", 128)) + b = msgp.AppendInt(b, 5000) + return b + }, + MessageMode, + nil, + }, + { + "str16-tag", + func() []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, strings.Repeat("a", 1024)) + b = msgp.AppendInt(b, 5000) + return b + }, + MessageMode, + nil, + }, + { + "str32-tag", + func() []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, strings.Repeat("a", 66000)) + b = msgp.AppendInt(b, 5000) + return b + }, + MessageMode, + nil, + }, + { + "non-string-tag", + func() []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendInt(b, 10) + b = msgp.AppendInt(b, 5000) + return b + }, + UnknownMode, + errors.New("malformed tag field"), + }, + { + "float-second-elm", + func() []byte { + var b []byte + + b = msgp.AppendArrayHeader(b, 3) + b = msgp.AppendString(b, "my-tag") + b = msgp.AppendFloat64(b, 5000.0) + return b + }, + UnknownMode, + errors.New("unable to determine next event mode for type float64"), + }, + } + + for i := range cases { + c := cases[i] + t.Run(c.name, func(t *testing.T) { + peeker := bufio.NewReaderSize(bytes.NewReader(c.event()), 1024*100) + mode, err := DetermineNextEventMode(peeker) + if c.expectedError != nil { + require.Equal(t, c.expectedError, err) + } else { + require.Equal(t, c.expectedMode, mode) + } + }) + } +} diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/testdata/config.yaml b/internal/otel_collector/receiver/fluentforwardreceiver/testdata/config.yaml new file mode 100644 index 00000000000..863f73db801 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/testdata/config.yaml @@ -0,0 +1,15 @@ +receivers: + fluentforward: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + logs: + receivers: [fluentforward] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/testdata/forward-event.hexdump b/internal/otel_collector/receiver/fluentforwardreceiver/testdata/forward-event.hexdump new file mode 100644 index 00000000000..eafe3fec810 --- /dev/null +++ b/internal/otel_collector/receiver/fluentforwardreceiver/testdata/forward-event.hexdump @@ -0,0 +1,12 @@ +00000000 92 a5 6d 65 6d 2e 30 92 92 d7 00 5e f3 be b9 2e |..mem.0....^....| +00000010 4b 67 86 86 a9 4d 65 6d 2e 74 6f 74 61 6c ce 00 |Kg...Mem.total..| +00000020 6d 2f 28 a8 4d 65 6d 2e 75 73 65 64 ce 00 60 3b |m/(.Mem.used..`;| +00000030 1c a8 4d 65 6d 2e 66 72 65 65 ce 00 0c f4 0c aa |..Mem.free......| +00000040 53 77 61 70 2e 74 6f 74 61 6c 00 a9 53 77 61 70 |Swap.total..Swap| +00000050 2e 75 73 65 64 00 a9 53 77 61 70 2e 66 72 65 65 |.used..Swap.free| +00000060 00 92 d7 00 5e f3 be ba 2d 1c 4c a2 86 a9 4d 65 |....^...-.L...Me| +00000070 6d 2e 74 6f 74 61 6c ce 00 6d 2f 28 a8 4d 65 6d |m.total..m/(.Mem| 
+00000080 2e 75 73 65 64 ce 00 60 3b 1c a8 4d 65 6d 2e 66 |.used..`;..Mem.f|
+00000090 72 65 65 ce 00 0c f4 0c aa 53 77 61 70 2e 74 6f |ree......Swap.to|
+000000a0 74 61 6c 00 a9 53 77 61 70 2e 75 73 65 64 00 a9 |tal..Swap.used..|
+000000b0 53 77 61 70 2e 66 72 65 65 00                   |Swap.free.      |
diff --git a/internal/otel_collector/receiver/fluentforwardreceiver/testdata/forward-packed-compressed.hexdump b/internal/otel_collector/receiver/fluentforwardreceiver/testdata/forward-packed-compressed.hexdump
new file mode 100644
index 00000000000..4ae344bbae3
--- /dev/null
+++ b/internal/otel_collector/receiver/fluentforwardreceiver/testdata/forward-packed-compressed.hexdump
@@ -0,0 +1,32 @@
+00000000 93 ab 66 6c 75 65 6e 74 2e 69 6e 66 6f db 00 00 |..fluent.info...|
+00000010 01 af 1f 8b 08 00 ea be f3 5e 00 03 9b 74 9d 21 |.........^...t.!|
+00000020 ee f3 be 57 0c bb e4 74 5a 16 17 64 a6 08 2e 29 |...W...tZ..d...)|
+00000030 00 92 ec cb ca f3 8b b2 53 8b 18 96 e7 a6 16 17 |........S.......|
+00000040 27 a6 a7 de d4 2b 2e 49 2c 2a c9 cc 4b 57 48 cb |'....+.I,*..KWH.|
+00000050 29 4d cd 2b 49 51 80 a8 50 00 aa b6 35 34 57 00 |)M.+IQ..P...54W.|
+00000060 e9 b2 35 87 0a da 1a 00 00 ed 57 34 22 57 00 00 |..5.......W4"W..|
+00000070 00 1f 8b 08 00 ea be f3 5e 00 03 9b 74 9d 21 ee |........^...t.!.|
+00000080 f3 be 57 0c 17 73 59 1b 97 e7 a6 16 17 27 a6 a7 |..W..sY......'..|
+00000090 de b4 4d 49 cd 49 ac 4c 4d 89 4f ce cf cd cd 2c |..MI.I.LM.O....,|
+000000a0 89 2f c9 cc 4d cd 2f 2d 51 c8 2c 56 c8 2f 4b 2d |./..M./-Q.,V./K-|
+000000b0 2a 2f ca 2c 29 49 cd 53 48 aa 54 48 4c ce 8e 2f |*/.,)I.SH.THL../|
+000000c0 4a 2d 2e c8 cf 2b 4e 85 29 03 00 4b 69 57 f4 53 |J-...+N.)..KiW.S|
+000000d0 00 00 00 1f 8b 08 00 ea be f3 5e 00 03 9b 74 9d |..........^...t.|
+000000e0 21 ee f3 be 57 8c 16 df be 37 2e cf 4d 2d 2e 4e |!...W....7..M-.N|
+000000f0 4c 4f bd a9 9c 96 9f 93 93 5f 9e 99 97 ae 50 92 |LO......._....P.|
+00000100 98 99 a3 90 9f a6 a0 5f 96 58 a4 9f 93 9f ae 9f |......._.X......|
+00000110 9d 5a 94 a7 07 64 00 00 3c 6b b5 13 39 00 00 00 |.Z...d..<k..9...|
[... remaining testdata hexdumps truncated in this excerpt ...]
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/README.md b/internal/otel_collector/receiver/hostmetricsreceiver/README.md
new file mode 100644
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/README.md
+# Host Metrics Receiver
+
+The Host Metrics receiver generates metrics about the host system scraped
+from various sources. It is intended to be used when the collector is
+deployed as an agent.
+
+## Getting Started
+
+The collection interval and the categories of metrics to be scraped can be
+configured:
+
+```yaml
+hostmetrics:
+  collection_interval: <duration> # default = 1m
+  scrapers:
+    <scraper1>:
+    <scraper2>:
+    ...
+```
+
+The available scrapers are:
+
+| Scraper    | Supported OSs     | Description                                             |
+|------------|-------------------|---------------------------------------------------------|
+| cpu        | All except Mac[1] | CPU utilization metrics                                 |
+| disk       | All except Mac[1] | Disk I/O metrics                                        |
+| load       | All               | CPU load metrics                                        |
+| filesystem | All               | File System utilization metrics                         |
+| memory     | All               | Memory utilization metrics                              |
+| network    | All               | Network interface I/O metrics & TCP connection metrics  |
+| processes  | Linux             | Process count metrics                                   |
+| swap       | All               | Swap space utilization and I/O metrics                  |
+| process    | Linux & Windows   | Per process CPU, Memory, and Disk I/O metrics           |
+
+### Notes
+
+[1] Not supported on Mac when compiled without cgo, which is the default.
+
+Several scrapers support additional configuration:
+
+### Disk
+
+```yaml
+disk:
+  <include|exclude>:
+    devices: [ <device name>, ... ]
+    match_type: <strict|regexp>
+```
+
+### File System
+
+```yaml
+filesystem:
+  <include_devices|exclude_devices>:
+    devices: [ <device name>, ... ]
+    match_type: <strict|regexp>
+  <include_fs_types|exclude_fs_types>:
+    fs_types: [ <filesystem type>, ... ]
+    match_type: <strict|regexp>
+  <include_mount_points|exclude_mount_points>:
+    mount_points: [ <mount point>, ... ]
+    match_type: <strict|regexp>
+```
+
+### Network
+
+```yaml
+network:
+  <include|exclude>:
+    interfaces: [ <interface name>, ... ]
+    match_type: <strict|regexp>
+```
+
+### Process
+
+```yaml
+process:
+  <include|exclude>:
+    names: [ <process name>, ... ]
+    match_type: <strict|regexp>
+```
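+
+For example (an illustrative configuration, not part of the template above),
+collecting process metrics only for process names that start with "otelcol"
+would use the regexp match type:
+
+```yaml
+process:
+  include:
+    names: [ "otelcol.*" ]
+    match_type: regexp
+```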
+
+## Advanced Configuration
+
+### Filtering
+
+If you are only interested in a subset of metrics from a particular source,
+it is recommended you use this receiver with the
+[Filter Processor](https://github.com/open-telemetry/opentelemetry-collector/tree/master/processor/filterprocessor).
+
+### Different Frequencies
+
+If you would like to scrape some metrics at a different frequency than others,
+you can configure multiple `hostmetrics` receivers with different
+`collection_interval` values. For example:
+
+```yaml
+receivers:
+  hostmetrics:
+    collection_interval: 30s
+    scrapers:
+      cpu:
+      memory:
+
+  hostmetrics/disk:
+    collection_interval: 1m
+    scrapers:
+      disk:
+      filesystem:
+
+service:
+  pipelines:
+    metrics:
+      receivers: [hostmetrics, hostmetrics/disk]
+```
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/codegen.go b/internal/otel_collector/receiver/hostmetricsreceiver/codegen.go
new file mode 100644
index 00000000000..feefb424aaf
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/codegen.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+//go:generate mdatagen metadata.yaml
+
+package hostmetricsreceiver
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/config.go
new file mode 100644
index 00000000000..82626aba81d
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/config.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hostmetricsreceiver
+
+import (
+	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+	"go.opentelemetry.io/collector/receiver/scraperhelper"
+)
+
+// Config defines configuration for HostMetrics receiver.
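+//
+// As an illustration (assumed values, not a prescribed shape), a config with
+// only the cpu scraper enabled is equivalent to:
+//
+//	&Config{
+//		ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings("hostmetrics"),
+//		Scrapers: map[string]internal.Config{
+//			"cpu": (&cpuscraper.Factory{}).CreateDefaultConfig(),
+//		},
+//	}
+//
+// (cpuscraper lives under internal/scraper/cpuscraper.)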
+type Config struct { + scraperhelper.ScraperControllerSettings `mapstructure:",squash"` + Scrapers map[string]internal.Config `mapstructure:"-"` +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/config_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/config_test.go new file mode 100644 index 00000000000..1b3c3f9d34e --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/config_test.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hostmetricsreceiver + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/processscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + require.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 2) + + r0 := cfg.Receivers["hostmetrics"] + defaultConfigCPUScraper := factory.CreateDefaultConfig() + defaultConfigCPUScraper.(*Config).Scrapers = map[string]internal.Config{ + cpuscraper.TypeStr: (&cpuscraper.Factory{}).CreateDefaultConfig(), + } + + assert.Equal(t, defaultConfigCPUScraper, r0) + + r1 := cfg.Receivers["hostmetrics/customname"].(*Config) + expectedConfig := &Config{ + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "hostmetrics/customname", + }, + CollectionInterval: 30 * time.Second, + }, + Scrapers: map[string]internal.Config{ + cpuscraper.TypeStr: &cpuscraper.Config{}, + diskscraper.TypeStr: &diskscraper.Config{}, + 
loadscraper.TypeStr: &loadscraper.Config{}, + filesystemscraper.TypeStr: &filesystemscraper.Config{}, + memoryscraper.TypeStr: &memoryscraper.Config{}, + networkscraper.TypeStr: &networkscraper.Config{ + Include: networkscraper.MatchConfig{ + Interfaces: []string{"test1"}, + Config: filterset.Config{MatchType: "strict"}, + }, + }, + processesscraper.TypeStr: &processesscraper.Config{}, + swapscraper.TypeStr: &swapscraper.Config{}, + processscraper.TypeStr: &processscraper.Config{ + Include: processscraper.MatchConfig{ + Names: []string{"test2", "test3"}, + Config: filterset.Config{MatchType: "regexp"}, + }, + }, + }, + } + + assert.Equal(t, expectedConfig, r1) +} + +func TestLoadInvalidConfig_NoScrapers(t *testing.T) { + factories, err := componenttest.ExampleComponents() + require.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "config-noscrapers.yaml"), factories) + + require.EqualError(t, err, "error reading receivers configuration for hostmetrics: must specify at least one scraper when using hostmetrics receiver") +} + +func TestLoadInvalidConfig_InvalidScraperKey(t *testing.T) { + factories, err := componenttest.ExampleComponents() + require.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "config-invalidscraperkey.yaml"), factories) + + require.EqualError(t, err, "error reading receivers configuration for hostmetrics: invalid scraper key: invalidscraperkey") +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/example_config.yaml b/internal/otel_collector/receiver/hostmetricsreceiver/example_config.yaml new file mode 100644 index 00000000000..2ee28b7bfd6 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/example_config.yaml @@ -0,0 +1,29 @@ +extensions: + zpages: + endpoint: 0.0.0.0:55679 + +receivers: + hostmetrics: + collection_interval: 1m + scrapers: + cpu: + load: + memory: + disk: + filesystem: + network: + processes: + swap: + +exporters: + logging: + prometheus: + endpoint: 0.0.0.0:8889 + +service: + pipelines: + metrics: + receivers: [hostmetrics] + exporters: [prometheus, logging] + + extensions: [zpages] diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/factory.go new file mode 100644 index 00000000000..41bd6b4fe93 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/factory.go @@ -0,0 +1,223 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hostmetricsreceiver + +import ( + "context" + "errors" + "fmt" + + "github.com/spf13/viper" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/processscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper" + "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for HostMetrics receiver. + +const ( + // The value of "type" key in configuration. + typeStr = "hostmetrics" + scrapersKey = "scrapers" +) + +var ( + scraperFactories = map[string]internal.ScraperFactory{ + cpuscraper.TypeStr: &cpuscraper.Factory{}, + diskscraper.TypeStr: &diskscraper.Factory{}, + loadscraper.TypeStr: &loadscraper.Factory{}, + filesystemscraper.TypeStr: &filesystemscraper.Factory{}, + memoryscraper.TypeStr: &memoryscraper.Factory{}, + networkscraper.TypeStr: &networkscraper.Factory{}, + processesscraper.TypeStr: &processesscraper.Factory{}, + swapscraper.TypeStr: &swapscraper.Factory{}, + } + + resourceScraperFactories = map[string]internal.ResourceScraperFactory{ + processscraper.TypeStr: &processscraper.Factory{}, + } +) + +// NewFactory creates a new factory for host metrics receiver. +func NewFactory() component.ReceiverFactory { + return receiverhelper.NewFactory( + typeStr, + createDefaultConfig, + receiverhelper.WithMetrics(createMetricsReceiver), + receiverhelper.WithCustomUnmarshaler(customUnmarshaler)) +} + +// customUnmarshaler returns custom unmarshaler for this config. 
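+//
+// Scraper configs cannot be unmarshaled by the default machinery because the
+// set of keys under "scrapers" is dynamic; each key selects a scraper
+// factory. For a snippet such as (hypothetical values):
+//
+//	hostmetrics:
+//	  scrapers:
+//	    cpu:
+//	    disk:
+//
+// the "cpu" and "disk" keys are resolved through getScraperFactory and each
+// sub-section is unmarshaled into that factory's default config.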
+func customUnmarshaler(componentViperSection *viper.Viper, intoCfg interface{}) error { + + // load the non-dynamic config normally + + err := componentViperSection.Unmarshal(intoCfg) + if err != nil { + return err + } + + cfg, ok := intoCfg.(*Config) + if !ok { + return fmt.Errorf("config type not hostmetrics.Config") + } + + // dynamically load the individual collector configs based on the key name + + cfg.Scrapers = map[string]internal.Config{} + + scrapersViperSection, err := config.ViperSubExact(componentViperSection, scrapersKey) + if err != nil { + return err + } + if len(scrapersViperSection.AllKeys()) == 0 { + return errors.New("must specify at least one scraper when using hostmetrics receiver") + } + + for key := range componentViperSection.GetStringMap(scrapersKey) { + factory, ok := getScraperFactory(key) + if !ok { + return fmt.Errorf("invalid scraper key: %s", key) + } + + collectorCfg := factory.CreateDefaultConfig() + collectorViperSection, err := config.ViperSubExact(scrapersViperSection, key) + if err != nil { + return err + } + err = collectorViperSection.UnmarshalExact(collectorCfg) + if err != nil { + return fmt.Errorf("error reading settings for scraper type %q: %v", key, err) + } + + cfg.Scrapers[key] = collectorCfg + } + + return nil +} + +func getScraperFactory(key string) (internal.BaseFactory, bool) { + if factory, ok := scraperFactories[key]; ok { + return factory, true + } + + if factory, ok := resourceScraperFactories[key]; ok { + return factory, true + } + + return nil, false +} + +// createDefaultConfig creates the default configuration for receiver. +func createDefaultConfig() configmodels.Receiver { + return &Config{ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(typeStr)} +} + +// createMetricsReceiver creates a metrics receiver based on provided config. 
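+//
+// Each entry in cfg.Scrapers becomes a scraper controller option (see
+// createAddScraperOptions below); the combined options drive a single
+// scraperhelper controller that scrapes on the configured interval.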
+func createMetricsReceiver( + ctx context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + consumer consumer.MetricsConsumer, +) (component.MetricsReceiver, error) { + oCfg := cfg.(*Config) + + addScraperOptions, err := createAddScraperOptions(ctx, params.Logger, oCfg, scraperFactories, resourceScraperFactories) + if err != nil { + return nil, err + } + + return scraperhelper.NewScraperControllerReceiver( + &oCfg.ScraperControllerSettings, + params.Logger, + consumer, + addScraperOptions..., + ) +} + +func createAddScraperOptions( + ctx context.Context, + logger *zap.Logger, + config *Config, + factories map[string]internal.ScraperFactory, + resourceFactories map[string]internal.ResourceScraperFactory, +) ([]scraperhelper.ScraperControllerOption, error) { + scraperControllerOptions := make([]scraperhelper.ScraperControllerOption, 0, len(config.Scrapers)) + + for key, cfg := range config.Scrapers { + hostMetricsScraper, ok, err := createHostMetricsScraper(ctx, logger, key, cfg, factories) + if err != nil { + return nil, fmt.Errorf("failed to create scraper for key %q: %w", key, err) + } + + if ok { + scraperControllerOptions = append(scraperControllerOptions, scraperhelper.AddMetricsScraper(hostMetricsScraper)) + continue + } + + resourceMetricsScraper, ok, err := createResourceMetricsScraper(ctx, logger, key, cfg, resourceFactories) + if err != nil { + return nil, fmt.Errorf("failed to create resource scraper for key %q: %w", key, err) + } + + if ok { + scraperControllerOptions = append(scraperControllerOptions, scraperhelper.AddResourceMetricsScraper(resourceMetricsScraper)) + continue + } + + return nil, fmt.Errorf("host metrics scraper factory not found for key: %q", key) + } + + return scraperControllerOptions, nil +} + +func createHostMetricsScraper(ctx context.Context, logger *zap.Logger, key string, cfg internal.Config, factories map[string]internal.ScraperFactory) (scraper scraperhelper.MetricsScraper, ok bool, err error) { + factory := factories[key] + if factory == nil { + ok = false + return + } + + ok = true + scraper, err = factory.CreateMetricsScraper(ctx, logger, cfg) + return +} + +func createResourceMetricsScraper(ctx context.Context, logger *zap.Logger, key string, cfg internal.Config, factories map[string]internal.ResourceScraperFactory) (scraper scraperhelper.ResourceMetricsScraper, ok bool, err error) { + factory := factories[key] + if factory == nil { + ok = false + return + } + + ok = true + scraper, err = factory.CreateResourceMetricsScraper(ctx, logger, cfg) + return +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/factory_test.go new file mode 100644 index 00000000000..ebb478bbcef --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/factory_test.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hostmetricsreceiver + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +var creationParams = component.ReceiverCreateParams{Logger: zap.NewNop()} + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + tReceiver, err := factory.CreateTracesReceiver(context.Background(), creationParams, cfg, consumertest.NewTracesNop()) + assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) + assert.Nil(t, tReceiver) + + mReceiver, err := factory.CreateMetricsReceiver(context.Background(), creationParams, cfg, consumertest.NewMetricsNop()) + assert.NoError(t, err) + assert.NotNil(t, mReceiver) + + tLogs, err := factory.CreateLogsReceiver(context.Background(), creationParams, cfg, consumertest.NewLogsNop()) + assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) + assert.Nil(t, tLogs) +} + +func TestCreateReceiver_ScraperKeyConfigError(t *testing.T) { + const errorKey string = "error" + + factory := NewFactory() + cfg := &Config{Scrapers: map[string]internal.Config{errorKey: &mockConfig{}}} + + _, err := factory.CreateMetricsReceiver(context.Background(), creationParams, cfg, consumertest.NewMetricsNop()) + assert.EqualError(t, err, fmt.Sprintf("host metrics scraper factory not found for key: %q", errorKey)) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go new file mode 100644 index 00000000000..c2ebbcc6346 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -0,0 +1,439 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hostmetricsreceiver + +import ( + "context" + "errors" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/processscraper" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +var standardMetrics = []string{ + "system.cpu.time", + "system.memory.usage", + "system.disk.io", + "system.disk.io_time", + "system.disk.ops", + "system.disk.operation_time", + "system.disk.pending_operations", + "system.filesystem.usage", + "system.cpu.load_average.1m", + "system.cpu.load_average.5m", + "system.cpu.load_average.15m", + "system.network.packets", + "system.network.dropped_packets", + "system.network.errors", + "system.network.io", + "system.network.tcp_connections", + "system.swap.paging_ops", + "system.swap.usage", +} + +var resourceMetrics = []string{ + "process.cpu.time", + "process.memory.physical_usage", + "process.memory.virtual_usage", + "process.disk.io", +} + +var systemSpecificMetrics = map[string][]string{ + "linux": {"system.disk.merged", "system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"}, + "darwin": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"}, + "freebsd": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"}, + "openbsd": {"system.filesystem.inodes.usage", "system.processes.running", "system.processes.blocked", "system.swap.page_faults"}, + "solaris": {"system.filesystem.inodes.usage", "system.swap.page_faults"}, +} + +var factories = map[string]internal.ScraperFactory{ + cpuscraper.TypeStr: &cpuscraper.Factory{}, + diskscraper.TypeStr: &diskscraper.Factory{}, + filesystemscraper.TypeStr: &filesystemscraper.Factory{}, + loadscraper.TypeStr: &loadscraper.Factory{}, + memoryscraper.TypeStr: &memoryscraper.Factory{}, + networkscraper.TypeStr: &networkscraper.Factory{}, + processesscraper.TypeStr: &processesscraper.Factory{}, + swapscraper.TypeStr: &swapscraper.Factory{}, +} + +var resourceFactories = map[string]internal.ResourceScraperFactory{ + processscraper.TypeStr: &processscraper.Factory{}, +} + +func TestGatherMetrics_EndToEnd(t *testing.T) { + scraperFactories = factories + resourceScraperFactories = resourceFactories + + sink := 
new(consumertest.MetricsSink) + + config := &Config{ + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 100 * time.Millisecond, + }, + Scrapers: map[string]internal.Config{ + cpuscraper.TypeStr: &cpuscraper.Config{}, + diskscraper.TypeStr: &diskscraper.Config{}, + filesystemscraper.TypeStr: &filesystemscraper.Config{}, + loadscraper.TypeStr: &loadscraper.Config{}, + memoryscraper.TypeStr: &memoryscraper.Config{}, + networkscraper.TypeStr: &networkscraper.Config{}, + processesscraper.TypeStr: &processesscraper.Config{}, + swapscraper.TypeStr: &swapscraper.Config{}, + }, + } + + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + config.Scrapers[processscraper.TypeStr] = &processscraper.Config{} + } + + receiver, err := NewFactory().CreateMetricsReceiver(context.Background(), creationParams, config, sink) + + require.NoError(t, err, "Failed to create metrics receiver: %v", err) + + ctx, cancelFn := context.WithCancel(context.Background()) + err = receiver.Start(ctx, componenttest.NewNopHost()) + require.NoError(t, err, "Failed to start metrics receiver: %v", err) + defer func() { assert.NoError(t, receiver.Shutdown(context.Background())) }() + + // canceling the context provided to Start should not cancel any async processes initiated by the receiver + cancelFn() + + const tick = 50 * time.Millisecond + const waitFor = 5 * time.Second + require.Eventuallyf(t, func() bool { + got := sink.AllMetrics() + if len(got) == 0 { + return false + } + + assertIncludesExpectedMetrics(t, got[0]) + return true + }, waitFor, tick, "No metrics were collected after %v", waitFor) +} + +func assertIncludesExpectedMetrics(t *testing.T, got pdata.Metrics) { + // get the superset of metrics returned by all resource metrics (excluding the first) + returnedMetrics := make(map[string]struct{}) + returnedResourceMetrics := make(map[string]struct{}) + rms := got.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + metrics := getMetricSlice(t, rm) + returnedMetricNames := getReturnedMetricNames(metrics) + if rm.Resource().Attributes().Len() == 0 { + appendMapInto(returnedMetrics, returnedMetricNames) + } else { + appendMapInto(returnedResourceMetrics, returnedMetricNames) + } + } + + // verify the expected list of metrics returned (os dependent) + expectedMetrics := append(standardMetrics, systemSpecificMetrics[runtime.GOOS]...) 
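+	// an exact set match is expected: the same number of metrics, and every expected name present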
+ assert.Equal(t, len(expectedMetrics), len(returnedMetrics)) + for _, expected := range expectedMetrics { + assert.Contains(t, returnedMetrics, expected) + } + + // verify the expected list of resource metrics returned (Linux & Windows only) + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + return + } + + assert.Equal(t, len(resourceMetrics), len(returnedResourceMetrics)) + for _, expected := range resourceMetrics { + assert.Contains(t, returnedResourceMetrics, expected) + } +} + +func getMetricSlice(t *testing.T, rm pdata.ResourceMetrics) pdata.MetricSlice { + ilms := rm.InstrumentationLibraryMetrics() + require.Equal(t, 1, ilms.Len()) + return ilms.At(0).Metrics() +} + +func getReturnedMetricNames(metrics pdata.MetricSlice) map[string]struct{} { + metricNames := make(map[string]struct{}) + for i := 0; i < metrics.Len(); i++ { + metricNames[metrics.At(i).Name()] = struct{}{} + } + return metricNames +} + +func appendMapInto(m1 map[string]struct{}, m2 map[string]struct{}) { + for k, v := range m2 { + m1[k] = v + } +} + +const mockTypeStr = "mock" +const mockResourceTypeStr = "mockresource" + +type mockConfig struct{} + +type mockFactory struct{ mock.Mock } +type mockScraper struct{ mock.Mock } + +func (m *mockFactory) CreateDefaultConfig() internal.Config { return &mockConfig{} } +func (m *mockFactory) CreateMetricsScraper(context.Context, *zap.Logger, internal.Config) (scraperhelper.MetricsScraper, error) { + args := m.MethodCalled("CreateMetricsScraper") + return args.Get(0).(scraperhelper.MetricsScraper), args.Error(1) +} + +func (m *mockScraper) Name() string { return "" } +func (m *mockScraper) Start(context.Context, component.Host) error { return nil } +func (m *mockScraper) Shutdown(context.Context) error { return nil } +func (m *mockScraper) Scrape(context.Context, string) (pdata.MetricSlice, error) { + return pdata.NewMetricSlice(), errors.New("err1") +} + +type mockResourceFactory struct{ mock.Mock } +type mockResourceScraper struct{ mock.Mock } + +func (m *mockResourceFactory) CreateDefaultConfig() internal.Config { return &mockConfig{} } +func (m *mockResourceFactory) CreateResourceMetricsScraper(context.Context, *zap.Logger, internal.Config) (scraperhelper.ResourceMetricsScraper, error) { + args := m.MethodCalled("CreateResourceMetricsScraper") + return args.Get(0).(scraperhelper.ResourceMetricsScraper), args.Error(1) +} + +func (m *mockResourceScraper) Name() string { return "" } +func (m *mockResourceScraper) Start(context.Context, component.Host) error { return nil } +func (m *mockResourceScraper) Shutdown(context.Context) error { return nil } +func (m *mockResourceScraper) Scrape(context.Context, string) (pdata.ResourceMetricsSlice, error) { + return pdata.NewResourceMetricsSlice(), errors.New("err2") +} + +func TestGatherMetrics_ScraperKeyConfigError(t *testing.T) { + scraperFactories = map[string]internal.ScraperFactory{} + resourceScraperFactories = map[string]internal.ResourceScraperFactory{} + + sink := new(consumertest.MetricsSink) + config := &Config{Scrapers: map[string]internal.Config{"error": &mockConfig{}}} + _, err := NewFactory().CreateMetricsReceiver(context.Background(), creationParams, config, sink) + require.Error(t, err) +} + +func TestGatherMetrics_CreateMetricsScraperError(t *testing.T) { + mFactory := &mockFactory{} + mFactory.On("CreateMetricsScraper").Return(&mockScraper{}, errors.New("err1")) + scraperFactories = map[string]internal.ScraperFactory{mockTypeStr: mFactory} + resourceScraperFactories = 
map[string]internal.ResourceScraperFactory{} + + sink := new(consumertest.MetricsSink) + config := &Config{Scrapers: map[string]internal.Config{mockTypeStr: &mockConfig{}}} + _, err := NewFactory().CreateMetricsReceiver(context.Background(), creationParams, config, sink) + require.Error(t, err) +} + +func TestGatherMetrics_CreateMetricsResourceScraperError(t *testing.T) { + mResourceFactory := &mockResourceFactory{} + mResourceFactory.On("CreateResourceMetricsScraper").Return(&mockResourceScraper{}, errors.New("err1")) + scraperFactories = map[string]internal.ScraperFactory{} + resourceScraperFactories = map[string]internal.ResourceScraperFactory{mockResourceTypeStr: mResourceFactory} + + sink := new(consumertest.MetricsSink) + config := &Config{Scrapers: map[string]internal.Config{mockResourceTypeStr: &mockConfig{}}} + _, err := NewFactory().CreateMetricsReceiver(context.Background(), creationParams, config, sink) + require.Error(t, err) +} + +type notifyingSink struct { + receivedMetrics bool + timesCalled int + ch chan int +} + +func (s *notifyingSink) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { + if md.MetricCount() > 0 { + s.receivedMetrics = true + } + + s.timesCalled++ + s.ch <- s.timesCalled + return nil +} + +func benchmarkScrapeMetrics(b *testing.B, cfg *Config) { + scraperFactories = factories + resourceScraperFactories = resourceFactories + + sink := ¬ifyingSink{ch: make(chan int, 10)} + tickerCh := make(chan time.Time) + + options, err := createAddScraperOptions(context.Background(), zap.NewNop(), cfg, scraperFactories, resourceScraperFactories) + require.NoError(b, err) + options = append(options, scraperhelper.WithTickerChannel(tickerCh)) + + receiver, err := scraperhelper.NewScraperControllerReceiver(&cfg.ScraperControllerSettings, zap.NewNop(), sink, options...) 
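+	// the controller is driven manually: each send on tickerCh below triggers one
+	// scrape cycle, and sink.ch signals once the scraped metrics have been consumed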
+ require.NoError(b, err) + + require.NoError(b, receiver.Start(context.Background(), componenttest.NewNopHost())) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + tickerCh <- time.Now() + <-sink.ch + } + + if !sink.receivedMetrics { + b.Fail() + } +} + +func Benchmark_ScrapeCpuMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{cpuscraper.TypeStr: (&cpuscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeDiskMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{diskscraper.TypeStr: (&diskscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeFileSystemMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{filesystemscraper.TypeStr: (&filesystemscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeLoadMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{loadscraper.TypeStr: (&loadscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeMemoryMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{memoryscraper.TypeStr: (&memoryscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeNetworkMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{networkscraper.TypeStr: (&networkscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeProcessesMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{processesscraper.TypeStr: (&processesscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeSwapMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{swapscraper.TypeStr: (&swapscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeProcessMetrics(b *testing.B) { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + b.Skip("skipping test on non linux/windows") + } + + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{processscraper.TypeStr: (&processscraper.Factory{}).CreateDefaultConfig()}, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeSystemMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{ + cpuscraper.TypeStr: (&cpuscraper.Factory{}).CreateDefaultConfig(), + diskscraper.TypeStr: (&diskscraper.Factory{}).CreateDefaultConfig(), + filesystemscraper.TypeStr: (&filesystemscraper.Factory{}).CreateDefaultConfig(), 
+ loadscraper.TypeStr: (&loadscraper.Factory{}).CreateDefaultConfig(), + memoryscraper.TypeStr: (&memoryscraper.Factory{}).CreateDefaultConfig(), + networkscraper.TypeStr: (&networkscraper.Factory{}).CreateDefaultConfig(), + processesscraper.TypeStr: (&processesscraper.Factory{}).CreateDefaultConfig(), + swapscraper.TypeStr: (&swapscraper.Factory{}).CreateDefaultConfig(), + }, + } + + benchmarkScrapeMetrics(b, cfg) +} + +func Benchmark_ScrapeSystemAndProcessMetrics(b *testing.B) { + cfg := &Config{ + ScraperControllerSettings: scraperhelper.DefaultScraperControllerSettings(""), + Scrapers: map[string]internal.Config{ + cpuscraper.TypeStr: &cpuscraper.Config{}, + diskscraper.TypeStr: &diskscraper.Config{}, + filesystemscraper.TypeStr: &filesystemscraper.Config{}, + loadscraper.TypeStr: &loadscraper.Config{}, + memoryscraper.TypeStr: &memoryscraper.Config{}, + networkscraper.TypeStr: &networkscraper.Config{}, + processesscraper.TypeStr: &processesscraper.Config{}, + swapscraper.TypeStr: &swapscraper.Config{}, + }, + } + + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + cfg.Scrapers[processscraper.TypeStr] = &processscraper.Config{} + } + + benchmarkScrapeMetrics(b, cfg) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go new file mode 100644 index 00000000000..0462614b1a8 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go @@ -0,0 +1,168 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// Type is the component type name. +const Type configmodels.Type = "hostmetricsreceiver" + +type metricIntf interface { + Name() string + New() pdata.Metric +} + +// Intentionally not exposing this so that it is opaque and can change freely. +type metricImpl struct { + name string + newFunc func() pdata.Metric +} + +func (m *metricImpl) Name() string { + return m.name +} + +func (m *metricImpl) New() pdata.Metric { + return m.newFunc() +} + +type metricStruct struct { + SystemCPUTime metricIntf + SystemMemoryUsage metricIntf +} + +// Names returns a list of all the metric name strings. 
+func (m *metricStruct) Names() []string { + return []string{ + "system.cpu.time", + "system.memory.usage", + } +} + +var metricsByName = map[string]metricIntf{ + "system.cpu.time": Metrics.SystemCPUTime, + "system.memory.usage": Metrics.SystemMemoryUsage, +} + +func (m *metricStruct) ByName(n string) metricIntf { + return metricsByName[n] +} + +func (m *metricStruct) FactoriesByName() map[string]func() pdata.Metric { + return map[string]func() pdata.Metric{ + Metrics.SystemCPUTime.Name(): Metrics.SystemCPUTime.New, + Metrics.SystemMemoryUsage.Name(): Metrics.SystemMemoryUsage.New, + } +} + +// Metrics contains a set of methods for each metric that help with +// manipulating those metrics. +var Metrics = &metricStruct{ + &metricImpl{ + "system.cpu.time", + func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.cpu.time") + metric.SetDescription("Total CPU seconds broken down by different states.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + metric.DoubleSum().SetIsMonotonic(true) + metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + + return metric + }, + }, + &metricImpl{ + "system.memory.usage", + func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.memory.usage") + metric.SetDescription("Bytes of memory in use.") + metric.SetUnit("By") + metric.SetDataType(pdata.MetricDataTypeIntSum) + metric.IntSum().SetIsMonotonic(false) + metric.IntSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + + return metric + }, + }, +} + +// M contains a set of methods for each metric that help with +// manipulating those metrics. M is an alias for Metrics +var M = Metrics + +// Labels contains the possible metric labels that can be used. +var Labels = struct { + // Cpu (CPU number starting at 0.) + Cpu string + // CPUState (Breakdown of CPU usage by type.) + CPUState string + // MemState (Breakdown of memory usage by type.) + MemState string +}{ + "cpu", + "state", + "state", +} + +// L contains the possible metric labels that can be used. L is an alias for +// Labels. +var L = Labels + +// LabelCPUState are the possible values that the label "cpu.state" can have. +var LabelCPUState = struct { + Idle string + Interrupt string + Nice string + Softirq string + Steal string + System string + User string + Wait string +}{ + "idle", + "interrupt", + "nice", + "softirq", + "steal", + "system", + "user", + "wait", +} + +// LabelMemState are the possible values that the label "mem.state" can have. +var LabelMemState = struct { + Buffered string + Cached string + Inactive string + Free string + SlabReclaimable string + SlabUnreclaimable string + Used string +}{ + "buffered", + "cached", + "inactive", + "free", + "slab_reclaimable", + "slab_unreclaimable", + "used", +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/doc.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/doc.go new file mode 100644 index 00000000000..6ce1037f033 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/doc.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package perfcounters is a thin wrapper around
+// https://godoc.org/github.com/leoluk/perflib_exporter/perflib that
+// provides functions to scrape raw performance counter data from the
+// registry, without calculating rates or formatting them.
+package perfcounters
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper.go
new file mode 100644
index 00000000000..bc5e17b9add
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper.go
@@ -0,0 +1,202 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package perfcounters
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/leoluk/perflib_exporter/perflib"
+
+	"go.opentelemetry.io/collector/internal/processor/filterset"
+)
+
+const totalInstanceName = "_Total"
+
+// PerfCounterScraper scrapes performance counter data.
+type PerfCounterScraper interface {
+	// Initialize prepares the PerfCounterScraper so that subsequent calls
+	// to Scrape will return performance counter data for the specified set
+	// of objects.
+	Initialize(objects ...string) error
+	// Scrape returns performance data for the initialized objects.
+	Scrape() (PerfDataCollection, error)
+}
+
+// PerfLibScraper is an implementation of PerfCounterScraper that uses
+// perflib to scrape performance counter data.
+type PerfLibScraper struct {
+	objectIndices string
+}
+
+func (p *PerfLibScraper) Initialize(objects ...string) error {
+	// "Counter 009" reads perf counter names in English.
+	// This is always present regardless of the OS language.
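+	// The name table maps these English object/counter names to the
+	// locale-independent indices that the perflib query below uses.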
+	nameTable := perflib.QueryNameTable("Counter 009")
+
+	// lookup object indices from name table
+	objectIndicesMap := map[uint32]struct{}{}
+	for _, name := range objects {
+		index := nameTable.LookupIndex(name)
+		if index == 0 {
+			return fmt.Errorf("Failed to retrieve perf counter object %q", name)
+		}
+
+		objectIndicesMap[index] = struct{}{}
+	}
+
+	// convert to a space-separated string
+	objectIndicesSlice := make([]string, 0, len(objectIndicesMap))
+	for k := range objectIndicesMap {
+		objectIndicesSlice = append(objectIndicesSlice, strconv.Itoa(int(k)))
+	}
+	p.objectIndices = strings.Join(objectIndicesSlice, " ")
+	return nil
+}
+
+func (p *PerfLibScraper) Scrape() (PerfDataCollection, error) {
+	objects, err := perflib.QueryPerformanceData(p.objectIndices)
+	if err != nil {
+		return nil, err
+	}
+
+	indexed := make(map[string]*perflib.PerfObject)
+	for _, obj := range objects {
+		indexed[obj.Name] = obj
+	}
+
+	return perfDataCollection{perfObject: indexed}, nil
+}
+
+// PerfDataCollection represents a collection of perf counter data.
+type PerfDataCollection interface {
+	// GetObject returns the perf counter data associated with the specified object,
+	// or returns an error if no data exists for this object name.
+	GetObject(objectName string) (PerfDataObject, error)
+}
+
+type perfDataCollection struct {
+	perfObject map[string]*perflib.PerfObject
+}
+
+func (p perfDataCollection) GetObject(objectName string) (PerfDataObject, error) {
+	obj, ok := p.perfObject[objectName]
+	if !ok {
+		return nil, fmt.Errorf("Unable to find object %q", objectName)
+	}
+
+	return perfDataObject{obj}, nil
+}
+
+// PerfDataObject represents the perf counter values and associated
+// instances of a single perf object.
+type PerfDataObject interface {
+	// Filter filters the perf counter data to only retain data related to
+	// relevant instances based on the supplied parameters.
+	Filter(includeFS, excludeFS filterset.FilterSet, includeTotal bool)
+	// GetValues returns the performance counter data associated with the specified
+	// counters, or returns an error if any of the specified counter names do not
+	// exist.
+	GetValues(counterNames ...string) ([]*CounterValues, error)
+}
+
+type perfDataObject struct {
+	*perflib.PerfObject
+}
+
+func (obj perfDataObject) Filter(includeFS, excludeFS filterset.FilterSet, includeTotal bool) {
+	if includeFS == nil && excludeFS == nil && includeTotal {
+		return
+	}
+
+	filteredDevices := make([]*perflib.PerfInstance, 0, len(obj.Instances))
+	for _, device := range obj.Instances {
+		if includeDevice(device.Name, includeFS, excludeFS, includeTotal) {
+			filteredDevices = append(filteredDevices, device)
+		}
+	}
+	obj.Instances = filteredDevices
+}
+
+func includeDevice(deviceName string, includeFS, excludeFS filterset.FilterSet, includeTotal bool) bool {
+	if deviceName == totalInstanceName {
+		return includeTotal
+	}
+
+	return (includeFS == nil || includeFS.Matches(deviceName)) &&
+		(excludeFS == nil || !excludeFS.Matches(deviceName))
+}
+
+// CounterValues represents a set of perf counter values for a given instance.
+type CounterValues struct { + InstanceName string + Values map[string]int64 +} + +type counterIndex struct { + index int + name string +} + +func (obj perfDataObject) GetValues(counterNames ...string) ([]*CounterValues, error) { + counterIndices := make([]counterIndex, 0, len(counterNames)) + for idx, counter := range obj.CounterDefs { + // "Base" values give the value of a related counter that pdh.dll uses to compute the derived + // value for this counter. We only care about raw values so ignore base values. See + // https://docs.microsoft.com/en-us/windows/win32/perfctrs/retrieving-counter-data. + if counter.IsBaseValue && !counter.IsNanosecondCounter { + continue + } + + for _, counterName := range counterNames { + if counter.Name == counterName { + counterIndices = append(counterIndices, counterIndex{index: idx, name: counter.Name}) + break + } + } + } + + if len(counterIndices) < len(counterNames) { + return nil, fmt.Errorf("Unable to find counters %q in object %q", missingCounterNames(counterNames, counterIndices), obj.Name) + } + + values := make([]*CounterValues, len(obj.Instances)) + for i, instance := range obj.Instances { + instanceValues := &CounterValues{InstanceName: instance.Name, Values: make(map[string]int64, len(counterIndices))} + for _, counter := range counterIndices { + instanceValues.Values[counter.name] = instance.Counters[counter.index].Value + } + values[i] = instanceValues + } + return values, nil +} + +func missingCounterNames(counterNames []string, counterIndices []counterIndex) []string { + matchedCounters := make(map[string]struct{}, len(counterIndices)) + for _, counter := range counterIndices { + matchedCounters[counter.name] = struct{}{} + } + + counters := make([]string, 0, len(counterNames)-len(matchedCounters)) + for _, counter := range counterNames { + if _, ok := matchedCounters[counter]; !ok { + counters = append(counters, counter) + } + } + return counters +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_mock.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_mock.go new file mode 100644 index 00000000000..861a5cae50a --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_mock.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package perfcounters + +import ( + "fmt" + + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +// MockPerfCounterScraperError is an implementation of PerfCounterScraper that returns +// the supplied errors when scrape, GetObject, or GetValues are called. +type MockPerfCounterScraperError struct { + scrapeErr error + getObjectErr error + getValuesErr error +} + +// NewMockPerfCounterScraperError returns a MockPerfCounterScraperError that will return +// the specified errors on subsequent function calls. 
+func NewMockPerfCounterScraperError(scrapeErr, getObjectErr, getValuesErr error) *MockPerfCounterScraperError {
+	return &MockPerfCounterScraperError{scrapeErr: scrapeErr, getObjectErr: getObjectErr, getValuesErr: getValuesErr}
+}
+
+// Initialize is a no-op.
+func (p *MockPerfCounterScraperError) Initialize(objects ...string) error {
+	return nil
+}
+
+// Scrape returns the specified scrapeErr or, if scrapeErr is nil, an object
+// that will return a subsequent error.
+func (p *MockPerfCounterScraperError) Scrape() (PerfDataCollection, error) {
+	if p.scrapeErr != nil {
+		return nil, p.scrapeErr
+	}
+
+	return mockPerfDataCollectionError{getObjectErr: p.getObjectErr, getValuesErr: p.getValuesErr}, nil
+}
+
+type mockPerfDataCollectionError struct {
+	getObjectErr error
+	getValuesErr error
+}
+
+// GetObject returns the specified getObjectErr or an object that will return a
+// subsequent error if getObjectErr is nil.
+func (p mockPerfDataCollectionError) GetObject(objectName string) (PerfDataObject, error) {
+	if p.getObjectErr != nil {
+		return nil, p.getObjectErr
+	}
+
+	return mockPerfDataObjectError{getValuesErr: p.getValuesErr}, nil
+}
+
+type mockPerfDataObjectError struct {
+	getValuesErr error
+}
+
+// Filter is a no-op.
+func (obj mockPerfDataObjectError) Filter(includeFS, excludeFS filterset.FilterSet, includeTotal bool) {
+}
+
+// GetValues returns the specified getValuesErr.
+func (obj mockPerfDataObjectError) GetValues(counterNames ...string) ([]*CounterValues, error) {
+	return nil, obj.getValuesErr
+}
+
+// MockPerfCounterScraper is an implementation of PerfCounterScraper that returns the supplied
+// object / counter values on each successive call to Scrape, in the specified order.
+//
+// Example Usage:
+//
+// s := NewMockPerfCounterScraper(map[string]map[string][]int64{
+//	"Object1": map[string][]int64{
+//		"Counter1": []int64{1, 2},
+//		"Counter2": []int64{4},
+//	},
+// })
+//
+// s.Scrape().GetObject("Object1").GetValues("Counter1", "Counter2")
+//
+// ... 1st call returns []*CounterValues{ { Values: { "Counter1": 1, "Counter2": 4 } } }
+// ... 2nd call returns []*CounterValues{ { Values: { "Counter1": 2, "Counter2": 4 } } }
type MockPerfCounterScraper struct {
+	objectsAndValuesToReturn map[string]map[string][]int64
+	timesCalled              int
+}
+
+// NewMockPerfCounterScraper returns a MockPerfCounterScraper that will return the supplied
+// object / counter values on each successive call to Scrape, in the specified order.
+func NewMockPerfCounterScraper(objectsAndValuesToReturn map[string]map[string][]int64) *MockPerfCounterScraper {
+	return &MockPerfCounterScraper{objectsAndValuesToReturn: objectsAndValuesToReturn}
+}
+
+// Initialize is a no-op.
+func (p *MockPerfCounterScraper) Initialize(objects ...string) error {
+	return nil
+}
+
+// Scrape returns a perf data collection with the supplied object / counter values,
+// according to the supplied order.
+func (p *MockPerfCounterScraper) Scrape() (PerfDataCollection, error) { + objectsAndValuesToReturn := make(map[string]map[string]int64, len(p.objectsAndValuesToReturn)) + for objectName, countersToReturn := range p.objectsAndValuesToReturn { + valuesToReturn := make(map[string]int64, len(countersToReturn)) + for counterName, orderedValuesToReturn := range countersToReturn { + returnIndex := p.timesCalled + if returnIndex >= len(orderedValuesToReturn) { + returnIndex = len(orderedValuesToReturn) - 1 + } + valuesToReturn[counterName] = orderedValuesToReturn[returnIndex] + } + objectsAndValuesToReturn[objectName] = valuesToReturn + } + + p.timesCalled++ + return mockPerfDataCollection{objectsAndValuesToReturn: objectsAndValuesToReturn}, nil +} + +type mockPerfDataCollection struct { + objectsAndValuesToReturn map[string]map[string]int64 +} + +// GetObject returns the specified object / counter values +func (p mockPerfDataCollection) GetObject(objectName string) (PerfDataObject, error) { + valuesToReturn, ok := p.objectsAndValuesToReturn[objectName] + if !ok { + return nil, fmt.Errorf("Unable to find object %q", objectName) + } + + return mockPerfDataObject{valuesToReturn: valuesToReturn}, nil +} + +type mockPerfDataObject struct { + valuesToReturn map[string]int64 +} + +// Filter is a no-op +func (obj mockPerfDataObject) Filter(includeFS, excludeFS filterset.FilterSet, includeTotal bool) { +} + +// GetValues returns the specified counter values +func (obj mockPerfDataObject) GetValues(counterNames ...string) ([]*CounterValues, error) { + value := &CounterValues{Values: map[string]int64{}} + for _, counterName := range counterNames { + valueToReturn, ok := obj.valuesToReturn[counterName] + if !ok { + return nil, fmt.Errorf("Mock Perf Counter Scraper configured incorrectly. Return value for counter %q not specified", counterName) + } + value.Values[counterName] = valueToReturn + } + return []*CounterValues{value}, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_test.go new file mode 100644 index 00000000000..c699f0e99cc --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_test.go @@ -0,0 +1,171 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
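+
+// For orientation, the end-to-end flow exercised by the test below looks
+// roughly like this (a sketch only, assuming a Windows host where the
+// "Memory" object and its "Committed Bytes" counter are available):
+//
+//	s := &PerfLibScraper{}
+//	if err := s.Initialize("Memory"); err != nil {
+//		// handle error
+//	}
+//	collection, _ := s.Scrape()
+//	object, _ := collection.GetObject("Memory")
+//	object.Filter(nil, nil, true) // no-op filter that keeps "_Total"
+//	values, _ := object.GetValues("Committed Bytes")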
+ +// +build windows + +package perfcounters + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/internal/processor/filterset" +) + +func Test_PerfCounterScraper(t *testing.T) { + type testCase struct { + name string + // NewPerfCounter + objects []string + newErr string + expectIndices []string + // Filter + includeFS filterset.FilterSet + excludeFS filterset.FilterSet + includeTotal bool + // GetObject + getObject string + getObjectErr string + // GetCounterValues + getCounters []string + getCountersErr string + expectedInstanceNames []string + excludedInstanceNames []string + expectedMinimumInstances int + } + + excludedCommonDrives := []string{"C:"} + excludeCommonDriveFilterSet, err := filterset.CreateFilterSet(excludedCommonDrives, &filterset.Config{MatchType: filterset.Strict}) + require.NoError(t, err) + + testCases := []testCase{ + { + name: "Standard", + objects: []string{"Memory"}, + expectIndices: []string{"4"}, + getObject: "Memory", + getCounters: []string{"Committed Bytes"}, + expectedInstanceNames: []string{""}, + }, + { + name: "Multiple Objects & Values", + objects: []string{"Memory", "LogicalDisk"}, + expectIndices: []string{"4", "236"}, + getObject: "LogicalDisk", + getCounters: []string{"Disk Reads/sec", "Disk Writes/sec"}, + expectedMinimumInstances: 1, + }, + { + name: "Filtered", + objects: []string{"LogicalDisk"}, + expectIndices: []string{"236"}, + excludeFS: excludeCommonDriveFilterSet, + includeTotal: true, + getObject: "LogicalDisk", + getCounters: []string{"Disk Reads/sec"}, + excludedInstanceNames: excludedCommonDrives, + }, + { + name: "New Error", + objects: []string{"Memory", "Invalid Object 1", "Invalid Object 2"}, + newErr: `Failed to retrieve perf counter object "Invalid Object 1"`, + }, + { + name: "Get Object Error", + objects: []string{"Memory"}, + expectIndices: []string{"4"}, + getObject: "Invalid Object 1", + getObjectErr: `Unable to find object "Invalid Object 1"`, + }, + { + name: "Get Values Error", + objects: []string{"Memory"}, + expectIndices: []string{"4"}, + getObject: "Memory", + getCounters: []string{"Committed Bytes", "Invalid Counter 1", "Invalid Counter 2"}, + getCountersErr: `Unable to find counters ["Invalid Counter 1" "Invalid Counter 2"] in object "Memory"`, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + s := &PerfLibScraper{} + err := s.Initialize(test.objects...) + if test.newErr != "" { + assert.EqualError(t, err, test.newErr) + return + } + require.NoError(t, err, "Failed to create new perf counter scraper: %v", err) + + assert.ElementsMatch(t, test.expectIndices, strings.Split(s.objectIndices, " ")) + + c, err := s.Scrape() + require.NoError(t, err, "Failed to scrape data: %v", err) + + p, err := c.GetObject(test.getObject) + if test.getObjectErr != "" { + assert.EqualError(t, err, test.getObjectErr) + return + } + require.NoError(t, err, "Failed to get object: %v", err) + + p.Filter(test.includeFS, test.excludeFS, test.includeTotal) + + counterValues, err := p.GetValues(test.getCounters...) 
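+			// GetValues returns an error if any of the requested counters is missing from the object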
+			if test.getCountersErr != "" {
+				assert.EqualError(t, err, test.getCountersErr)
+				return
+			}
+			require.NoError(t, err, "Failed to get counter: %v", err)
+
+			assert.GreaterOrEqual(t, len(counterValues), test.expectedMinimumInstances)
+
+			if len(test.expectedInstanceNames) > 0 {
+				for _, expectedName := range test.expectedInstanceNames {
+					var gotName bool
+					for _, cv := range counterValues {
+						if cv.InstanceName == expectedName {
+							gotName = true
+							break
+						}
+					}
+					assert.Truef(t, gotName, "Expected Instance %q was not returned", expectedName)
+				}
+			}
+
+			if len(test.excludedInstanceNames) > 0 {
+				for _, excludedName := range test.excludedInstanceNames {
+					for _, cv := range counterValues {
+						if cv.InstanceName == excludedName {
+							assert.Fail(t, "", "Excluded Instance %q was returned", excludedName)
+							break
+						}
+					}
+				}
+			}
+
+			var includesTotal bool
+			for _, cv := range counterValues {
+				if cv.InstanceName == "_Total" {
+					includesTotal = true
+					break
+				}
+			}
+			assert.Equalf(t, test.includeTotal, includesTotal, "expected _Total inclusion to be %v, but was %v", test.includeTotal, includesTotal)
+		})
+	}
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go
new file mode 100644
index 00000000000..45de375706a
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/receiver/scraperhelper"
+)
+
+// BaseFactory for creating Scrapers.
+type BaseFactory interface {
+	// CreateDefaultConfig creates the default configuration for the Scraper.
+	CreateDefaultConfig() Config
+}
+
+// ScraperFactory can create a MetricsScraper.
+type ScraperFactory interface {
+	BaseFactory
+
+	// CreateMetricsScraper creates a scraper based on this config.
+	// If the config is not valid, an error is returned instead.
+	CreateMetricsScraper(ctx context.Context, logger *zap.Logger, cfg Config) (scraperhelper.MetricsScraper, error)
+}
+
+// ResourceScraperFactory can create a ResourceScraper.
+type ResourceScraperFactory interface {
+	BaseFactory
+
+	// CreateResourceMetricsScraper creates a resource scraper based on this
+	// config. If the config is not valid, an error is returned instead.
+	CreateResourceMetricsScraper(ctx context.Context, logger *zap.Logger, cfg Config) (scraperhelper.ResourceMetricsScraper, error)
+}
+
+// Config is the configuration of a scraper.
+type Config interface {
+}
+
+// ConfigSettings provides common settings for scraper configuration.
+type ConfigSettings struct { +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go new file mode 100644 index 00000000000..0cf488103d9 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cpuscraper + +import "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + +// Config relating to CPU Metric Scraper. +type Config struct { + internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go new file mode 100644 index 00000000000..1884b65a648 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cpuscraper
+
+import (
+	"context"
+	"time"
+
+	"github.com/shirou/gopsutil/cpu"
+	"github.com/shirou/gopsutil/host"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata"
+)
+
+const metricsLen = 1
+
+// scraper for CPU Metrics
+type scraper struct {
+	config    *Config
+	startTime pdata.TimestampUnixNano
+
+	// for mocking
+	bootTime func() (uint64, error)
+	times    func(bool) ([]cpu.TimesStat, error)
+}
+
+// newCPUScraper returns a scraper that collects CPU time metrics.
+func newCPUScraper(_ context.Context, cfg *Config) *scraper {
+	return &scraper{config: cfg, bootTime: host.BootTime, times: cpu.Times}
+}
+
+func (s *scraper) start(context.Context, component.Host) error {
+	bootTime, err := s.bootTime()
+	if err != nil {
+		return err
+	}
+
+	s.startTime = pdata.TimestampUnixNano(bootTime * 1e9)
+	return nil
+}
+
+func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) {
+	metrics := pdata.NewMetricSlice()
+
+	now := internal.TimeToUnixNano(time.Now())
+	cpuTimes, err := s.times( /*percpu=*/ true)
+	if err != nil {
+		return metrics, consumererror.NewPartialScrapeError(err, metricsLen)
+	}
+
+	metrics.Resize(metricsLen)
+	initializeCPUTimeMetric(metrics.At(0), s.startTime, now, cpuTimes)
+	return metrics, nil
+}
+
+func initializeCPUTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, cpuTimes []cpu.TimesStat) {
+	metadata.Metrics.SystemCPUTime.New().CopyTo(metric)
+
+	ddps := metric.DoubleSum().DataPoints()
+	ddps.Resize(len(cpuTimes) * cpuStatesLen)
+	for i, cpuTime := range cpuTimes {
+		appendCPUTimeStateDataPoints(ddps, i*cpuStatesLen, startTime, now, cpuTime)
+	}
+}
+
+const gopsCPUTotal string = "cpu-total"
+
+func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, cpuLabel string, stateLabel string, value float64) {
+	labelsMap := dataPoint.LabelsMap()
+	// ignore cpu label if reporting "total" cpu usage
+	if cpuLabel != gopsCPUTotal {
+		labelsMap.Insert(metadata.Labels.Cpu, cpuLabel)
+	}
+	labelsMap.Insert(metadata.Labels.CPUState, stateLabel)
+
+	dataPoint.SetStartTime(startTime)
+	dataPoint.SetTimestamp(now)
+	dataPoint.SetValue(value)
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go
new file mode 100644
index 00000000000..627c66c315f
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// +build linux + +package cpuscraper + +import ( + "github.com/shirou/gopsutil/cpu" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +const cpuStatesLen = 8 + +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startIdx int, startTime, now pdata.TimestampUnixNano, cpuTime cpu.TimesStat) { + initializeCPUTimeDataPoint(ddps.At(startIdx+0), startTime, now, cpuTime.CPU, metadata.LabelCPUState.User, cpuTime.User) + initializeCPUTimeDataPoint(ddps.At(startIdx+1), startTime, now, cpuTime.CPU, metadata.LabelCPUState.System, cpuTime.System) + initializeCPUTimeDataPoint(ddps.At(startIdx+2), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Idle, cpuTime.Idle) + initializeCPUTimeDataPoint(ddps.At(startIdx+3), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Interrupt, cpuTime.Irq) + initializeCPUTimeDataPoint(ddps.At(startIdx+4), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Nice, cpuTime.Nice) + initializeCPUTimeDataPoint(ddps.At(startIdx+5), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Softirq, cpuTime.Softirq) + initializeCPUTimeDataPoint(ddps.At(startIdx+6), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Steal, cpuTime.Steal) + initializeCPUTimeDataPoint(ddps.At(startIdx+7), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Wait, cpuTime.Iowait) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go new file mode 100644 index 00000000000..5257d5b5e27 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !linux + +package cpuscraper + +import ( + "github.com/shirou/gopsutil/cpu" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +const cpuStatesLen = 4 + +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startIdx int, startTime, now pdata.TimestampUnixNano, cpuTime cpu.TimesStat) { + initializeCPUTimeDataPoint(ddps.At(startIdx+0), startTime, now, cpuTime.CPU, metadata.LabelCPUState.User, cpuTime.User) + initializeCPUTimeDataPoint(ddps.At(startIdx+1), startTime, now, cpuTime.CPU, metadata.LabelCPUState.System, cpuTime.System) + initializeCPUTimeDataPoint(ddps.At(startIdx+2), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Idle, cpuTime.Idle) + initializeCPUTimeDataPoint(ddps.At(startIdx+3), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Interrupt, cpuTime.Irq) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go new file mode 100644 index 00000000000..cde0d8ece68 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go @@ -0,0 +1,127 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cpuscraper + +import ( + "context" + "errors" + "runtime" + "testing" + + "github.com/shirou/gopsutil/cpu" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + bootTimeFunc func() (uint64, error) + timesFunc func(bool) ([]cpu.TimesStat, error) + expectedStartTime pdata.TimestampUnixNano + initializationErr string + expectedErr string + } + + testCases := []testCase{ + { + name: "Standard", + }, + { + name: "Validate Start Time", + bootTimeFunc: func() (uint64, error) { return 100, nil }, + expectedStartTime: 100 * 1e9, + }, + { + name: "Boot Time Error", + bootTimeFunc: func() (uint64, error) { return 0, errors.New("err1") }, + initializationErr: "err1", + }, + { + name: "Times Error", + timesFunc: func(bool) ([]cpu.TimesStat, error) { return nil, errors.New("err2") }, + expectedErr: "err2", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newCPUScraper(context.Background(), &Config{}) + if test.bootTimeFunc != nil { + scraper.bootTime = test.bootTimeFunc + } + if test.timesFunc != nil { + scraper.times = test.timesFunc + } + + err := scraper.start(context.Background(), componenttest.NewNopHost()) + if test.initializationErr != "" { + assert.EqualError(t, err, test.initializationErr) + return + } + require.NoError(t, err, "Failed to initialize cpu scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) + if test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, 1, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + assert.Equal(t, 1, metrics.Len()) + + assertCPUMetricValid(t, metrics.At(0), metadata.Metrics.SystemCPUTime.New(), test.expectedStartTime) + + if runtime.GOOS == "linux" { + assertCPUMetricHasLinuxSpecificStateLabels(t, metrics.At(0)) + } + + internal.AssertSameTimeStampForAllMetrics(t, metrics) + }) + } +} + +func assertCPUMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, descriptor, metric) + if startTime != 0 { + internal.AssertDoubleSumMetricStartTimeEquals(t, metric, startTime) + } + assert.GreaterOrEqual(t, metric.DoubleSum().DataPoints().Len(), 4*runtime.NumCPU()) + internal.AssertDoubleSumMetricLabelExists(t, metric, 0, metadata.Labels.Cpu) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 0, metadata.Labels.CPUState, metadata.LabelCPUState.User) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 1, metadata.Labels.CPUState, metadata.LabelCPUState.System) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 2, metadata.Labels.CPUState, metadata.LabelCPUState.Idle) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 3, metadata.Labels.CPUState, metadata.LabelCPUState.Interrupt) +} + +func assertCPUMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 4, 
metadata.Labels.CPUState, metadata.LabelCPUState.Nice) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 5, metadata.Labels.CPUState, metadata.LabelCPUState.Softirq) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 6, metadata.Labels.CPUState, metadata.LabelCPUState.Steal) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 7, metadata.Labels.CPUState, metadata.LabelCPUState.Wait) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go new file mode 100644 index 00000000000..8411395ddb0 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cpuscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for CPU scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "cpu" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. +func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s := newCPUScraper(ctx, cfg) + + ms := scraperhelper.NewMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory_test.go new file mode 100644 index 00000000000..07dd6afd07b --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory_test.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
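The factory above stays thin: all lifecycle plumbing is delegated to scraperhelper.NewMetricsScraper, which takes the scrape function plus an optional start hook. A hedged sketch of the same wiring for a stand-alone scrape function (the no-op bodies and the "example" name are placeholders):

package main

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/receiver/scraperhelper"
)

func main() {
	// Placeholder scrape function; a real one would populate the slice.
	scrape := func(context.Context) (pdata.MetricSlice, error) {
		return pdata.NewMetricSlice(), nil
	}
	// Optional start hook, run once before the first scrape
	// (the CPU scraper uses this to cache the host boot time).
	start := func(context.Context, component.Host) error { return nil }

	ms := scraperhelper.NewMetricsScraper("example", scrape, scraperhelper.WithStart(start))
	_ = ms
}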
+
+package cpuscraper
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap"
+)
+
+func TestCreateDefaultConfig(t *testing.T) {
+	factory := &Factory{}
+	cfg := factory.CreateDefaultConfig()
+	assert.IsType(t, &Config{}, cfg)
+}
+
+func TestCreateMetricsScraper(t *testing.T) {
+	factory := &Factory{}
+	cfg := &Config{}
+
+	scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg)
+
+	assert.NoError(t, err)
+	assert.NotNil(t, scraper)
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go
new file mode 100644
index 00000000000..e36646f3e8d
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package diskscraper
+
+import (
+	"go.opentelemetry.io/collector/internal/processor/filterset"
+	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+)
+
+// Config relating to Disk Metric Scraper.
+type Config struct {
+	internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// Include specifies a filter on the devices that should be included in the generated metrics.
+	// Exclude specifies a filter on the devices that should be excluded from the generated metrics.
+	// If neither `include` nor `exclude` is set, metrics will be generated for all devices.
+	Include MatchConfig `mapstructure:"include"`
+	Exclude MatchConfig `mapstructure:"exclude"`
+}
+
+type MatchConfig struct {
+	filterset.Config `mapstructure:",squash"`
+
+	Devices []string `mapstructure:"devices"`
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go
new file mode 100644
index 00000000000..8ffd1129135
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
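For reference, the include/exclude knobs defined in config.go above are populated like the following sketch, which assumes it lives in the diskscraper package; the device names are placeholders. Because MatchConfig embeds filterset.Config, the match type travels alongside the device list:

package diskscraper

import "go.opentelemetry.io/collector/internal/processor/filterset"

// exampleConfig is illustrative only; "sda" and "loop0" are placeholder
// device names, and "strict" is the exact-match type used by the tests
// later in this diff.
func exampleConfig() *Config {
	return &Config{
		Include: MatchConfig{
			Config:  filterset.Config{MatchType: "strict"},
			Devices: []string{"sda"},
		},
		Exclude: MatchConfig{
			Config:  filterset.Config{MatchType: "strict"},
			Devices: []string{"loop0"},
		},
	}
}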
+ +package diskscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// labels + +const ( + deviceLabelName = "device" + directionLabelName = "direction" +) + +// direction label values + +const ( + readDirectionLabelValue = "read" + writeDirectionLabelValue = "write" +) + +// descriptors + +var diskIODescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.disk.io") + metric.SetDescription("Disk bytes transferred.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var diskOpsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.disk.ops") + metric.SetDescription("Disk operations count.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var diskIOTimeDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.disk.io_time") + metric.SetDescription("Time disk spent activated. On Windows, this is calculated as the inverse of disk idle time.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var diskOperationTimeDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.disk.operation_time") + metric.SetDescription("Time spent in disk operations.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var diskPendingOperationsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.disk.pending_operations") + metric.SetDescription("The queue size of pending I/O operations.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var diskMergedDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.disk.merged") + metric.SetDescription("The number of disk reads merged into single physical disk access operations.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go new file mode 100644 index 00000000000..7d2a696507a --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go @@ -0,0 +1,222 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package diskscraper + +import ( + "context" + "fmt" + "time" + + "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +const ( + standardMetricsLen = 5 + metricsLen = standardMetricsLen + systemSpecificMetricsLen +) + +// scraper for Disk Metrics +type scraper struct { + config *Config + startTime pdata.TimestampUnixNano + includeFS filterset.FilterSet + excludeFS filterset.FilterSet + + // for mocking + bootTime func() (uint64, error) + ioCounters func(names ...string) (map[string]disk.IOCountersStat, error) +} + +// newDiskScraper creates a Disk Scraper +func newDiskScraper(_ context.Context, cfg *Config) (*scraper, error) { + scraper := &scraper{config: cfg, bootTime: host.BootTime, ioCounters: disk.IOCounters} + + var err error + + if len(cfg.Include.Devices) > 0 { + scraper.includeFS, err = filterset.CreateFilterSet(cfg.Include.Devices, &cfg.Include.Config) + if err != nil { + return nil, fmt.Errorf("error creating device include filters: %w", err) + } + } + + if len(cfg.Exclude.Devices) > 0 { + scraper.excludeFS, err = filterset.CreateFilterSet(cfg.Exclude.Devices, &cfg.Exclude.Config) + if err != nil { + return nil, fmt.Errorf("error creating device exclude filters: %w", err) + } + } + + return scraper, nil +} + +func (s *scraper) start(context.Context, component.Host) error { + bootTime, err := s.bootTime() + if err != nil { + return err + } + + s.startTime = pdata.TimestampUnixNano(bootTime * 1e9) + return nil +} + +func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + now := internal.TimeToUnixNano(time.Now()) + ioCounters, err := s.ioCounters() + if err != nil { + return metrics, consumererror.NewPartialScrapeError(err, metricsLen) + } + + // filter devices by name + ioCounters = s.filterByDevice(ioCounters) + + if len(ioCounters) > 0 { + metrics.Resize(metricsLen) + initializeDiskIOMetric(metrics.At(0), s.startTime, now, ioCounters) + initializeDiskOpsMetric(metrics.At(1), s.startTime, now, ioCounters) + initializeDiskIOTimeMetric(metrics.At(2), s.startTime, now, ioCounters) + initializeDiskOperationTimeMetric(metrics.At(3), s.startTime, now, ioCounters) + initializeDiskPendingOperationsMetric(metrics.At(4), now, ioCounters) + appendSystemSpecificMetrics(metrics, 5, s.startTime, now, ioCounters) + } + + return metrics, nil +} + +func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskIODescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2 * len(ioCounters)) + + idx := 0 + for device, ioCounter := range ioCounters { + initializeInt64DataPoint(idps.At(idx+0), startTime, now, device, readDirectionLabelValue, int64(ioCounter.ReadBytes)) + 
initializeInt64DataPoint(idps.At(idx+1), startTime, now, device, writeDirectionLabelValue, int64(ioCounter.WriteBytes)) + idx += 2 + } +} + +func initializeDiskOpsMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskOpsDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2 * len(ioCounters)) + + idx := 0 + for device, ioCounter := range ioCounters { + initializeInt64DataPoint(idps.At(idx+0), startTime, now, device, readDirectionLabelValue, int64(ioCounter.ReadCount)) + initializeInt64DataPoint(idps.At(idx+1), startTime, now, device, writeDirectionLabelValue, int64(ioCounter.WriteCount)) + idx += 2 + } +} + +func initializeDiskIOTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskIOTimeDescriptor.CopyTo(metric) + + ddps := metric.DoubleSum().DataPoints() + ddps.Resize(len(ioCounters)) + + idx := 0 + for device, ioCounter := range ioCounters { + initializeDoubleDataPoint(ddps.At(idx+0), startTime, now, device, "", float64(ioCounter.IoTime)/1e3) + idx++ + } +} + +func initializeDiskOperationTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskOperationTimeDescriptor.CopyTo(metric) + + ddps := metric.DoubleSum().DataPoints() + ddps.Resize(2 * len(ioCounters)) + + idx := 0 + for device, ioCounter := range ioCounters { + initializeDoubleDataPoint(ddps.At(idx+0), startTime, now, device, readDirectionLabelValue, float64(ioCounter.ReadTime)/1e3) + initializeDoubleDataPoint(ddps.At(idx+1), startTime, now, device, writeDirectionLabelValue, float64(ioCounter.WriteTime)/1e3) + idx += 2 + } +} + +func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskPendingOperationsDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(len(ioCounters)) + + idx := 0 + for device, ioCounter := range ioCounters { + initializeDiskPendingDataPoint(idps.At(idx), now, device, int64(ioCounter.IopsInProgress)) + idx++ + } +} + +func initializeInt64DataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(deviceLabelName, deviceLabel) + if directionLabel != "" { + labelsMap.Insert(directionLabelName, directionLabel) + } + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func initializeDoubleDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value float64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(deviceLabelName, deviceLabel) + if directionLabel != "" { + labelsMap.Insert(directionLabelName, directionLabel) + } + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func initializeDiskPendingDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(deviceLabelName, deviceLabel) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func (s *scraper) filterByDevice(ioCounters map[string]disk.IOCountersStat) map[string]disk.IOCountersStat { + if s.includeFS == nil && s.excludeFS == nil { + return ioCounters + } + + for device := range ioCounters { + if 
!s.includeDevice(device) { + delete(ioCounters, device) + } + } + return ioCounters +} + +func (s *scraper) includeDevice(deviceName string) bool { + return (s.includeFS == nil || s.includeFS.Matches(deviceName)) && + (s.excludeFS == nil || !s.excludeFS.Matches(deviceName)) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go new file mode 100644 index 00000000000..5c169a874b4 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!windows + +package diskscraper + +import ( + "github.com/shirou/gopsutil/disk" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const systemSpecificMetricsLen = 0 + +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go new file mode 100644 index 00000000000..541a28bfdd9 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
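The includeDevice predicate above composes two optional filters: a device survives when it matches the include set (or none is configured) and does not match the exclude set, so exclude always wins. A dependency-free sketch of the same truth table, with plain string slices standing in for filterset.FilterSet (nil slice meaning "filter not configured"):

package main

import "fmt"

func contains(set []string, name string) bool {
	for _, s := range set {
		if s == name {
			return true
		}
	}
	return false
}

// includeDevice mirrors the scraper's predicate.
func includeDevice(include, exclude []string, name string) bool {
	return (include == nil || contains(include, name)) &&
		(exclude == nil || !contains(exclude, name))
}

func main() {
	fmt.Println(includeDevice(nil, nil, "sda"))                         // true: no filters configured
	fmt.Println(includeDevice([]string{"sda"}, nil, "sdb"))             // false: not in include set
	fmt.Println(includeDevice(nil, []string{"sdb"}, "sdb"))             // false: excluded
	fmt.Println(includeDevice([]string{"sda"}, []string{"sda"}, "sda")) // false: exclude wins
}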
+ +// +build linux + +package diskscraper + +import ( + "github.com/shirou/gopsutil/disk" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const systemSpecificMetricsLen = 1 + +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + metric := metrics.At(startIdx) + diskMergedDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2 * len(ioCounters)) + + idx := 0 + for device, ioCounter := range ioCounters { + initializeInt64DataPoint(idps.At(idx+0), startTime, now, device, readDirectionLabelValue, int64(ioCounter.MergedReadCount)) + initializeInt64DataPoint(idps.At(idx+1), startTime, now, device, writeDirectionLabelValue, int64(ioCounter.MergedWriteCount)) + idx += 2 + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_test.go new file mode 100644 index 00000000000..516e233e634 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_test.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
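The platform split above is driven entirely by build tags: each target compiles exactly one definition of systemSpecificMetricsLen and appendSystemSpecificMetrics, so metricsLen = standardMetricsLen + systemSpecificMetricsLen resolves at compile time with no runtime branching. Schematically (two separate files, abbreviated to the relevant lines; "foo" is a placeholder package):

// foo_linux.go, compiled only on Linux
// +build linux

package foo

const systemSpecificMetricsLen = 1 // room for system.disk.merged

// foo_fallback.go, compiled on every other non-Windows platform
// +build !linux,!windows

package foo

const systemSpecificMetricsLen = 0 // no extra metrics appended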
+ +// +build !windows + +package diskscraper + +import ( + "context" + "errors" + "testing" + + "github.com/shirou/gopsutil/disk" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +func TestScrape_Others(t *testing.T) { + type testCase struct { + name string + ioCountersFunc func(names ...string) (map[string]disk.IOCountersStat, error) + expectedErr string + } + + testCases := []testCase{ + { + name: "Error", + ioCountersFunc: func(names ...string) (map[string]disk.IOCountersStat, error) { return nil, errors.New("err1") }, + expectedErr: "err1", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper, err := newDiskScraper(context.Background(), &Config{}) + require.NoError(t, err, "Failed to create disk scraper: %v", err) + + if test.ioCountersFunc != nil { + scraper.ioCounters = test.ioCountersFunc + } + + err = scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize disk scraper: %v", err) + + _, err = scraper.scrape(context.Background()) + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, metricsLen, err.(consumererror.PartialScrapeError).Failed) + } + }) + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go new file mode 100644 index 00000000000..921120dd7b0 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
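The test above relies on the partial-error contract used by all of these scrapers: a scrape that fails wholesale wraps its error together with the number of metric points it could not produce, and callers recover that count with a type assertion. A minimal sketch (the count of 5 is arbitrary):

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumererror"
)

func main() {
	// 5 stands in for the number of metrics the scrape failed to produce.
	var err error = consumererror.NewPartialScrapeError(errors.New("err1"), 5)

	if consumererror.IsPartialScrapeError(err) {
		partial := err.(consumererror.PartialScrapeError)
		fmt.Println(partial.Failed) // 5
	}
}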
+ +package diskscraper + +import ( + "context" + "errors" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + config Config + bootTimeFunc func() (uint64, error) + newErrRegex string + initializationErr string + expectMetrics bool + expectedStartTime pdata.TimestampUnixNano + } + + testCases := []testCase{ + { + name: "Standard", + expectMetrics: true, + }, + { + name: "Validate Start Time", + bootTimeFunc: func() (uint64, error) { return 100, nil }, + expectMetrics: true, + expectedStartTime: 100 * 1e9, + }, + { + name: "Boot Time Error", + bootTimeFunc: func() (uint64, error) { return 0, errors.New("err1") }, + initializationErr: "err1", + }, + { + name: "Include Filter that matches nothing", + config: Config{Include: MatchConfig{filterset.Config{MatchType: "strict"}, []string{"@*^#&*$^#)"}}}, + expectMetrics: false, + }, + { + name: "Invalid Include Filter", + config: Config{Include: MatchConfig{Devices: []string{"test"}}}, + newErrRegex: "^error creating device include filters:", + }, + { + name: "Invalid Exclude Filter", + config: Config{Exclude: MatchConfig{Devices: []string{"test"}}}, + newErrRegex: "^error creating device exclude filters:", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper, err := newDiskScraper(context.Background(), &test.config) + if test.newErrRegex != "" { + require.Error(t, err) + require.Regexp(t, test.newErrRegex, err) + return + } + require.NoError(t, err, "Failed to create disk scraper: %v", err) + + if test.bootTimeFunc != nil { + scraper.bootTime = test.bootTimeFunc + } + + err = scraper.start(context.Background(), componenttest.NewNopHost()) + if test.initializationErr != "" { + assert.EqualError(t, err, test.initializationErr) + return + } + require.NoError(t, err, "Failed to initialize disk scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + if !test.expectMetrics { + assert.Equal(t, 0, metrics.Len()) + return + } + + assert.GreaterOrEqual(t, metrics.Len(), 4) + + assertInt64DiskMetricValid(t, metrics.At(0), diskIODescriptor, true, test.expectedStartTime) + assertInt64DiskMetricValid(t, metrics.At(1), diskOpsDescriptor, true, test.expectedStartTime) + assertDoubleDiskMetricValid(t, metrics.At(2), diskIOTimeDescriptor, false, test.expectedStartTime) + assertDoubleDiskMetricValid(t, metrics.At(3), diskOperationTimeDescriptor, true, test.expectedStartTime) + assertDiskPendingOperationsMetricValid(t, metrics.At(4)) + + if runtime.GOOS == "linux" { + assertInt64DiskMetricValid(t, metrics.At(5), diskMergedDescriptor, true, test.expectedStartTime) + } + + internal.AssertSameTimeStampForAllMetrics(t, metrics) + }) + } +} + +func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.Metric, expectDirectionLabels bool, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, expectedDescriptor, metric) + if startTime != 0 { + internal.AssertIntSumMetricStartTimeEquals(t, metric, startTime) + } + + minExpectedPoints := 1 + if expectDirectionLabels { + minExpectedPoints = 2 + } + assert.GreaterOrEqual(t, 
metric.IntSum().DataPoints().Len(), minExpectedPoints) + + internal.AssertIntSumMetricLabelExists(t, metric, 0, deviceLabelName) + if expectDirectionLabels { + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, directionLabelName, writeDirectionLabelValue) + } +} + +func assertDoubleDiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.Metric, expectDirectionLabels bool, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, expectedDescriptor, metric) + if startTime != 0 { + internal.AssertDoubleSumMetricStartTimeEquals(t, metric, startTime) + } + + minExpectedPoints := 1 + if expectDirectionLabels { + minExpectedPoints = 2 + } + assert.GreaterOrEqual(t, metric.DoubleSum().DataPoints().Len(), minExpectedPoints) + + internal.AssertDoubleSumMetricLabelExists(t, metric, 0, deviceLabelName) + if expectDirectionLabels { + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, metric.DoubleSum().DataPoints().Len()-1, directionLabelName, writeDirectionLabelValue) + } +} + +func assertDiskPendingOperationsMetricValid(t *testing.T, metric pdata.Metric) { + internal.AssertDescriptorEqual(t, diskPendingOperationsDescriptor, metric) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 1) + internal.AssertIntSumMetricLabelExists(t, metric, 0, deviceLabelName) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go new file mode 100644 index 00000000000..0a6a56afd3c --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go @@ -0,0 +1,214 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package diskscraper + +import ( + "context" + "fmt" + "time" + + "github.com/shirou/gopsutil/host" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" +) + +const ( + metricsLen = 5 + + logicalDisk = "LogicalDisk" + + readsPerSec = "Disk Reads/sec" + writesPerSec = "Disk Writes/sec" + + readBytesPerSec = "Disk Read Bytes/sec" + writeBytesPerSec = "Disk Write Bytes/sec" + + idleTime = "% Idle Time" + + avgDiskSecsPerRead = "Avg. Disk sec/Read" + avgDiskSecsPerWrite = "Avg. 
Disk sec/Write" + + queueLength = "Current Disk Queue Length" +) + +// scraper for Disk Metrics +type scraper struct { + config *Config + startTime pdata.TimestampUnixNano + includeFS filterset.FilterSet + excludeFS filterset.FilterSet + + perfCounterScraper perfcounters.PerfCounterScraper + + // for mocking + bootTime func() (uint64, error) +} + +// newDiskScraper creates a Disk Scraper +func newDiskScraper(_ context.Context, cfg *Config) (*scraper, error) { + scraper := &scraper{config: cfg, perfCounterScraper: &perfcounters.PerfLibScraper{}, bootTime: host.BootTime} + + var err error + + if len(cfg.Include.Devices) > 0 { + scraper.includeFS, err = filterset.CreateFilterSet(cfg.Include.Devices, &cfg.Include.Config) + if err != nil { + return nil, fmt.Errorf("error creating device include filters: %w", err) + } + } + + if len(cfg.Exclude.Devices) > 0 { + scraper.excludeFS, err = filterset.CreateFilterSet(cfg.Exclude.Devices, &cfg.Exclude.Config) + if err != nil { + return nil, fmt.Errorf("error creating device exclude filters: %w", err) + } + } + + return scraper, nil +} + +func (s *scraper) start(context.Context, component.Host) error { + bootTime, err := s.bootTime() + if err != nil { + return err + } + + s.startTime = pdata.TimestampUnixNano(bootTime * 1e9) + + return s.perfCounterScraper.Initialize(logicalDisk) +} + +func (s *scraper) scrape(ctx context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + now := internal.TimeToUnixNano(time.Now()) + + counters, err := s.perfCounterScraper.Scrape() + if err != nil { + return metrics, consumererror.NewPartialScrapeError(err, metricsLen) + } + + logicalDiskObject, err := counters.GetObject(logicalDisk) + if err != nil { + return metrics, consumererror.NewPartialScrapeError(err, metricsLen) + } + + // filter devices by name + logicalDiskObject.Filter(s.includeFS, s.excludeFS, false) + + logicalDiskCounterValues, err := logicalDiskObject.GetValues(readsPerSec, writesPerSec, readBytesPerSec, writeBytesPerSec, idleTime, avgDiskSecsPerRead, avgDiskSecsPerWrite, queueLength) + if err != nil { + return metrics, consumererror.NewPartialScrapeError(err, metricsLen) + } + + if len(logicalDiskCounterValues) > 0 { + metrics.Resize(metricsLen) + initializeDiskIOMetric(metrics.At(0), s.startTime, now, logicalDiskCounterValues) + initializeDiskOpsMetric(metrics.At(1), s.startTime, now, logicalDiskCounterValues) + initializeDiskIOTimeMetric(metrics.At(2), s.startTime, now, logicalDiskCounterValues) + initializeDiskOperationTimeMetric(metrics.At(3), s.startTime, now, logicalDiskCounterValues) + initializeDiskPendingOperationsMetric(metrics.At(4), now, logicalDiskCounterValues) + } + + return metrics, nil +} + +func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, logicalDiskCounterValues []*perfcounters.CounterValues) { + diskIODescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2 * len(logicalDiskCounterValues)) + for idx, logicalDiskCounter := range logicalDiskCounterValues { + initializeInt64DataPoint(idps.At(2*idx+0), startTime, now, logicalDiskCounter.InstanceName, readDirectionLabelValue, logicalDiskCounter.Values[readBytesPerSec]) + initializeInt64DataPoint(idps.At(2*idx+1), startTime, now, logicalDiskCounter.InstanceName, writeDirectionLabelValue, logicalDiskCounter.Values[writeBytesPerSec]) + } +} + +func initializeDiskOpsMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, logicalDiskCounterValues []*perfcounters.CounterValues) { + 
diskOpsDescriptor.CopyTo(metric)
+
+	idps := metric.IntSum().DataPoints()
+	idps.Resize(2 * len(logicalDiskCounterValues))
+	for idx, logicalDiskCounter := range logicalDiskCounterValues {
+		initializeInt64DataPoint(idps.At(2*idx+0), startTime, now, logicalDiskCounter.InstanceName, readDirectionLabelValue, logicalDiskCounter.Values[readsPerSec])
+		initializeInt64DataPoint(idps.At(2*idx+1), startTime, now, logicalDiskCounter.InstanceName, writeDirectionLabelValue, logicalDiskCounter.Values[writesPerSec])
+	}
+}
+
+func initializeDiskIOTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, logicalDiskCounterValues []*perfcounters.CounterValues) {
+	diskIOTimeDescriptor.CopyTo(metric)
+
+	ddps := metric.DoubleSum().DataPoints()
+	ddps.Resize(len(logicalDiskCounterValues))
+	for idx, logicalDiskCounter := range logicalDiskCounterValues {
+		// disk active time = elapsed time since boot - disk idle time
+		initializeDoubleDataPoint(ddps.At(idx), startTime, now, logicalDiskCounter.InstanceName, "", float64(now-startTime)/1e9-float64(logicalDiskCounter.Values[idleTime])/1e7)
+	}
+}
+
+func initializeDiskOperationTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, logicalDiskCounterValues []*perfcounters.CounterValues) {
+	diskOperationTimeDescriptor.CopyTo(metric)
+
+	ddps := metric.DoubleSum().DataPoints()
+	ddps.Resize(2 * len(logicalDiskCounterValues))
+	for idx, logicalDiskCounter := range logicalDiskCounterValues {
+		initializeDoubleDataPoint(ddps.At(2*idx+0), startTime, now, logicalDiskCounter.InstanceName, readDirectionLabelValue, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7)
+		initializeDoubleDataPoint(ddps.At(2*idx+1), startTime, now, logicalDiskCounter.InstanceName, writeDirectionLabelValue, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7)
+	}
+}
+
+func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.TimestampUnixNano, logicalDiskCounterValues []*perfcounters.CounterValues) {
+	diskPendingOperationsDescriptor.CopyTo(metric)
+
+	idps := metric.IntSum().DataPoints()
+	idps.Resize(len(logicalDiskCounterValues))
+	for idx, logicalDiskCounter := range logicalDiskCounterValues {
+		initializeDiskPendingDataPoint(idps.At(idx), now, logicalDiskCounter.InstanceName, logicalDiskCounter.Values[queueLength])
+	}
+}
+
+func initializeInt64DataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value int64) {
+	labelsMap := dataPoint.LabelsMap()
+	labelsMap.Insert(deviceLabelName, deviceLabel)
+	if directionLabel != "" {
+		labelsMap.Insert(directionLabelName, directionLabel)
+	}
+	dataPoint.SetStartTime(startTime)
+	dataPoint.SetTimestamp(now)
+	dataPoint.SetValue(value)
+}
+
+func initializeDoubleDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value float64) {
+	labelsMap := dataPoint.LabelsMap()
+	labelsMap.Insert(deviceLabelName, deviceLabel)
+	if directionLabel != "" {
+		labelsMap.Insert(directionLabelName, directionLabel)
+	}
+	dataPoint.SetStartTime(startTime)
+	dataPoint.SetTimestamp(now)
+	dataPoint.SetValue(value)
+}
+
+func initializeDiskPendingDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, value int64) {
+	labelsMap := dataPoint.LabelsMap()
+	labelsMap.Insert(deviceLabelName, deviceLabel)
+	dataPoint.SetTimestamp(now)
+	dataPoint.SetValue(value)
+}
diff --git
a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows_test.go new file mode 100644 index 00000000000..e6940fda432 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows_test.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package diskscraper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" +) + +func TestScrape_Error(t *testing.T) { + type testCase struct { + name string + scrapeErr error + getObjectErr error + getValuesErr error + expectedErr string + } + + testCases := []testCase{ + { + name: "scrapeError", + scrapeErr: errors.New("err1"), + expectedErr: "err1", + }, + { + name: "getObjectErr", + getObjectErr: errors.New("err1"), + expectedErr: "err1", + }, + { + name: "getValuesErr", + getValuesErr: errors.New("err1"), + expectedErr: "err1", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper, err := newDiskScraper(context.Background(), &Config{}) + require.NoError(t, err, "Failed to create disk scraper: %v", err) + + scraper.perfCounterScraper = perfcounters.NewMockPerfCounterScraperError(test.scrapeErr, test.getObjectErr, test.getValuesErr) + + err = scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize disk scraper: %v", err) + + _, err = scraper.scrape(context.Background()) + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, metricsLen, err.(consumererror.PartialScrapeError).Failed) + } + }) + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go new file mode 100644 index 00000000000..caffb4c32d3 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package diskscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Disk scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "disk" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. +func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s, err := newDiskScraper(ctx, cfg) + if err != nil { + return nil, err + } + + ms := scraperhelper.NewMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory_test.go new file mode 100644 index 00000000000..e87592b6e21 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory_test.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package diskscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.NoError(t, err) + assert.NotNil(t, scraper) +} + +func TestCreateMetricsScraper_Error(t *testing.T) { + factory := &Factory{} + cfg := &Config{Include: MatchConfig{Devices: []string{""}}} + + _, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.Error(t, err) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go new file mode 100644 index 00000000000..b28e08913fa --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go @@ -0,0 +1,140 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filesystemscraper
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/internal/processor/filterset"
+	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+)
+
+// Config relating to FileSystem Metric Scraper.
+type Config struct {
+	internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// IncludeDevices specifies a filter on the devices that should be included in the generated metrics.
+	IncludeDevices DeviceMatchConfig `mapstructure:"include_devices"`
+	// ExcludeDevices specifies a filter on the devices that should be excluded from the generated metrics.
+	ExcludeDevices DeviceMatchConfig `mapstructure:"exclude_devices"`
+
+	// IncludeFSTypes specifies a filter on the filesystem types that should be included in the generated metrics.
+	IncludeFSTypes FSTypeMatchConfig `mapstructure:"include_fs_types"`
+	// ExcludeFSTypes specifies a filter on the filesystem types that should be excluded from the generated metrics.
+	ExcludeFSTypes FSTypeMatchConfig `mapstructure:"exclude_fs_types"`
+
+	// IncludeMountPoints specifies a filter on the mount points that should be included in the generated metrics.
+	IncludeMountPoints MountPointMatchConfig `mapstructure:"include_mount_points"`
+	// ExcludeMountPoints specifies a filter on the mount points that should be excluded from the generated metrics.
+ ExcludeMountPoints MountPointMatchConfig `mapstructure:"exclude_mount_points"` +} + +type DeviceMatchConfig struct { + filterset.Config `mapstructure:",squash"` + + Devices []string `mapstructure:"devices"` +} + +type FSTypeMatchConfig struct { + filterset.Config `mapstructure:",squash"` + + FSTypes []string `mapstructure:"fs_types"` +} + +type MountPointMatchConfig struct { + filterset.Config `mapstructure:",squash"` + + MountPoints []string `mapstructure:"mount_points"` +} + +type fsFilter struct { + includeDeviceFilter filterset.FilterSet + excludeDeviceFilter filterset.FilterSet + includeFSTypeFilter filterset.FilterSet + excludeFSTypeFilter filterset.FilterSet + includeMountPointFilter filterset.FilterSet + excludeMountPointFilter filterset.FilterSet + filtersExist bool +} + +func (cfg *Config) createFilter() (*fsFilter, error) { + var err error + filter := fsFilter{} + + filter.includeDeviceFilter, err = newIncludeFilterHelper(cfg.IncludeDevices.Devices, &cfg.IncludeDevices.Config, deviceLabelName) + if err != nil { + return nil, err + } + + filter.excludeDeviceFilter, err = newExcludeFilterHelper(cfg.ExcludeDevices.Devices, &cfg.ExcludeDevices.Config, deviceLabelName) + if err != nil { + return nil, err + } + + filter.includeFSTypeFilter, err = newIncludeFilterHelper(cfg.IncludeFSTypes.FSTypes, &cfg.IncludeFSTypes.Config, typeLabelName) + if err != nil { + return nil, err + } + + filter.excludeFSTypeFilter, err = newExcludeFilterHelper(cfg.ExcludeFSTypes.FSTypes, &cfg.ExcludeFSTypes.Config, typeLabelName) + if err != nil { + return nil, err + } + + filter.includeMountPointFilter, err = newIncludeFilterHelper(cfg.IncludeMountPoints.MountPoints, &cfg.IncludeMountPoints.Config, mountPointLabelName) + if err != nil { + return nil, err + } + + filter.excludeMountPointFilter, err = newExcludeFilterHelper(cfg.ExcludeMountPoints.MountPoints, &cfg.ExcludeMountPoints.Config, mountPointLabelName) + if err != nil { + return nil, err + } + + filter.setFiltersExist() + return &filter, nil +} + +func (f *fsFilter) setFiltersExist() { + f.filtersExist = f.includeMountPointFilter != nil || f.excludeMountPointFilter != nil || + f.includeFSTypeFilter != nil || f.excludeFSTypeFilter != nil || + f.includeDeviceFilter != nil || f.excludeDeviceFilter != nil +} + +const ( + excludeKey = "exclude" + includeKey = "include" +) + +func newIncludeFilterHelper(items []string, filterSet *filterset.Config, typ string) (filterset.FilterSet, error) { + return newFilterHelper(items, filterSet, includeKey, typ) +} + +func newExcludeFilterHelper(items []string, filterSet *filterset.Config, typ string) (filterset.FilterSet, error) { + return newFilterHelper(items, filterSet, excludeKey, typ) +} + +func newFilterHelper(items []string, filterSet *filterset.Config, typ string, filterType string) (filterset.FilterSet, error) { + var err error + var filter filterset.FilterSet + + if len(items) > 0 { + filter, err = filterset.CreateFilterSet(items, filterSet) + if err != nil { + return nil, fmt.Errorf("error creating %s %s filters: %w", filterType, typ, err) + } + } + return filter, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go new file mode 100644 index 00000000000..04d02fe5262 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filesystemscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for FileSystem scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "filesystem" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// Type gets the type of the scraper config created by this Factory. +func (f *Factory) Type() string { + return TypeStr +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. +func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s, err := newFileSystemScraper(ctx, cfg) + if err != nil { + return nil, err + } + + ms := scraperhelper.NewMetricsScraper(TypeStr, s.Scrape) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory_test.go new file mode 100644 index 00000000000..7d2712f02bb --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory_test.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
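Unlike the CPU factory, this factory can fail at construction time: newFileSystemScraper builds up to six filter sets, and filterset.CreateFilterSet rejects a config whose MatchType is missing or unknown, which is exactly the failure mode the error-path test below provokes. A hedged sketch of that behavior (filterset is internal to the collector module, so this only compiles inside it; "sda" is a placeholder):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/internal/processor/filterset"
)

func main() {
	// MatchType left empty: CreateFilterSet is expected to reject this,
	// which is what makes CreateMetricsScraper return an error below.
	_, err := filterset.CreateFilterSet([]string{"sda"}, &filterset.Config{})
	fmt.Println(err != nil) // true

	// With a valid match type ("strict" is used elsewhere in this diff),
	// creation succeeds and the set matches exact names.
	fs, err := filterset.CreateFilterSet([]string{"sda"}, &filterset.Config{MatchType: "strict"})
	fmt.Println(err == nil, fs.Matches("sda")) // true true
}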
+ +package filesystemscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.NoError(t, err) + assert.NotNil(t, scraper) +} + +func TestCreateMetricsScraper_Error(t *testing.T) { + factory := &Factory{} + cfg := &Config{IncludeDevices: DeviceMatchConfig{Devices: []string{""}}} + + _, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.Error(t, err) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go new file mode 100644 index 00000000000..b13a2211685 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
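The three match dimensions declared in config.go (devices, filesystem types, mount points) filter independently, and a partition must pass all of them. A typical exclusion-only configuration might look like the following sketch, assumed to live in the filesystemscraper package; the filesystem types and mount point are placeholders:

package filesystemscraper

import "go.opentelemetry.io/collector/internal/processor/filterset"

// exampleConfig is illustrative only.
func exampleConfig() *Config {
	return &Config{
		ExcludeFSTypes: FSTypeMatchConfig{
			Config:  filterset.Config{MatchType: "strict"},
			FSTypes: []string{"tmpfs", "squashfs"}, // placeholder fs types
		},
		ExcludeMountPoints: MountPointMatchConfig{
			Config:      filterset.Config{MatchType: "strict"},
			MountPoints: []string{"/boot"}, // placeholder mount point
		},
	}
}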
+ +package filesystemscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// labels + +const ( + deviceLabelName = "device" + mountModeLabelName = "mode" + mountPointLabelName = "mountpoint" + stateLabelName = "state" + typeLabelName = "type" +) + +// state label values + +const ( + freeLabelValue = "free" + reservedLabelValue = "reserved" + usedLabelValue = "used" +) + +// descriptors + +var fileSystemUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.filesystem.usage") + metric.SetDescription("Filesystem bytes used.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var fileSystemINodesUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.filesystem.inodes.usage") + metric.SetDescription("FileSystem iNodes used.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go new file mode 100644 index 00000000000..cbe32859820 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
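The used/free/reserved state labels above partition a filesystem's capacity, and both descriptors are deliberately non-monotonic sums, since usage can shrink. The raw numbers come from gopsutil, as this small sketch shows; the root path is only an example, and interpreting the Used+Free shortfall as root-reserved space is an assumption hedged in the comment:

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/disk"
)

func main() {
	usage, err := disk.Usage("/") // example path
	if err != nil {
		fmt.Println("usage unavailable:", err)
		return
	}
	// On some platforms Used+Free < Total; the remainder is typically
	// space reserved for root, which the Unix builds of this scraper
	// surface under the "reserved" state label.
	fmt.Println(usage.Used, usage.Free, usage.Total)
}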
+ +package filesystemscraper + +import ( + "context" + "strings" + "time" + + "github.com/shirou/gopsutil/disk" + + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +const ( + standardMetricsLen = 1 + metricsLen = standardMetricsLen + systemSpecificMetricsLen +) + +// scraper for FileSystem Metrics +type scraper struct { + config *Config + fsFilter fsFilter + + // for mocking gopsutil disk.Partitions & disk.Usage + partitions func(bool) ([]disk.PartitionStat, error) + usage func(string) (*disk.UsageStat, error) +} + +type deviceUsage struct { + partition disk.PartitionStat + usage *disk.UsageStat +} + +// newFileSystemScraper creates a FileSystem Scraper +func newFileSystemScraper(_ context.Context, cfg *Config) (*scraper, error) { + fsFilter, err := cfg.createFilter() + if err != nil { + return nil, err + } + + scraper := &scraper{config: cfg, partitions: disk.Partitions, usage: disk.Usage, fsFilter: *fsFilter} + return scraper, nil +} + +// Scrape +func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + now := internal.TimeToUnixNano(time.Now()) + + // omit logical (virtual) filesystems (not relevant for windows) + partitions, err := s.partitions( /*all=*/ false) + if err != nil { + return metrics, consumererror.NewPartialScrapeError(err, metricsLen) + } + + var errors []error + usages := make([]*deviceUsage, 0, len(partitions)) + for _, partition := range partitions { + if !s.fsFilter.includePartition(partition) { + continue + } + usage, usageErr := s.usage(partition.Mountpoint) + if usageErr != nil { + errors = append(errors, consumererror.NewPartialScrapeError(usageErr, 0)) + continue + } + + usages = append(usages, &deviceUsage{partition, usage}) + } + + if len(usages) > 0 { + metrics.Resize(metricsLen) + initializeFileSystemUsageMetric(metrics.At(0), now, usages) + appendSystemSpecificMetrics(metrics, 1, now, usages) + } + + err = scraperhelper.CombineScrapeErrors(errors) + if err != nil && len(usages) == 0 { + partialErr := err.(consumererror.PartialScrapeError) + partialErr.Failed = metricsLen + err = partialErr + } + + return metrics, err +} + +func initializeFileSystemUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { + fileSystemUsageDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(fileSystemStatesLen * len(deviceUsages)) + for i, deviceUsage := range deviceUsages { + appendFileSystemUsageStateDataPoints(idps, i*fileSystemStatesLen, now, deviceUsage) + } +} + +func initializeFileSystemUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, partition disk.PartitionStat, stateLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(deviceLabelName, partition.Device) + labelsMap.Insert(typeLabelName, partition.Fstype) + labelsMap.Insert(mountModeLabelName, getMountMode(partition.Opts)) + labelsMap.Insert(mountPointLabelName, partition.Mountpoint) + labelsMap.Insert(stateLabelName, stateLabel) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func getMountMode(opts string) string { + splitOptions := strings.Split(opts, ",") + if exists(splitOptions, "rw") { + return "rw" + } else if exists(splitOptions, "ro") { + return "ro" + } + return "unknown" +} + +func exists(options []string, opt string) bool { + for 
_, o := range options { + if o == opt { + return true + } + } + return false +} + +func (f *fsFilter) includePartition(partition disk.PartitionStat) bool { + // If filters do not exist, return early. + if !f.filtersExist || (f.includeDevice(partition.Device) && + f.includeFSType(partition.Fstype) && + f.includeMountPoint(partition.Mountpoint)) { + return true + } + return false +} + +func (f *fsFilter) includeDevice(deviceName string) bool { + return (f.includeDeviceFilter == nil || f.includeDeviceFilter.Matches(deviceName)) && + (f.excludeDeviceFilter == nil || !f.excludeDeviceFilter.Matches(deviceName)) +} + +func (f *fsFilter) includeFSType(fsType string) bool { + return (f.includeFSTypeFilter == nil || f.includeFSTypeFilter.Matches(fsType)) && + (f.excludeFSTypeFilter == nil || !f.excludeFSTypeFilter.Matches(fsType)) +} + +func (f *fsFilter) includeMountPoint(mountPoint string) bool { + return (f.includeMountPointFilter == nil || f.includeMountPointFilter.Matches(mountPoint)) && + (f.excludeMountPointFilter == nil || !f.excludeMountPointFilter.Matches(mountPoint)) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go new file mode 100644 index 00000000000..f8365a5be97 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin,!freebsd,!openbsd,!solaris + +package filesystemscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +const fileSystemStatesLen = 2 + +func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsage *deviceUsage) { + initializeFileSystemUsageDataPoint(idps.At(startIdx+0), now, deviceUsage.partition, usedLabelValue, int64(deviceUsage.usage.Used)) + initializeFileSystemUsageDataPoint(idps.At(startIdx+1), now, deviceUsage.partition, freeLabelValue, int64(deviceUsage.usage.Free)) +} + +const systemSpecificMetricsLen = 0 + +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go new file mode 100644 index 00000000000..12d2aee3365 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go @@ -0,0 +1,288 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filesystemscraper + +import ( + "context" + "errors" + "runtime" + "testing" + + "github.com/shirou/gopsutil/disk" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + config Config + partitionsFunc func(bool) ([]disk.PartitionStat, error) + usageFunc func(string) (*disk.UsageStat, error) + expectMetrics bool + expectedDeviceDataPoints int + expectedDeviceLabelValues []map[string]string + newErrRegex string + expectedErr string + } + + testCases := []testCase{ + { + name: "Standard", + expectMetrics: true, + }, + { + name: "Include single device filter", + config: Config{IncludeDevices: DeviceMatchConfig{filterset.Config{MatchType: "strict"}, []string{"a"}}}, + partitionsFunc: func(bool) ([]disk.PartitionStat, error) { + return []disk.PartitionStat{{Device: "a"}, {Device: "b"}}, nil + }, + usageFunc: func(string) (*disk.UsageStat, error) { + return &disk.UsageStat{}, nil + }, + expectMetrics: true, + expectedDeviceDataPoints: 1, + }, + { + name: "Include Device Filter that matches nothing", + config: Config{IncludeDevices: DeviceMatchConfig{filterset.Config{MatchType: "strict"}, []string{"@*^#&*$^#)"}}}, + expectMetrics: false, + }, + { + name: "Include filter with devices, filesystem type and mount points", + config: Config{ + IncludeDevices: DeviceMatchConfig{ + Config: filterset.Config{ + MatchType: filterset.Strict, + }, + Devices: []string{"device_a", "device_b"}, + }, + ExcludeFSTypes: FSTypeMatchConfig{ + Config: filterset.Config{ + MatchType: filterset.Strict, + }, + FSTypes: []string{"fs_type_b"}, + }, + ExcludeMountPoints: MountPointMatchConfig{ + Config: filterset.Config{ + MatchType: filterset.Strict, + }, + MountPoints: []string{"mount_point_b", "mount_point_c"}, + }, + }, + usageFunc: func(s string) (*disk.UsageStat, error) { + return &disk.UsageStat{ + Fstype: "fs_type_a", + }, nil + }, + partitionsFunc: func(b bool) ([]disk.PartitionStat, error) { + return []disk.PartitionStat{ + { + Device: "device_a", + Mountpoint: "mount_point_a", + Fstype: "fs_type_a", + }, + { + Device: "device_a", + Mountpoint: "mount_point_b", + Fstype: "fs_type_b", + }, + { + Device: "device_b", + Mountpoint: "mount_point_c", + Fstype: "fs_type_b", + }, + { + Device: "device_b", + Mountpoint: "mount_point_d", + Fstype: "fs_type_c", + }, + }, nil + }, + expectMetrics: true, + expectedDeviceDataPoints: 2, + expectedDeviceLabelValues: []map[string]string{ + { + "device": "device_a", + "mountpoint": "mount_point_a", + "type": "fs_type_a", + "mode": "unknown", + }, + { + "device": "device_b", + "mountpoint": "mount_point_d", + "type": "fs_type_c", + "mode": "unknown", + }, + }, + }, + { + name: "Invalid Include Device Filter", + config: Config{IncludeDevices: DeviceMatchConfig{Devices: []string{"test"}}}, + newErrRegex: "^error 
creating device include filters:", + }, + { + name: "Invalid Exclude Device Filter", + config: Config{ExcludeDevices: DeviceMatchConfig{Devices: []string{"test"}}}, + newErrRegex: "^error creating device exclude filters:", + }, + { + name: "Invalid Include Filesystems Filter", + config: Config{IncludeFSTypes: FSTypeMatchConfig{FSTypes: []string{"test"}}}, + newErrRegex: "^error creating type include filters:", + }, + { + name: "Invalid Exclude Filesystems Filter", + config: Config{ExcludeFSTypes: FSTypeMatchConfig{FSTypes: []string{"test"}}}, + newErrRegex: "^error creating type exclude filters:", + }, + { + name: "Invalid Include Moountpoints Filter", + config: Config{IncludeMountPoints: MountPointMatchConfig{MountPoints: []string{"test"}}}, + newErrRegex: "^error creating mountpoint include filters:", + }, + { + name: "Invalid Exclude Moountpoints Filter", + config: Config{ExcludeMountPoints: MountPointMatchConfig{MountPoints: []string{"test"}}}, + newErrRegex: "^error creating mountpoint exclude filters:", + }, + { + name: "Partitions Error", + partitionsFunc: func(bool) ([]disk.PartitionStat, error) { return nil, errors.New("err1") }, + expectedErr: "err1", + }, + { + name: "Usage Error", + usageFunc: func(string) (*disk.UsageStat, error) { return nil, errors.New("err2") }, + expectedErr: "err2", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper, err := newFileSystemScraper(context.Background(), &test.config) + if test.newErrRegex != "" { + require.Error(t, err) + require.Regexp(t, test.newErrRegex, err) + return + } + require.NoError(t, err, "Failed to create file system scraper: %v", err) + + if test.partitionsFunc != nil { + scraper.partitions = test.partitionsFunc + } + if test.usageFunc != nil { + scraper.usage = test.usageFunc + } + + metrics, err := scraper.Scrape(context.Background()) + if test.expectedErr != "" { + assert.Contains(t, err.Error(), test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, metricsLen, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + if !test.expectMetrics { + assert.Equal(t, 0, metrics.Len()) + return + } + + assert.GreaterOrEqual(t, metrics.Len(), 1) + + assertFileSystemUsageMetricValid( + t, + metrics.At(0), + fileSystemUsageDescriptor, + test.expectedDeviceDataPoints*fileSystemStatesLen, + test.expectedDeviceLabelValues, + ) + + if isUnix() { + assertFileSystemUsageMetricHasUnixSpecificStateLabels(t, metrics.At(0)) + assertFileSystemUsageMetricValid( + t, + metrics.At(1), + fileSystemINodesUsageDescriptor, + test.expectedDeviceDataPoints*2, + test.expectedDeviceLabelValues, + ) + } + + internal.AssertSameTimeStampForAllMetrics(t, metrics) + }) + } +} + +func assertFileSystemUsageMetricValid( + t *testing.T, + metric pdata.Metric, + descriptor pdata.Metric, + expectedDeviceDataPoints int, + expectedDeviceLabelValues []map[string]string) { + internal.AssertDescriptorEqual(t, descriptor, metric) + for i := 0; i < metric.IntSum().DataPoints().Len(); i++ { + for _, label := range []string{deviceLabelName, typeLabelName, mountModeLabelName, mountPointLabelName} { + internal.AssertIntSumMetricLabelExists(t, metric, i, label) + } + } + + if expectedDeviceDataPoints > 0 { + assert.Equal(t, expectedDeviceDataPoints, metric.IntSum().DataPoints().Len()) + + // Assert label values if specified. 
+ if expectedDeviceLabelValues != nil { + dpsPerDevice := expectedDeviceDataPoints / len(expectedDeviceLabelValues) + deviceIdx := 0 + for i := 0; i < metric.IntSum().DataPoints().Len(); i += dpsPerDevice { + for j := i; j < i+dpsPerDevice; j++ { + for labelKey, labelValue := range expectedDeviceLabelValues[deviceIdx] { + internal.AssertIntSumMetricLabelHasValue(t, metric, j, labelKey, labelValue) + } + } + deviceIdx++ + } + } + } else { + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), fileSystemStatesLen) + } + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, stateLabelName, usedLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, stateLabelName, freeLabelValue) +} + +func assertFileSystemUsageMetricHasUnixSpecificStateLabels(t *testing.T, metric pdata.Metric) { + internal.AssertIntSumMetricLabelHasValue(t, metric, 2, stateLabelName, reservedLabelValue) +} + +func isUnix() bool { + for _, unixOS := range []string{"linux", "darwin", "freebsd", "openbsd", "solaris"} { + if runtime.GOOS == unixOS { + return true + } + } + + return false +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go new file mode 100644 index 00000000000..fafe5ec8264 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
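The assertions above depend on a fixed data point layout: points are grouped per device, with fileSystemStatesLen consecutive points (one per state) for each matched partition. A worked sketch of the indexing, assuming the Unix value of 3 (used, free, reserved) defined in the file below; the helper is hypothetical and only makes the arithmetic explicit:

package filesystemscraper

// dataPointIndex is a hypothetical helper making the layout explicit: with
// fileSystemStatesLen == 3, two matched partitions produce 6 points, and
// device d's state s lives at index d*3+s; e.g. dataPointIndex(1, 2) == 5 is
// the "reserved" point of the second device.
func dataPointIndex(deviceIdx, stateIdx int) int {
	return deviceIdx*fileSystemStatesLen + stateIdx
}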
+ +// +build linux darwin freebsd openbsd solaris + +package filesystemscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +const fileSystemStatesLen = 3 + +func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsage *deviceUsage) { + initializeFileSystemUsageDataPoint(idps.At(startIdx+0), now, deviceUsage.partition, usedLabelValue, int64(deviceUsage.usage.Used)) + initializeFileSystemUsageDataPoint(idps.At(startIdx+1), now, deviceUsage.partition, freeLabelValue, int64(deviceUsage.usage.Free)) + initializeFileSystemUsageDataPoint(idps.At(startIdx+2), now, deviceUsage.partition, reservedLabelValue, int64(deviceUsage.usage.Total-deviceUsage.usage.Used-deviceUsage.usage.Free)) +} + +const systemSpecificMetricsLen = 1 + +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { + metric := metrics.At(startIdx) + fileSystemINodesUsageDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2 * len(deviceUsages)) + for idx, deviceUsage := range deviceUsages { + startIndex := 2 * idx + initializeFileSystemUsageDataPoint(idps.At(startIndex+0), now, deviceUsage.partition, usedLabelValue, int64(deviceUsage.usage.InodesUsed)) + initializeFileSystemUsageDataPoint(idps.At(startIndex+1), now, deviceUsage.partition, freeLabelValue, int64(deviceUsage.usage.InodesFree)) + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go new file mode 100644 index 00000000000..3f3d35fb8b6 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadscraper + +import "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + +// Config relating to Load Metric Scraper. +type Config struct { + internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go new file mode 100644 index 00000000000..fbb6e5a593d --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Load scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "load" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. +func (f *Factory) CreateMetricsScraper( + ctx context.Context, + logger *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s := newLoadScraper(ctx, logger, cfg) + + ms := scraperhelper.NewMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + scraperhelper.WithShutdown(s.shutdown), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory_test.go new file mode 100644 index 00000000000..d457c4a4662 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory_test.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.NoError(t, err) + assert.NotNil(t, scraper) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go new file mode 100644 index 00000000000..f52437a1611 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// descriptors + +var loadAvg1MDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.cpu.load_average.1m") + metric.SetDescription("Average CPU Load over 1 minute.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + return metric +}() + +var loadAvg5mDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.cpu.load_average.5m") + metric.SetDescription("Average CPU Load over 5 minutes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + return metric +}() + +var loadAvg15mDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.cpu.load_average.15m") + metric.SetDescription("Average CPU Load over 15 minutes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + return metric +}() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go new file mode 100644 index 00000000000..d13b7540b66 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package loadscraper
+
+import (
+	"context"
+	"time"
+
+	"github.com/shirou/gopsutil/load"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+)
+
+const metricsLen = 3
+
+// scraper for Load Metrics
+type scraper struct {
+	logger *zap.Logger
+	config *Config
+
+	// for mocking
+	load func() (*load.AvgStat, error)
+}
+
+// newLoadScraper creates a Load scraper
+func newLoadScraper(_ context.Context, logger *zap.Logger, cfg *Config) *scraper {
+	return &scraper{logger: logger, config: cfg, load: getSampledLoadAverages}
+}
+
+// start begins load sampling (a no-op on non-Windows platforms)
+func (s *scraper) start(ctx context.Context, _ component.Host) error {
+	return startSampling(ctx, s.logger)
+}
+
+// shutdown stops load sampling
+func (s *scraper) shutdown(ctx context.Context) error {
+	return stopSampling(ctx)
+}
+
+// scrape collects the 1m, 5m, and 15m load average metrics
+func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) {
+	metrics := pdata.NewMetricSlice()
+
+	now := internal.TimeToUnixNano(time.Now())
+	avgLoadValues, err := s.load()
+	if err != nil {
+		return metrics, consumererror.NewPartialScrapeError(err, metricsLen)
+	}
+
+	metrics.Resize(metricsLen)
+	initializeLoadMetric(metrics.At(0), loadAvg1MDescriptor, now, avgLoadValues.Load1)
+	initializeLoadMetric(metrics.At(1), loadAvg5mDescriptor, now, avgLoadValues.Load5)
+	initializeLoadMetric(metrics.At(2), loadAvg15mDescriptor, now, avgLoadValues.Load15)
+	return metrics, nil
+}
+
+func initializeLoadMetric(metric pdata.Metric, metricDescriptor pdata.Metric, now pdata.TimestampUnixNano, value float64) {
+	metricDescriptor.CopyTo(metric)
+
+	dps := metric.DoubleGauge().DataPoints()
+	dps.Resize(1)
+	dp := dps.At(0)
+	dp.SetTimestamp(now)
+	dp.SetValue(value)
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_others.go
new file mode 100644
index 00000000000..5b0c2afcbd4
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_others.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
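The scraper's load function field above is a small dependency-injection seam: production code keeps the gopsutil-backed getSampledLoadAverages, while tests (see load_scraper_test.go below) swap in a deterministic stub. A sketch of the pattern, with a hypothetical helper and illustrative values:

package loadscraper

import (
	"context"

	"github.com/shirou/gopsutil/load"
	"go.uber.org/zap"
)

// newStubbedScraper is a hypothetical helper: replacing the load field keeps
// the scrape path identical while removing the OS dependency.
func newStubbedScraper() *scraper {
	s := newLoadScraper(context.Background(), zap.NewNop(), &Config{})
	s.load = func() (*load.AvgStat, error) {
		return &load.AvgStat{Load1: 1.5, Load5: 1.0, Load15: 0.5}, nil
	}
	return s
}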
+ +// +build !windows + +package loadscraper + +import ( + "context" + + "github.com/shirou/gopsutil/load" + "go.uber.org/zap" +) + +// unix based systems sample & compute load averages in the kernel, so nothing to do here +func startSampling(_ context.Context, _ *zap.Logger) error { + return nil +} + +func stopSampling(_ context.Context) error { + return nil +} + +func getSampledLoadAverages() (*load.AvgStat, error) { + return load.Avg() +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go new file mode 100644 index 00000000000..0e5aaae219a --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadscraper + +import ( + "context" + "errors" + "testing" + + "github.com/shirou/gopsutil/load" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + loadFunc func() (*load.AvgStat, error) + expectedErr string + } + + testCases := []testCase{ + { + name: "Standard", + }, + { + name: "Load Error", + loadFunc: func() (*load.AvgStat, error) { return nil, errors.New("err1") }, + expectedErr: "err1", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newLoadScraper(context.Background(), zap.NewNop(), &Config{}) + if test.loadFunc != nil { + scraper.load = test.loadFunc + } + + err := scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize load scraper: %v", err) + defer func() { assert.NoError(t, scraper.shutdown(context.Background())) }() + + metrics, err := scraper.scrape(context.Background()) + if test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, metricsLen, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + // expect 3 metrics + assert.Equal(t, 3, metrics.Len()) + + // expect a single datapoint for 1m, 5m & 15m load metrics + assertMetricHasSingleDatapoint(t, metrics.At(0), loadAvg1MDescriptor) + assertMetricHasSingleDatapoint(t, metrics.At(1), loadAvg5mDescriptor) + assertMetricHasSingleDatapoint(t, metrics.At(2), loadAvg15mDescriptor) + + internal.AssertSameTimeStampForAllMetrics(t, metrics) + }) + } +} + +func assertMetricHasSingleDatapoint(t *testing.T, metric 
pdata.Metric, descriptor pdata.Metric) { + internal.AssertDescriptorEqual(t, descriptor, metric) + assert.Equal(t, 1, metric.DoubleGauge().DataPoints().Len()) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows.go new file mode 100644 index 00000000000..2dddfba7fc6 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows.go @@ -0,0 +1,169 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package loadscraper + +import ( + "context" + "math" + "sync" + "time" + + "github.com/shirou/gopsutil/load" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" +) + +// Sample processor queue length at a 5s frequency, and calculate exponentially weighted moving averages +// as per https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation + +const ( + system = "System" + processorQueueLength = "Processor Queue Length" +) + +var ( + samplingFrequency = 5 * time.Second + + loadAvgFactor1m = 1 / math.Exp(samplingFrequency.Seconds()/time.Minute.Seconds()) + loadAvgFactor5m = 1 / math.Exp(samplingFrequency.Seconds()/(5*time.Minute).Seconds()) + loadAvgFactor15m = 1 / math.Exp(samplingFrequency.Seconds()/(15*time.Minute).Seconds()) +) + +var ( + scraperCount int + startupLock sync.Mutex + + samplerInstance *sampler +) + +type sampler struct { + done chan struct{} + logger *zap.Logger + perfCounterScraper perfcounters.PerfCounterScraper + loadAvg1m float64 + loadAvg5m float64 + loadAvg15m float64 + lock sync.RWMutex +} + +func startSampling(_ context.Context, logger *zap.Logger) error { + startupLock.Lock() + defer startupLock.Unlock() + + // startSampling may be called multiple times if multiple scrapers are + // initialized - but we only want to initialize a single load sampler + scraperCount++ + if scraperCount > 1 { + return nil + } + + var err error + samplerInstance, err = newSampler(logger) + if err != nil { + return err + } + + samplerInstance.startSamplingTicker() + return nil +} + +func newSampler(logger *zap.Logger) (*sampler, error) { + perfCounterScraper := &perfcounters.PerfLibScraper{} + if err := perfCounterScraper.Initialize(system); err != nil { + return nil, err + } + + sampler := &sampler{ + logger: logger, + perfCounterScraper: perfCounterScraper, + done: make(chan struct{}), + } + + return sampler, nil +} + +func (sw *sampler) startSamplingTicker() { + go func() { + ticker := time.NewTicker(samplingFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + sw.sampleLoad() + case <-sw.done: + return + } + } + }() +} + +func (sw *sampler) sampleLoad() { + counters, err := sw.perfCounterScraper.Scrape() + if err != nil { + sw.logger.Error("Load Scraper: failed to measure processor queue length", 
zap.Error(err)) + return + } + + systemObject, err := counters.GetObject(system) + if err != nil { + sw.logger.Error("Load Scraper: failed to measure processor queue length", zap.Error(err)) + return + } + + counterValues, err := systemObject.GetValues(processorQueueLength) + if err != nil { + sw.logger.Error("Load Scraper: failed to measure processor queue length", zap.Error(err)) + return + } + + currentLoad := float64(counterValues[0].Values[processorQueueLength]) + + sw.lock.Lock() + defer sw.lock.Unlock() + sw.loadAvg1m = sw.loadAvg1m*loadAvgFactor1m + currentLoad*(1-loadAvgFactor1m) + sw.loadAvg5m = sw.loadAvg5m*loadAvgFactor5m + currentLoad*(1-loadAvgFactor5m) + sw.loadAvg15m = sw.loadAvg15m*loadAvgFactor15m + currentLoad*(1-loadAvgFactor15m) +} + +func stopSampling(_ context.Context) error { + startupLock.Lock() + defer startupLock.Unlock() + + // only stop sampling if all load scrapers have been closed + scraperCount-- + if scraperCount > 0 { + return nil + } + + close(samplerInstance.done) + return nil +} + +func getSampledLoadAverages() (*load.AvgStat, error) { + samplerInstance.lock.RLock() + defer samplerInstance.lock.RUnlock() + + avgStat := &load.AvgStat{ + Load1: samplerInstance.loadAvg1m, + Load5: samplerInstance.loadAvg5m, + Load15: samplerInstance.loadAvg15m, + } + + return avgStat, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go new file mode 100644 index 00000000000..9966af8a6c4 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
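To make the moving-average arithmetic in sampleLoad concrete: with a 5s sampling period the 1m decay factor is 1/e^(5/60) ≈ 0.92, so each sample pulls the average roughly 8% of the way toward the current queue length. A hypothetical worked example mirroring the update rule:

package loadscraper

import "math"

// ewmaExample walks the sampleLoad update by hand: starting from zero and
// observing a constant queue length of 10 for three 5s samples, the 1m
// average reaches only about 2.21, illustrating the gradual climb toward
// the instantaneous value.
func ewmaExample() float64 {
	factor := 1 / math.Exp(5.0/60.0) // ≈ 0.9200, same as loadAvgFactor1m
	avg := 0.0
	for i := 0; i < 3; i++ {
		avg = avg*factor + 10*(1-factor)
	}
	return avg // ≈ 2.21
}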
+ +// +build windows + +package loadscraper + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" +) + +func TestStartSampling(t *testing.T) { + // override sampling frequency to 2ms + samplingFrequency = 2 * time.Millisecond + + // startSampling should set up perf counter and start sampling + startSampling(context.Background(), zap.NewNop()) + assertSamplingUnderway(t) + + // override the processor queue length perf counter with a mock + // that will ensure a positive value is returned + assert.IsType(t, &perfcounters.PerfLibScraper{}, samplerInstance.perfCounterScraper) + samplerInstance.perfCounterScraper = perfcounters.NewMockPerfCounterScraper(map[string]map[string][]int64{ + system: {processorQueueLength: {100}}, + }) + + // second call to startSampling should succeed, but not do anything + startSampling(context.Background(), zap.NewNop()) + assertSamplingUnderway(t) + assert.IsType(t, &perfcounters.MockPerfCounterScraper{}, samplerInstance.perfCounterScraper) + + // ensure that a positive load avg is returned by a call to + // "getSampledLoadAverages" which validates the value from the + // mock perf counter was used + require.Eventually(t, func() bool { + avgLoadValues, err := getSampledLoadAverages() + assert.NoError(t, err) + return avgLoadValues.Load1 > 0 && avgLoadValues.Load5 > 0 && avgLoadValues.Load15 > 0 + }, time.Second, time.Millisecond, "Load Avg was not set after 1s") + + // sampling should continue after first call to stopSampling since + // startSampling was called twice + stopSampling(context.Background()) + assertSamplingUnderway(t) + + // second call to stopSampling should close perf counter, stop + // sampling, and clean up the sampler + stopSampling(context.Background()) + assertSamplingStopped(t) +} + +func assertSamplingUnderway(t *testing.T) { + assert.NotNil(t, samplerInstance) + assert.NotNil(t, samplerInstance.perfCounterScraper) + + select { + case <-samplerInstance.done: + assert.Fail(t, "Load scraper sampling done channel unexpectedly closed") + default: + } +} + +func assertSamplingStopped(t *testing.T) { + select { + case <-samplerInstance.done: + default: + assert.Fail(t, "Load scraper sampling done channel not closed") + } +} + +func TestSampleLoad(t *testing.T) { + counterReturnValues := []int64{10, 20, 30, 40, 50} + mockPerfCounterScraper := perfcounters.NewMockPerfCounterScraper(map[string]map[string][]int64{ + system: {processorQueueLength: counterReturnValues}, + }) + + samplerInstance = &sampler{perfCounterScraper: mockPerfCounterScraper} + + for i := 0; i < len(counterReturnValues); i++ { + samplerInstance.sampleLoad() + } + + assert.Equal(t, calcExpectedLoad(counterReturnValues, loadAvgFactor1m), samplerInstance.loadAvg1m) + assert.Equal(t, calcExpectedLoad(counterReturnValues, loadAvgFactor5m), samplerInstance.loadAvg5m) + assert.Equal(t, calcExpectedLoad(counterReturnValues, loadAvgFactor15m), samplerInstance.loadAvg15m) +} + +func calcExpectedLoad(scrapedValues []int64, loadAvgFactor float64) float64 { + // replicate the calculations that should be performed to determine the exponentially + // weighted moving averages based on the specified scraped values + var expectedLoad float64 + for i := 0; i < len(scrapedValues); i++ { + expectedLoad = expectedLoad*loadAvgFactor + float64(scrapedValues[i])*(1-loadAvgFactor) + } + return expectedLoad +} + +func 
Benchmark_SampleLoad(b *testing.B) { + s, _ := newSampler(zap.NewNop()) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + s.sampleLoad() + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go new file mode 100644 index 00000000000..3f5f5c6a853 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memoryscraper + +import "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + +// Config relating to Memory Metric Scraper. +type Config struct { + internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go new file mode 100644 index 00000000000..94aefcacf97 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memoryscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Memory scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "memory" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. 
+func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s := newMemoryScraper(ctx, cfg) + + ms := scraperhelper.NewMetricsScraper(TypeStr, s.Scrape) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory_test.go new file mode 100644 index 00000000000..3ca86050ac5 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory_test.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memoryscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.NoError(t, err) + assert.NotNil(t, scraper) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go new file mode 100644 index 00000000000..16b49dc725c --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package memoryscraper + +import ( + "context" + "time" + + "github.com/shirou/gopsutil/mem" + + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +const metricsLen = 1 + +// scraper for Memory Metrics +type scraper struct { + config *Config + + // for mocking gopsutil mem.VirtualMemory + virtualMemory func() (*mem.VirtualMemoryStat, error) +} + +// newMemoryScraper creates a Memory Scraper +func newMemoryScraper(_ context.Context, cfg *Config) *scraper { + return &scraper{config: cfg, virtualMemory: mem.VirtualMemory} +} + +// Scrape +func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + now := internal.TimeToUnixNano(time.Now()) + memInfo, err := s.virtualMemory() + if err != nil { + return metrics, consumererror.NewPartialScrapeError(err, metricsLen) + } + + metrics.Resize(metricsLen) + initializeMemoryUsageMetric(metrics.At(0), now, memInfo) + return metrics, nil +} + +func initializeMemoryUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { + metadata.Metrics.SystemMemoryUsage.New().CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(memStatesLen) + appendMemoryUsageStateDataPoints(idps, now, memInfo) +} + +func initializeMemoryUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(metadata.Labels.MemState, stateLabel) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go new file mode 100644 index 00000000000..350ba887c27 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
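initializeMemoryUsageMetric above resizes the data point slice to memStatesLen before delegating, so each platform file must fill exactly that many labeled points. A hypothetical summary of that contract, with counts taken from the platform files in this diff:

package memoryscraper

// memStatesByGOOS is illustrative only: the caller resizes to memStatesLen,
// and each platform's appendMemoryUsageStateDataPoints writes exactly that
// many state-labeled points.
var memStatesByGOOS = map[string]int{
	"linux":   6, // Used, Free, Buffered, Cached, SlabReclaimable, SlabUnreclaimable
	"windows": 2, // Used, Free (reported from Available)
	"darwin":  3, // Used, Free, Inactive (the !linux,!windows fallback)
}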
+ +// +build linux + +package memoryscraper + +import ( + "github.com/shirou/gopsutil/mem" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +const memStatesLen = 6 + +func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { + initializeMemoryUsageDataPoint(idps.At(0), now, metadata.LabelMemState.Used, int64(memInfo.Used)) + initializeMemoryUsageDataPoint(idps.At(1), now, metadata.LabelMemState.Free, int64(memInfo.Free)) + initializeMemoryUsageDataPoint(idps.At(2), now, metadata.LabelMemState.Buffered, int64(memInfo.Buffers)) + initializeMemoryUsageDataPoint(idps.At(3), now, metadata.LabelMemState.Cached, int64(memInfo.Cached)) + initializeMemoryUsageDataPoint(idps.At(4), now, metadata.LabelMemState.SlabReclaimable, int64(memInfo.SReclaimable)) + initializeMemoryUsageDataPoint(idps.At(5), now, metadata.LabelMemState.SlabUnreclaimable, int64(memInfo.SUnreclaim)) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go new file mode 100644 index 00000000000..474a5aa4cf4 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!windows + +package memoryscraper + +import ( + "github.com/shirou/gopsutil/mem" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +const memStatesLen = 3 + +func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { + initializeMemoryUsageDataPoint(idps.At(0), now, metadata.LabelMemState.Used, int64(memInfo.Used)) + initializeMemoryUsageDataPoint(idps.At(1), now, metadata.LabelMemState.Free, int64(memInfo.Free)) + initializeMemoryUsageDataPoint(idps.At(2), now, metadata.LabelMemState.Inactive, int64(memInfo.Inactive)) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go new file mode 100644 index 00000000000..8b2b1a6a0c7 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memoryscraper + +import ( + "context" + "errors" + "runtime" + "testing" + + "github.com/shirou/gopsutil/mem" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + virtualMemoryFunc func() (*mem.VirtualMemoryStat, error) + expectedErr string + } + + testCases := []testCase{ + { + name: "Standard", + }, + { + name: "Error", + virtualMemoryFunc: func() (*mem.VirtualMemoryStat, error) { return nil, errors.New("err1") }, + expectedErr: "err1", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newMemoryScraper(context.Background(), &Config{}) + if test.virtualMemoryFunc != nil { + scraper.virtualMemory = test.virtualMemoryFunc + } + + metrics, err := scraper.Scrape(context.Background()) + if test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, metricsLen, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + assert.Equal(t, 1, metrics.Len()) + + assertMemoryUsageMetricValid(t, metrics.At(0), metadata.Metrics.SystemMemoryUsage.New()) + + if runtime.GOOS == "linux" { + assertMemoryUsageMetricHasLinuxSpecificStateLabels(t, metrics.At(0)) + } else if runtime.GOOS != "windows" { + internal.AssertIntSumMetricLabelHasValue(t, metrics.At(0), 2, metadata.Labels.MemState, metadata.LabelMemState.Inactive) + } + + internal.AssertSameTimeStampForAllMetrics(t, metrics) + }) + } +} + +func assertMemoryUsageMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric) { + internal.AssertDescriptorEqual(t, descriptor, metric) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 2) + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, metadata.Labels.MemState, metadata.LabelMemState.Used) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, metadata.Labels.MemState, metadata.LabelMemState.Free) +} + +func assertMemoryUsageMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { + internal.AssertIntSumMetricLabelHasValue(t, metric, 2, metadata.Labels.MemState, metadata.LabelMemState.Buffered) + internal.AssertIntSumMetricLabelHasValue(t, metric, 3, metadata.Labels.MemState, metadata.LabelMemState.Cached) + internal.AssertIntSumMetricLabelHasValue(t, metric, 4, metadata.Labels.MemState, metadata.LabelMemState.SlabReclaimable) + internal.AssertIntSumMetricLabelHasValue(t, metric, 5, metadata.Labels.MemState, metadata.LabelMemState.SlabUnreclaimable) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go 
b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go
new file mode 100644
index 00000000000..e3d1c12b984
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package memoryscraper
+
+import (
+    "github.com/shirou/gopsutil/mem"
+
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata"
+)
+
+const memStatesLen = 2
+
+func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) {
+    initializeMemoryUsageDataPoint(idps.At(0), now, metadata.LabelMemState.Used, int64(memInfo.Used))
+    initializeMemoryUsageDataPoint(idps.At(1), now, metadata.LabelMemState.Free, int64(memInfo.Available))
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go
new file mode 100644
index 00000000000..c63f5fb7b45
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package networkscraper
+
+import (
+    "go.opentelemetry.io/collector/internal/processor/filterset"
+    "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+)
+
+// Config relating to Network Metric Scraper.
+type Config struct {
+    internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+    // Include specifies a filter on the network interfaces that should be included in the generated metrics.
+    Include MatchConfig `mapstructure:"include"`
+    // Exclude specifies a filter on the network interfaces that should be excluded from the generated metrics.
+ Exclude MatchConfig `mapstructure:"exclude"` +} + +type MatchConfig struct { + filterset.Config `mapstructure:",squash"` + + Interfaces []string `mapstructure:"interfaces"` +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go new file mode 100644 index 00000000000..dfc97aec205 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package networkscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Network scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "network" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. +func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s, err := newNetworkScraper(ctx, cfg) + if err != nil { + return nil, err + } + + ms := scraperhelper.NewMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory_test.go new file mode 100644 index 00000000000..f47ac841226 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory_test.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
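For context on how the `include`/`exclude` blocks above are used: a minimal, hypothetical sketch (the function name is invented, and it assumes it sits inside the vendored `networkscraper` package, since `newNetworkScraper` and `includeInterface` are unexported) showing a strict include filter, mirroring the `filterset.Config{MatchType: "strict"}` form used by the tests later in this diff:

```go
package networkscraper

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/internal/processor/filterset"
)

// exampleStrictInclude builds a Config that keeps only lo and eth0, then
// checks which interface names would survive filtering.
func exampleStrictInclude() error {
	cfg := &Config{
		Include: MatchConfig{
			Config:     filterset.Config{MatchType: "strict"},
			Interfaces: []string{"lo", "eth0"},
		},
	}

	s, err := newNetworkScraper(context.Background(), cfg)
	if err != nil {
		return err
	}

	fmt.Println(s.includeInterface("eth0"))    // true: explicitly included
	fmt.Println(s.includeInterface("docker0")) // false: not in the include list
	return nil
}
```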
+ +package networkscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.NoError(t, err) + assert.NotNil(t, scraper) +} + +func TestCreateMetricsScraper_Error(t *testing.T) { + factory := &Factory{} + cfg := &Config{Include: MatchConfig{Interfaces: []string{""}}} + + _, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.Error(t, err) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go new file mode 100644 index 00000000000..62e51130563 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
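The `TestCreateMetricsScraper_Error` case above relies on filter construction failing when a `MatchConfig` names interfaces but carries no `match_type`. A hedged sketch of that failure path (hypothetical function name, assuming the vendored package context):

```go
package networkscraper

import (
	"context"
	"fmt"

	"go.uber.org/zap"
)

// exampleInvalidFilter shows the factory surfacing a filterset error:
// Interfaces are set, but the embedded filterset.Config has no MatchType,
// so filterset.CreateFilterSet rejects it and scraper creation fails early.
func exampleInvalidFilter() {
	factory := &Factory{}
	cfg := &Config{Include: MatchConfig{Interfaces: []string{"eth0"}}}

	if _, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg); err != nil {
		fmt.Println(err) // "error creating network interface include filters: ..."
	}
}
```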
+
+package networkscraper
+
+import (
+    "go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// network metric constants
+
+const (
+    interfaceLabelName = "interface"
+    directionLabelName = "direction"
+    stateLabelName     = "state"
+)
+
+// direction label values
+
+const (
+    receiveDirectionLabelValue  = "receive"
+    transmitDirectionLabelValue = "transmit"
+)
+
+// descriptors
+
+var networkPacketsDescriptor = func() pdata.Metric {
+    metric := pdata.NewMetric()
+    metric.SetName("system.network.packets")
+    metric.SetDescription("The number of packets transferred.")
+    metric.SetUnit("1")
+    metric.SetDataType(pdata.MetricDataTypeIntSum)
+    sum := metric.IntSum()
+    sum.SetIsMonotonic(true)
+    sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
+    return metric
+}()
+
+var networkDroppedPacketsDescriptor = func() pdata.Metric {
+    metric := pdata.NewMetric()
+    metric.SetName("system.network.dropped_packets")
+    metric.SetDescription("The number of packets dropped.")
+    metric.SetUnit("1")
+    metric.SetDataType(pdata.MetricDataTypeIntSum)
+    sum := metric.IntSum()
+    sum.SetIsMonotonic(true)
+    sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
+    return metric
+}()
+
+var networkErrorsDescriptor = func() pdata.Metric {
+    metric := pdata.NewMetric()
+    metric.SetName("system.network.errors")
+    metric.SetDescription("The number of errors encountered.")
+    metric.SetUnit("1")
+    metric.SetDataType(pdata.MetricDataTypeIntSum)
+    sum := metric.IntSum()
+    sum.SetIsMonotonic(true)
+    sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
+    return metric
+}()
+
+var networkIODescriptor = func() pdata.Metric {
+    metric := pdata.NewMetric()
+    metric.SetName("system.network.io")
+    metric.SetDescription("The number of bytes transmitted and received.")
+    metric.SetUnit("bytes")
+    metric.SetDataType(pdata.MetricDataTypeIntSum)
+    sum := metric.IntSum()
+    sum.SetIsMonotonic(true)
+    sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
+    return metric
+}()
+
+var networkTCPConnectionsDescriptor = func() pdata.Metric {
+    metric := pdata.NewMetric()
+    metric.SetName("system.network.tcp_connections")
+    metric.SetDescription("The number of TCP connections.")
+    metric.SetUnit("1")
+    metric.SetDataType(pdata.MetricDataTypeIntSum)
+    sum := metric.IntSum()
+    sum.SetIsMonotonic(false)
+    sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
+    return metric
+}()
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_others.go
new file mode 100644
index 00000000000..045e545b214
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_others.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
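The descriptors above follow a build-once, copy-many pattern: each is a package-level `pdata.Metric` constructed in an immediately invoked function, and scrapers stamp it onto freshly scraped metrics with `CopyTo` so the name, unit, and type are never re-set on the hot path. A minimal illustrative sketch (the metric name here is invented):

```go
package networkscraper

import "go.opentelemetry.io/collector/consumer/pdata"

// exampleDescriptor is hypothetical, but built exactly like the
// descriptors above: constructed once at package initialization.
var exampleDescriptor = func() pdata.Metric {
	metric := pdata.NewMetric()
	metric.SetName("system.network.example")
	metric.SetDescription("An illustrative counter.")
	metric.SetUnit("1")
	metric.SetDataType(pdata.MetricDataTypeIntSum)
	sum := metric.IntSum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
	return metric
}()

// newExampleMetric stamps the descriptor onto a destination metric; after
// this call, dest carries the name/description/unit/type set above and
// only its data points remain to be filled in by the scraper.
func newExampleMetric(dest pdata.Metric) {
	exampleDescriptor.CopyTo(dest)
}
```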
+ +// +build !windows + +package networkscraper + +var allTCPStates = []string{ + "CLOSE_WAIT", + "CLOSE", + "CLOSING", + "DELETE", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "LAST_ACK", + "LISTEN", + "SYN_SENT", + "SYN_RECV", + "TIME_WAIT", +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go new file mode 100644 index 00000000000..753b2723c70 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -0,0 +1,244 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package networkscraper + +import ( + "context" + "fmt" + "time" + + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/net" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +const ( + networkMetricsLen = 4 + connectionsMetricsLen = 1 +) + +// scraper for Network Metrics +type scraper struct { + config *Config + startTime pdata.TimestampUnixNano + includeFS filterset.FilterSet + excludeFS filterset.FilterSet + + // for mocking + bootTime func() (uint64, error) + ioCounters func(bool) ([]net.IOCountersStat, error) + connections func(string) ([]net.ConnectionStat, error) +} + +// newNetworkScraper creates a set of Network related metrics +func newNetworkScraper(_ context.Context, cfg *Config) (*scraper, error) { + scraper := &scraper{config: cfg, bootTime: host.BootTime, ioCounters: net.IOCounters, connections: net.Connections} + + var err error + + if len(cfg.Include.Interfaces) > 0 { + scraper.includeFS, err = filterset.CreateFilterSet(cfg.Include.Interfaces, &cfg.Include.Config) + if err != nil { + return nil, fmt.Errorf("error creating network interface include filters: %w", err) + } + } + + if len(cfg.Exclude.Interfaces) > 0 { + scraper.excludeFS, err = filterset.CreateFilterSet(cfg.Exclude.Interfaces, &cfg.Exclude.Config) + if err != nil { + return nil, fmt.Errorf("error creating network interface exclude filters: %w", err) + } + } + + return scraper, nil +} + +func (s *scraper) start(context.Context, component.Host) error { + bootTime, err := s.bootTime() + if err != nil { + return err + } + + s.startTime = pdata.TimestampUnixNano(bootTime * 1e9) + return nil +} + +func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + var errors []error + + err := s.scrapeAndAppendNetworkCounterMetrics(metrics, s.startTime) + if err != nil { + errors = append(errors, err) + } + + err = s.scrapeAndAppendNetworkTCPConnectionsMetric(metrics) + if err != nil { + errors = append(errors, 
err)
+    }
+
+    return metrics, scraperhelper.CombineScrapeErrors(errors)
+}
+
+func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice, startTime pdata.TimestampUnixNano) error {
+    now := internal.TimeToUnixNano(time.Now())
+
+    // get stats for each network interface
+    ioCounters, err := s.ioCounters( /*perNetworkInterfaceController=*/ true)
+    if err != nil {
+        return consumererror.NewPartialScrapeError(err, networkMetricsLen)
+    }
+
+    // filter network interfaces by name
+    ioCounters = s.filterByInterface(ioCounters)
+
+    if len(ioCounters) > 0 {
+        startIdx := metrics.Len()
+        metrics.Resize(startIdx + networkMetricsLen)
+        initializeNetworkPacketsMetric(metrics.At(startIdx+0), networkPacketsDescriptor, startTime, now, ioCounters)
+        initializeNetworkDroppedPacketsMetric(metrics.At(startIdx+1), networkDroppedPacketsDescriptor, startTime, now, ioCounters)
+        initializeNetworkErrorsMetric(metrics.At(startIdx+2), networkErrorsDescriptor, startTime, now, ioCounters)
+        initializeNetworkIOMetric(metrics.At(startIdx+3), networkIODescriptor, startTime, now, ioCounters)
+    }
+
+    return nil
+}
+
+func initializeNetworkPacketsMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) {
+    metricDescriptor.CopyTo(metric)
+
+    idps := metric.IntSum().DataPoints()
+    idps.Resize(2 * len(ioCountersSlice))
+    for idx, ioCounters := range ioCountersSlice {
+        initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.PacketsSent))
+        initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, receiveDirectionLabelValue, int64(ioCounters.PacketsRecv))
+    }
+}
+
+func initializeNetworkDroppedPacketsMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) {
+    metricDescriptor.CopyTo(metric)
+
+    idps := metric.IntSum().DataPoints()
+    idps.Resize(2 * len(ioCountersSlice))
+    for idx, ioCounters := range ioCountersSlice {
+        initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.Dropout))
+        initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, receiveDirectionLabelValue, int64(ioCounters.Dropin))
+    }
+}
+
+func initializeNetworkErrorsMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) {
+    metricDescriptor.CopyTo(metric)
+
+    idps := metric.IntSum().DataPoints()
+    idps.Resize(2 * len(ioCountersSlice))
+    for idx, ioCounters := range ioCountersSlice {
+        initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.Errout))
+        initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, receiveDirectionLabelValue, int64(ioCounters.Errin))
+    }
+}
+
+func initializeNetworkIOMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) {
+    metricDescriptor.CopyTo(metric)
+
+    idps := metric.IntSum().DataPoints()
+    idps.Resize(2 * len(ioCountersSlice))
+    for idx, ioCounters := range ioCountersSlice {
+        initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.BytesSent))
+        initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, receiveDirectionLabelValue,
int64(ioCounters.BytesRecv)) + } +} + +func initializeNetworkDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, interfaceLabel, directionLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(interfaceLabelName, interfaceLabel) + labelsMap.Insert(directionLabelName, directionLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func (s *scraper) scrapeAndAppendNetworkTCPConnectionsMetric(metrics pdata.MetricSlice) error { + now := internal.TimeToUnixNano(time.Now()) + + connections, err := s.connections("tcp") + if err != nil { + return consumererror.NewPartialScrapeError(err, connectionsMetricsLen) + } + + connectionStatusCounts := getTCPConnectionStatusCounts(connections) + + startIdx := metrics.Len() + metrics.Resize(startIdx + connectionsMetricsLen) + initializeNetworkTCPConnectionsMetric(metrics.At(startIdx), now, connectionStatusCounts) + return nil +} + +func getTCPConnectionStatusCounts(connections []net.ConnectionStat) map[string]int64 { + tcpStatuses := make(map[string]int64, len(allTCPStates)) + for _, state := range allTCPStates { + tcpStatuses[state] = 0 + } + + for _, connection := range connections { + tcpStatuses[connection.Status]++ + } + return tcpStatuses +} + +func initializeNetworkTCPConnectionsMetric(metric pdata.Metric, now pdata.TimestampUnixNano, connectionStateCounts map[string]int64) { + networkTCPConnectionsDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(len(connectionStateCounts)) + + i := 0 + for connectionState, count := range connectionStateCounts { + initializeNetworkTCPConnectionsDataPoint(idps.At(i), now, connectionState, count) + i++ + } +} + +func initializeNetworkTCPConnectionsDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(stateLabelName, stateLabel) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func (s *scraper) filterByInterface(ioCounters []net.IOCountersStat) []net.IOCountersStat { + if s.includeFS == nil && s.excludeFS == nil { + return ioCounters + } + + filteredIOCounters := make([]net.IOCountersStat, 0, len(ioCounters)) + for _, io := range ioCounters { + if s.includeInterface(io.Name) { + filteredIOCounters = append(filteredIOCounters, io) + } + } + return filteredIOCounters +} + +func (s *scraper) includeInterface(interfaceName string) bool { + return (s.includeFS == nil || s.includeFS.Matches(interfaceName)) && + (s.excludeFS == nil || !s.excludeFS.Matches(interfaceName)) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go new file mode 100644 index 00000000000..c3498f2ff94 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go @@ -0,0 +1,171 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package networkscraper + +import ( + "context" + "errors" + "testing" + + "github.com/shirou/gopsutil/net" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + config Config + bootTimeFunc func() (uint64, error) + ioCountersFunc func(bool) ([]net.IOCountersStat, error) + connectionsFunc func(string) ([]net.ConnectionStat, error) + expectNetworkMetrics bool + expectedStartTime pdata.TimestampUnixNano + newErrRegex string + initializationErr string + expectedErr string + expectedErrCount int + } + + testCases := []testCase{ + { + name: "Standard", + expectNetworkMetrics: true, + }, + { + name: "Validate Start Time", + bootTimeFunc: func() (uint64, error) { return 100, nil }, + expectNetworkMetrics: true, + expectedStartTime: 100 * 1e9, + }, + { + name: "Include Filter that matches nothing", + config: Config{Include: MatchConfig{filterset.Config{MatchType: "strict"}, []string{"@*^#&*$^#)"}}}, + expectNetworkMetrics: false, + }, + { + name: "Invalid Include Filter", + config: Config{Include: MatchConfig{Interfaces: []string{"test"}}}, + newErrRegex: "^error creating network interface include filters:", + }, + { + name: "Invalid Exclude Filter", + config: Config{Exclude: MatchConfig{Interfaces: []string{"test"}}}, + newErrRegex: "^error creating network interface exclude filters:", + }, + { + name: "Boot Time Error", + bootTimeFunc: func() (uint64, error) { return 0, errors.New("err1") }, + initializationErr: "err1", + }, + { + name: "IOCounters Error", + ioCountersFunc: func(bool) ([]net.IOCountersStat, error) { return nil, errors.New("err2") }, + expectedErr: "err2", + expectedErrCount: networkMetricsLen, + }, + { + name: "Connections Error", + connectionsFunc: func(string) ([]net.ConnectionStat, error) { return nil, errors.New("err3") }, + expectedErr: "err3", + expectedErrCount: connectionsMetricsLen, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper, err := newNetworkScraper(context.Background(), &test.config) + if test.newErrRegex != "" { + require.Error(t, err) + require.Regexp(t, test.newErrRegex, err) + return + } + require.NoError(t, err, "Failed to create network scraper: %v", err) + + if test.bootTimeFunc != nil { + scraper.bootTime = test.bootTimeFunc + } + if test.ioCountersFunc != nil { + scraper.ioCounters = test.ioCountersFunc + } + if test.connectionsFunc != nil { + scraper.connections = test.connectionsFunc + } + + err = scraper.start(context.Background(), componenttest.NewNopHost()) + if test.initializationErr != "" { + assert.EqualError(t, err, test.initializationErr) + return + } + require.NoError(t, err, "Failed to initialize network scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) 
+ if test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, test.expectedErrCount, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + expectedMetricCount := 1 + if test.expectNetworkMetrics { + expectedMetricCount += 4 + } + assert.Equal(t, expectedMetricCount, metrics.Len()) + + idx := 0 + if test.expectNetworkMetrics { + assertNetworkIOMetricValid(t, metrics.At(idx+0), networkPacketsDescriptor, test.expectedStartTime) + assertNetworkIOMetricValid(t, metrics.At(idx+1), networkDroppedPacketsDescriptor, test.expectedStartTime) + assertNetworkIOMetricValid(t, metrics.At(idx+2), networkErrorsDescriptor, test.expectedStartTime) + assertNetworkIOMetricValid(t, metrics.At(idx+3), networkIODescriptor, test.expectedStartTime) + internal.AssertSameTimeStampForMetrics(t, metrics, 0, 4) + idx += 4 + } + + assertNetworkTCPConnectionsMetricValid(t, metrics.At(idx+0)) + internal.AssertSameTimeStampForMetrics(t, metrics, idx, idx+1) + }) + } +} + +func assertNetworkIOMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, descriptor, metric) + if startTime != 0 { + internal.AssertIntSumMetricStartTimeEquals(t, metric, startTime) + } + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 2) + internal.AssertIntSumMetricLabelExists(t, metric, 0, interfaceLabelName) + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, directionLabelName, transmitDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, directionLabelName, receiveDirectionLabelValue) +} + +func assertNetworkTCPConnectionsMetricValid(t *testing.T, metric pdata.Metric) { + internal.AssertDescriptorEqual(t, networkTCPConnectionsDescriptor, metric) + internal.AssertIntSumMetricLabelExists(t, metric, 0, stateLabelName) + assert.Equal(t, 12, metric.IntSum().DataPoints().Len()) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_windows.go new file mode 100644 index 00000000000..ccd995895ee --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_windows.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
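As the test's assertion of exactly 12 data points suggests, `getTCPConnectionStatusCounts` pre-seeds a zero count for every state in the platform's `allTCPStates` list before tallying live connections, so `system.network.tcp_connections` always carries a fixed-size label set even on an idle host. A small in-package sketch (hypothetical function name and connection data):

```go
package networkscraper

import (
	"fmt"

	"github.com/shirou/gopsutil/net"
)

// exampleStateCounts demonstrates the zero-fill behaviour: states with no
// live connections still appear in the map with a count of zero.
func exampleStateCounts() {
	conns := []net.ConnectionStat{
		{Status: "ESTABLISHED"},
		{Status: "ESTABLISHED"},
		{Status: "LISTEN"},
	}

	counts := getTCPConnectionStatusCounts(conns)
	fmt.Println(len(counts), counts["ESTABLISHED"], counts["TIME_WAIT"]) // 12 2 0
}
```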
+ +// +build windows + +package networkscraper + +var allTCPStates = []string{ + "CLOSE_WAIT", + "CLOSED", + "CLOSING", + "DELETE", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "LAST_ACK", + "LISTEN", + "SYN_SENT", + "SYN_RECEIVED", + "TIME_WAIT", +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go new file mode 100644 index 00000000000..4245727c8b1 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processesscraper + +import "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + +// Config relating to Processes Metric Scraper. +type Config struct { + internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go new file mode 100644 index 00000000000..06f843f75b1 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processesscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Processes scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "processes" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. 
+func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s := newProcessesScraper(ctx, cfg) + + ms := scraperhelper.NewMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory_test.go new file mode 100644 index 00000000000..cb74af9d2b2 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory_test.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processesscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + + assert.NoError(t, err) + assert.NotNil(t, scraper) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go new file mode 100644 index 00000000000..813bd2749b9 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
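A brief sketch of the two-step factory contract shared by these scraper packages, as exercised by the tests above (the function name is hypothetical; in real use the host metrics receiver performs this wiring after decoding YAML over the default config):

```go
package processesscraper

import (
	"context"
	"log"

	"go.uber.org/zap"
)

// exampleWireScraper obtains the default config, then asks the factory
// for a MetricsScraper bound to it.
func exampleWireScraper() {
	factory := &Factory{}
	cfg := factory.CreateDefaultConfig()

	scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = scraper // handed to scraperhelper by the receiver in real use
}
```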
+ +package processesscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// descriptors + +var processesRunningDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.processes.running") + metric.SetDescription("Total number of running processes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var processesBlockedDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.processes.blocked") + metric.SetDescription("Total number of blocked processes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go new file mode 100644 index 00000000000..23714a77b92 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
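Both descriptors above are cumulative but non-monotonic, since running and blocked counts can fall as well as rise. A hedged sketch of how such a descriptor becomes a single unlabeled data point at scrape time, mirroring `initializeProcessesMetric` later in this diff (the function name is invented):

```go
package processesscraper

import (
	"time"

	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
)

// exampleRunningMetric copies the descriptor onto a fresh metric and
// attaches one data point holding the current running-process count.
func exampleRunningMetric(running int64) pdata.Metric {
	metric := pdata.NewMetric()
	processesRunningDescriptor.CopyTo(metric)

	idps := metric.IntSum().DataPoints()
	idps.Resize(1)
	idps.At(0).SetTimestamp(internal.TimeToUnixNano(time.Now()))
	idps.At(0).SetValue(running)
	return metric
}
```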
+
+package processesscraper
+
+import (
+    "context"
+
+    "github.com/shirou/gopsutil/host"
+    "github.com/shirou/gopsutil/load"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/consumer/pdata"
+)
+
+const metricsLen = systemSpecificMetricsLen
+
+// scraper for Processes Metrics
+type scraper struct {
+    config    *Config
+    startTime pdata.TimestampUnixNano
+
+    // for mocking gopsutil load.Misc
+    misc getMiscStats
+}
+
+type getMiscStats func() (*load.MiscStat, error)
+
+// newProcessesScraper creates a set of Processes related metrics
+func newProcessesScraper(_ context.Context, cfg *Config) *scraper {
+    return &scraper{config: cfg, misc: load.Misc}
+}
+
+func (s *scraper) start(context.Context, component.Host) error {
+    bootTime, err := host.BootTime()
+    if err != nil {
+        return err
+    }
+
+    s.startTime = pdata.TimestampUnixNano(bootTime * 1e9)
+    return nil
+}
+
+func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) {
+    metrics := pdata.NewMetricSlice()
+    err := appendSystemSpecificProcessesMetrics(metrics, 0, s.misc)
+    return metrics, err
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go
new file mode 100644
index 00000000000..33f07cdc12b
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go
@@ -0,0 +1,25 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!darwin,!freebsd,!openbsd
+
+package processesscraper
+
+import "go.opentelemetry.io/collector/consumer/pdata"
+
+const systemSpecificMetricsLen = 0
+
+func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, miscFunc getMiscStats) error {
+    return nil
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
new file mode 100644
index 00000000000..5d2ff7b1a9d
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
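The `misc getMiscStats` field above exists so tests can swap out gopsutil, which the test file that follows does. A minimal sketch of that substitution (the helper and canned values are hypothetical):

```go
package processesscraper

import (
	"context"

	"github.com/shirou/gopsutil/load"
)

// newStubbedScraper builds a scraper whose load.Misc source is replaced
// by a stub, so scrape() returns deterministic counts without touching
// the host.
func newStubbedScraper() *scraper {
	s := newProcessesScraper(context.Background(), &Config{})
	s.misc = func() (*load.MiscStat, error) {
		return &load.MiscStat{ProcsRunning: 4, ProcsBlocked: 1}, nil
	}
	return s
}
```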
+ +package processesscraper + +import ( + "context" + "errors" + "runtime" + "testing" + + "github.com/shirou/gopsutil/load" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +var systemSpecificMetrics = map[string][]pdata.Metric{ + "linux": {processesRunningDescriptor, processesBlockedDescriptor}, + "darwin": {processesRunningDescriptor, processesBlockedDescriptor}, + "freebsd": {processesRunningDescriptor, processesBlockedDescriptor}, + "openbsd": {processesRunningDescriptor, processesBlockedDescriptor}, +} + +func TestScrape(t *testing.T) { + type testCase struct { + name string + miscFunc func() (*load.MiscStat, error) + expectedErr string + } + + testCases := []testCase{ + { + name: "Standard", + }, + { + name: "Error", + miscFunc: func() (*load.MiscStat, error) { return nil, errors.New("err1") }, + expectedErr: "err1", + }, + } + + expectedMetrics := systemSpecificMetrics[runtime.GOOS] + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newProcessesScraper(context.Background(), &Config{}) + if test.miscFunc != nil { + scraper.misc = test.miscFunc + } + + err := scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize processes scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) + if len(expectedMetrics) > 0 && test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, metricsLen, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + require.NoError(t, err, "Failed to scrape metrics: %v", err) + + assert.Equal(t, len(expectedMetrics), metrics.Len()) + for i, expectedMetricDescriptor := range expectedMetrics { + assertProcessesMetricValid(t, metrics.At(i), expectedMetricDescriptor) + } + + internal.AssertSameTimeStampForAllMetrics(t, metrics) + }) + } +} + +func assertProcessesMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric) { + internal.AssertDescriptorEqual(t, descriptor, metric) + assert.Equal(t, metric.IntSum().DataPoints().Len(), 1) + assert.Equal(t, metric.IntSum().DataPoints().At(0).LabelsMap().Len(), 0) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go new file mode 100644 index 00000000000..82794399630 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux darwin freebsd openbsd
+
+package processesscraper
+
+import (
+    "time"
+
+    "go.opentelemetry.io/collector/consumer/consumererror"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+)
+
+const systemSpecificMetricsLen = 2
+
+func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, miscFunc getMiscStats) error {
+    now := internal.TimeToUnixNano(time.Now())
+    misc, err := miscFunc()
+    if err != nil {
+        return consumererror.NewPartialScrapeError(err, systemSpecificMetricsLen)
+    }
+
+    metrics.Resize(startIndex + systemSpecificMetricsLen)
+    initializeProcessesMetric(metrics.At(startIndex+0), processesRunningDescriptor, now, int64(misc.ProcsRunning))
+    initializeProcessesMetric(metrics.At(startIndex+1), processesBlockedDescriptor, now, int64(misc.ProcsBlocked))
+    return nil
+}
+
+func initializeProcessesMetric(metric pdata.Metric, descriptor pdata.Metric, now pdata.TimestampUnixNano, value int64) {
+    descriptor.CopyTo(metric)
+
+    ddps := metric.IntSum().DataPoints()
+    ddps.Resize(1)
+    ddps.At(0).SetTimestamp(now)
+    ddps.At(0).SetValue(value)
+}
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
new file mode 100644
index 00000000000..d5a3d913074
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processscraper
+
+import (
+    "go.opentelemetry.io/collector/internal/processor/filterset"
+    "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
+)
+
+// Config relating to Process Metric Scraper.
+type Config struct {
+    internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+    // Include specifies a filter on the process names that should be included in the generated metrics.
+    // Exclude specifies a filter on the process names that should be excluded from the generated metrics.
+    // If neither `include` nor `exclude` is set, process metrics will be generated for all processes.
+ Include MatchConfig `mapstructure:"include"` + Exclude MatchConfig `mapstructure:"exclude"` +} + +type MatchConfig struct { + filterset.Config `mapstructure:",squash"` + + Names []string `mapstructure:"names"` +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go new file mode 100644 index 00000000000..d9ef18ad599 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processscraper + +import ( + "context" + "errors" + "runtime" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Process scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "process" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateResourceMetricsScraper creates a resource scraper based on provided config. +func (f *Factory) CreateResourceMetricsScraper( + _ context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.ResourceMetricsScraper, error) { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + return nil, errors.New("process scraper only available on Linux or Windows") + } + + cfg := config.(*Config) + s, err := newProcessScraper(cfg) + if err != nil { + return nil, err + } + + ms := scraperhelper.NewResourceMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory_test.go new file mode 100644 index 00000000000..d16165e2379 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory_test.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
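Callers of this factory must be prepared for the explicit platform gate in `CreateResourceMetricsScraper`. A hedged sketch of handling it (the function name is hypothetical):

```go
package processscraper

import (
	"context"
	"fmt"

	"go.uber.org/zap"
)

// exampleGating shows the error path on unsupported platforms: off Linux
// and Windows the factory refuses to build a scraper, and the caller is
// expected to surface the error rather than silently emit no process metrics.
func exampleGating() {
	factory := &Factory{}
	scraper, err := factory.CreateResourceMetricsScraper(context.Background(), zap.NewNop(), &Config{})
	if err != nil {
		fmt.Println("process scraping unsupported here:", err)
		return
	}
	_ = scraper
}
```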
+ +package processscraper + +import ( + "context" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateResourceMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateResourceMetricsScraper(context.Background(), zap.NewNop(), cfg) + + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + assert.NoError(t, err) + assert.NotNil(t, scraper) + } else { + assert.Error(t, err) + assert.Nil(t, scraper) + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go new file mode 100644 index 00000000000..d432a7611fc --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processscraper + +import ( + "strings" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/process" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +// processMetadata stores process related metadata along +// with the process handle, and provides a function to +// initialize a pdata.Resource with the metadata + +type processMetadata struct { + pid int32 + executable *executableMetadata + command *commandMetadata + username string + handle processHandle +} + +type executableMetadata struct { + name string + path string +} + +type commandMetadata struct { + command string + commandLine string + commandLineSlice []string +} + +func (m *processMetadata) initializeResource(resource pdata.Resource) { + attr := resource.Attributes() + attr.InitEmptyWithCapacity(6) + m.insertPid(attr) + m.insertExecutable(attr) + m.insertCommand(attr) + m.insertUsername(attr) +} + +func (m *processMetadata) insertPid(attr pdata.AttributeMap) { + attr.InsertInt(conventions.AttributeProcessID, int64(m.pid)) +} + +func (m *processMetadata) insertExecutable(attr pdata.AttributeMap) { + attr.InsertString(conventions.AttributeProcessExecutableName, m.executable.name) + attr.InsertString(conventions.AttributeProcessExecutablePath, m.executable.path) +} + +func (m *processMetadata) insertCommand(attr pdata.AttributeMap) { + if m.command == nil { + return + } + + attr.InsertString(conventions.AttributeProcessCommand, m.command.command) + if m.command.commandLineSlice != nil { + // TODO insert slice here once this is supported by the data model + // (see https://github.com/open-telemetry/opentelemetry-collector/pull/1142) + attr.InsertString(conventions.AttributeProcessCommandLine, strings.Join(m.command.commandLineSlice, " ")) + } else { + attr.InsertString(conventions.AttributeProcessCommandLine, 
m.command.commandLine) + } +} + +func (m *processMetadata) insertUsername(attr pdata.AttributeMap) { + if m.username == "" { + return + } + + attr.InsertString(conventions.AttributeProcessOwner, m.username) +} + +// processHandles provides a wrapper around []*process.Process +// to support testing + +type processHandles interface { + Pid(index int) int32 + At(index int) processHandle + Len() int +} + +type processHandle interface { + Name() (string, error) + Exe() (string, error) + Username() (string, error) + Cmdline() (string, error) + CmdlineSlice() ([]string, error) + Times() (*cpu.TimesStat, error) + MemoryInfo() (*process.MemoryInfoStat, error) + IOCounters() (*process.IOCountersStat, error) +} + +type gopsProcessHandles struct { + handles []*process.Process +} + +func (p *gopsProcessHandles) Pid(index int) int32 { + return p.handles[index].Pid +} + +func (p *gopsProcessHandles) At(index int) processHandle { + return p.handles[index] +} + +func (p *gopsProcessHandles) Len() int { + return len(p.handles) +} + +func getProcessHandlesInternal() (processHandles, error) { + processes, err := process.Processes() + if err != nil { + return nil, err + } + + return &gopsProcessHandles{handles: processes}, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go new file mode 100644 index 00000000000..449f05e2a56 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
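Because `processHandle` abstracts `*process.Process` behind an interface, tests can supply fakes without spawning real processes. An illustrative fake (entirely hypothetical; only the method set is taken from the interface above):

```go
package processscraper

import (
	"github.com/shirou/gopsutil/cpu"
	"github.com/shirou/gopsutil/process"
)

// fakeHandle satisfies processHandle with canned values; only the calls a
// given test exercises need non-trivial behaviour.
type fakeHandle struct {
	name string
}

func (f *fakeHandle) Name() (string, error)           { return f.name, nil }
func (f *fakeHandle) Exe() (string, error)            { return "/usr/bin/" + f.name, nil }
func (f *fakeHandle) Username() (string, error)       { return "root", nil }
func (f *fakeHandle) Cmdline() (string, error)        { return f.name, nil }
func (f *fakeHandle) CmdlineSlice() ([]string, error) { return []string{f.name}, nil }
func (f *fakeHandle) Times() (*cpu.TimesStat, error)  { return &cpu.TimesStat{}, nil }
func (f *fakeHandle) MemoryInfo() (*process.MemoryInfoStat, error) {
	return &process.MemoryInfoStat{}, nil
}
func (f *fakeHandle) IOCounters() (*process.IOCountersStat, error) {
	return &process.IOCountersStat{}, nil
}

var _ processHandle = (*fakeHandle)(nil) // compile-time interface check
```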
+ +package processscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// labels + +const ( + directionLabelName = "direction" + stateLabelName = "state" +) + +// direction label values + +const ( + readDirectionLabelValue = "read" + writeDirectionLabelValue = "write" +) + +// state label values + +const ( + userStateLabelValue = "user" + systemStateLabelValue = "system" + waitStateLabelValue = "wait" +) + +// descriptors + +var cpuTimeDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("process.cpu.time") + metric.SetDescription("Total CPU seconds broken down by different states.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var physicalMemoryUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("process.memory.physical_usage") + metric.SetDescription("The amount of physical memory in use.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var virtualMemoryUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("process.memory.virtual_usage") + metric.SetDescription("Virtual memory size.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var diskIODescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("process.disk.io") + metric.SetDescription("Disk bytes transferred.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go new file mode 100644 index 00000000000..8588b6c75c1 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -0,0 +1,253 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
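`process.cpu.time` is the one DoubleSum in this set, fanned out by state label. A rough sketch of that fan-out, assuming the per-OS `appendCPUTimeStateDataPoints` (referenced but not shown in this diff) does something along these lines with three states; the state choice and the gopsutil field mapping here are assumptions:

```go
package processscraper

import (
	"github.com/shirou/gopsutil/cpu"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// sketchCPUStateDataPoints writes one double data point per CPU state,
// labelling each with the state name and stamping start/current times.
func sketchCPUStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, times *cpu.TimesStat) {
	initPoint := func(dp pdata.DoubleDataPoint, state string, value float64) {
		dp.LabelsMap().Insert(stateLabelName, state)
		dp.SetStartTime(startTime)
		dp.SetTimestamp(now)
		dp.SetValue(value)
	}
	ddps.Resize(3)
	initPoint(ddps.At(0), userStateLabelValue, times.User)
	initPoint(ddps.At(1), systemStateLabelValue, times.System)
	initPoint(ddps.At(2), waitStateLabelValue, times.Iowait)
}
```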
+ +package processscraper + +import ( + "context" + "fmt" + "time" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/process" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +const ( + cpuMetricsLen = 1 + memoryMetricsLen = 2 + diskMetricsLen = 1 + + metricsLen = cpuMetricsLen + memoryMetricsLen + diskMetricsLen +) + +// scraper for Process Metrics +type scraper struct { + config *Config + startTime pdata.TimestampUnixNano + includeFS filterset.FilterSet + excludeFS filterset.FilterSet + + // for mocking + bootTime func() (uint64, error) + getProcessHandles func() (processHandles, error) +} + +// newProcessScraper creates a Process Scraper +func newProcessScraper(cfg *Config) (*scraper, error) { + scraper := &scraper{config: cfg, bootTime: host.BootTime, getProcessHandles: getProcessHandlesInternal} + + var err error + + if len(cfg.Include.Names) > 0 { + scraper.includeFS, err = filterset.CreateFilterSet(cfg.Include.Names, &cfg.Include.Config) + if err != nil { + return nil, fmt.Errorf("error creating process include filters: %w", err) + } + } + + if len(cfg.Exclude.Names) > 0 { + scraper.excludeFS, err = filterset.CreateFilterSet(cfg.Exclude.Names, &cfg.Exclude.Config) + if err != nil { + return nil, fmt.Errorf("error creating process exclude filters: %w", err) + } + } + + return scraper, nil +} + +func (s *scraper) start(context.Context, component.Host) error { + bootTime, err := s.bootTime() + if err != nil { + return err + } + + s.startTime = pdata.TimestampUnixNano(bootTime * 1e9) + return nil +} + +func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) { + rms := pdata.NewResourceMetricsSlice() + + var errs []error + + metadata, err := s.getProcessMetadata() + if err != nil { + if !consumererror.IsPartialScrapeError(err) { + return rms, err + } + + errs = append(errs, err) + } + + rms.Resize(len(metadata)) + for i, md := range metadata { + rm := rms.At(i) + md.initializeResource(rm.Resource()) + + ilms := rm.InstrumentationLibraryMetrics() + ilms.Resize(1) + metrics := ilms.At(0).Metrics() + + now := internal.TimeToUnixNano(time.Now()) + + if err = scrapeAndAppendCPUTimeMetric(metrics, s.startTime, now, md.handle); err != nil { + errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err), cpuMetricsLen)) + } + + if err = scrapeAndAppendMemoryUsageMetrics(metrics, now, md.handle); err != nil { + errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading memory info for process %q (pid %v): %w", md.executable.name, md.pid, err), memoryMetricsLen)) + } + + if err = scrapeAndAppendDiskIOMetric(metrics, s.startTime, now, md.handle); err != nil { + errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err), diskMetricsLen)) + } + } + + return rms, scraperhelper.CombineScrapeErrors(errs) +} + +// getProcessMetadata returns a slice of processMetadata, including handles, +// for all currently running processes. 
If errors occur obtaining information +// for some processes, an error will be returned, but any processes that were +// successfully obtained will still be returned. +func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { + handles, err := s.getProcessHandles() + if err != nil { + return nil, err + } + + var errs []error + metadata := make([]*processMetadata, 0, handles.Len()) + for i := 0; i < handles.Len(); i++ { + pid := handles.Pid(i) + handle := handles.At(i) + + executable, err := getProcessExecutable(handle) + if err != nil { + errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading process name for pid %v: %w", pid, err), 1)) + continue + } + + // filter processes by name + if (s.includeFS != nil && !s.includeFS.Matches(executable.name)) || + (s.excludeFS != nil && s.excludeFS.Matches(executable.name)) { + continue + } + + command, err := getProcessCommand(handle) + if err != nil { + errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading command for process %q (pid %v): %w", executable.name, pid, err), 0)) + } + + username, err := handle.Username() + if err != nil { + errs = append(errs, consumererror.NewPartialScrapeError(fmt.Errorf("error reading username for process %q (pid %v): %w", executable.name, pid, err), 0)) + } + + md := &processMetadata{ + pid: pid, + executable: executable, + command: command, + username: username, + handle: handle, + } + + metadata = append(metadata, md) + } + + return metadata, scraperhelper.CombineScrapeErrors(errs) +} + +func scrapeAndAppendCPUTimeMetric(metrics pdata.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { + times, err := handle.Times() + if err != nil { + return err + } + + startIdx := metrics.Len() + metrics.Resize(startIdx + cpuMetricsLen) + initializeCPUTimeMetric(metrics.At(startIdx), startTime, now, times) + return nil +} + +func initializeCPUTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, times *cpu.TimesStat) { + cpuTimeDescriptor.CopyTo(metric) + + ddps := metric.DoubleSum().DataPoints() + ddps.Resize(cpuStatesLen) + appendCPUTimeStateDataPoints(ddps, startTime, now, times) +} + +func scrapeAndAppendMemoryUsageMetrics(metrics pdata.MetricSlice, now pdata.TimestampUnixNano, handle processHandle) error { + mem, err := handle.MemoryInfo() + if err != nil { + return err + } + + startIdx := metrics.Len() + metrics.Resize(startIdx + memoryMetricsLen) + initializeMemoryUsageMetric(metrics.At(startIdx+0), physicalMemoryUsageDescriptor, now, int64(mem.RSS)) + initializeMemoryUsageMetric(metrics.At(startIdx+1), virtualMemoryUsageDescriptor, now, int64(mem.VMS)) + return nil +} + +func initializeMemoryUsageMetric(metric pdata.Metric, descriptor pdata.Metric, now pdata.TimestampUnixNano, usage int64) { + descriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(1) + initializeMemoryUsageDataPoint(idps.At(0), now, usage) +} + +func initializeMemoryUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, usage int64) { + dataPoint.SetTimestamp(now) + dataPoint.SetValue(usage) +} + +func scrapeAndAppendDiskIOMetric(metrics pdata.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { + io, err := handle.IOCounters() + if err != nil { + return err + } + + startIdx := metrics.Len() + metrics.Resize(startIdx + diskMetricsLen) + initializeDiskIOMetric(metrics.At(startIdx), startTime, now, io) + return nil +} + +func initializeDiskIOMetric(metric pdata.Metric, 
startTime, now pdata.TimestampUnixNano, io *process.IOCountersStat) { + diskIODescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2) + initializeDiskIODataPoint(idps.At(0), startTime, now, int64(io.ReadBytes), readDirectionLabelValue) + initializeDiskIODataPoint(idps.At(1), startTime, now, int64(io.WriteBytes), writeDirectionLabelValue) +} + +func initializeDiskIODataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, value int64, directionLabel string) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(directionLabelName, directionLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go new file mode 100644 index 00000000000..62dc40cde23 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package processscraper + +import ( + "github.com/shirou/gopsutil/cpu" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const cpuStatesLen = 3 + +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { + initializeCPUTimeDataPoint(ddps.At(0), startTime, now, cpuTime.User, userStateLabelValue) + initializeCPUTimeDataPoint(ddps.At(1), startTime, now, cpuTime.System, systemStateLabelValue) + initializeCPUTimeDataPoint(ddps.At(2), startTime, now, cpuTime.Iowait, waitStateLabelValue) +} + +func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, value float64, stateLabel string) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(stateLabelName, stateLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func getProcessExecutable(proc processHandle) (*executableMetadata, error) { + name, err := proc.Name() + if err != nil { + return nil, err + } + + exe, err := proc.Exe() + if err != nil { + return nil, err + } + + executable := &executableMetadata{name: name, path: exe} + return executable, nil +} + +func getProcessCommand(proc processHandle) (*commandMetadata, error) { + cmdline, err := proc.CmdlineSlice() + if err != nil { + return nil, err + } + + var cmd string + if len(cmdline) > 0 { + cmd = cmdline[0] + } + + command := &commandMetadata{command: cmd, commandLineSlice: cmdline} + return command, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go new file mode 100644 index 
00000000000..b3056d8a933 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!windows + +package processscraper + +import ( + "github.com/shirou/gopsutil/cpu" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const cpuStatesLen = 0 + +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { +} + +func getProcessExecutable(processHandle) (*executableMetadata, error) { + return nil, nil +} + +func getProcessCommand(processHandle) (*commandMetadata, error) { + return nil, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go new file mode 100644 index 00000000000..bb836907ec2 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go @@ -0,0 +1,485 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
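// Editor's note: a hedged sketch of the error contract the scrape loop above
// implements. A PartialScrapeError means "keep the returned data, count the
// failures"; only a plain error (e.g. the process listing itself failing)
// should discard the batch. handleScrapeResult is a hypothetical helper name.
//
//	func handleScrapeResult(rms pdata.ResourceMetricsSlice, err error) (pdata.ResourceMetricsSlice, int, error) {
//		if err == nil {
//			return rms, 0, nil
//		}
//		if consumererror.IsPartialScrapeError(err) {
//			// partial failure: the successfully scraped metrics are still usable
//			return rms, err.(consumererror.PartialScrapeError).Failed, nil
//		}
//		return rms, 0, err // total failure
//	}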
+ +package processscraper + +import ( + "context" + "errors" + "fmt" + "runtime" + "testing" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/process" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/translator/conventions" +) + +func skipTestOnUnsupportedOS(t *testing.T) { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + t.Skipf("skipping test on %v", runtime.GOOS) + } +} + +func TestScrape(t *testing.T) { + skipTestOnUnsupportedOS(t) + + const bootTime = 100 + const expectedStartTime = 100 * 1e9 + + scraper, err := newProcessScraper(&Config{}) + require.NoError(t, err, "Failed to create process scraper: %v", err) + scraper.bootTime = func() (uint64, error) { return bootTime, nil } + err = scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize process scraper: %v", err) + + resourceMetrics, err := scraper.scrape(context.Background()) + + // may receive some partial errors as a result of attempting to: + // a) read native system processes on Windows (e.g. Registry process) + // b) read info on processes that have just terminated + // + // so validate that fewer processes failed to be scraped than were + // successfully scraped, and that some valid data is returned + if err != nil { + require.True(t, consumererror.IsPartialScrapeError(err)) + noProcessesScraped := resourceMetrics.Len() + noProcessesErrored := err.(consumererror.PartialScrapeError).Failed + require.Lessf(t, noProcessesErrored, noProcessesScraped, "Failed to scrape metrics - more processes failed to be scraped than were successfully scraped: %v", err) + } + + require.Greater(t, resourceMetrics.Len(), 1) + assertProcessResourceAttributesExist(t, resourceMetrics) + assertCPUTimeMetricValid(t, resourceMetrics, expectedStartTime) + assertMemoryUsageMetricValid(t, physicalMemoryUsageDescriptor, resourceMetrics) + assertMemoryUsageMetricValid(t, virtualMemoryUsageDescriptor, resourceMetrics) + assertDiskIOMetricValid(t, resourceMetrics, expectedStartTime) + assertSameTimeStampForAllMetricsWithinResource(t, resourceMetrics) +} + +func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) { + for i := 0; i < resourceMetrics.Len(); i++ { + attr := resourceMetrics.At(i).Resource().Attributes() + internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessID) + internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessExecutableName) + internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessExecutablePath) + internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessCommand) + internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessCommandLine) + internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessOwner) + } +} + +func assertCPUTimeMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.TimestampUnixNano) { + cpuTimeMetric := getMetric(t, cpuTimeDescriptor, resourceMetrics) + internal.AssertDescriptorEqual(t, cpuTimeDescriptor, cpuTimeMetric) + if startTime != 0 { +
internal.AssertDoubleSumMetricStartTimeEquals(t, cpuTimeMetric, startTime) + } + internal.AssertDoubleSumMetricLabelHasValue(t, cpuTimeMetric, 0, stateLabelName, userStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, cpuTimeMetric, 1, stateLabelName, systemStateLabelValue) + if runtime.GOOS == "linux" { + internal.AssertDoubleSumMetricLabelHasValue(t, cpuTimeMetric, 2, stateLabelName, waitStateLabelValue) + } +} + +func assertMemoryUsageMetricValid(t *testing.T, descriptor pdata.Metric, resourceMetrics pdata.ResourceMetricsSlice) { + memoryUsageMetric := getMetric(t, descriptor, resourceMetrics) + internal.AssertDescriptorEqual(t, descriptor, memoryUsageMetric) +} + +func assertDiskIOMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.TimestampUnixNano) { + diskIOMetric := getMetric(t, diskIODescriptor, resourceMetrics) + internal.AssertDescriptorEqual(t, diskIODescriptor, diskIOMetric) + if startTime != 0 { + internal.AssertIntSumMetricStartTimeEquals(t, diskIOMetric, startTime) + } + internal.AssertIntSumMetricLabelHasValue(t, diskIOMetric, 0, directionLabelName, readDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, diskIOMetric, 1, directionLabelName, writeDirectionLabelValue) +} + +func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) { + for i := 0; i < resourceMetrics.Len(); i++ { + ilms := resourceMetrics.At(i).InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + internal.AssertSameTimeStampForAllMetrics(t, ilms.At(j).Metrics()) + } + } +} + +func getMetric(t *testing.T, descriptor pdata.Metric, rms pdata.ResourceMetricsSlice) pdata.Metric { + for i := 0; i < rms.Len(); i++ { + metrics := getMetricSlice(t, rms.At(i)) + for j := 0; j < metrics.Len(); j++ { + metric := metrics.At(j) + if metric.Name() == descriptor.Name() { + return metric + } + } + } + + require.Fail(t, fmt.Sprintf("no metric with name %s was returned", descriptor.Name())) + return pdata.NewMetric() +} + +func getMetricSlice(t *testing.T, rm pdata.ResourceMetrics) pdata.MetricSlice { + ilms := rm.InstrumentationLibraryMetrics() + require.Equal(t, 1, ilms.Len()) + return ilms.At(0).Metrics() +} + +func TestScrapeMetrics_NewError(t *testing.T) { + skipTestOnUnsupportedOS(t) + + _, err := newProcessScraper(&Config{Include: MatchConfig{Names: []string{"test"}}}) + require.Error(t, err) + require.Regexp(t, "^error creating process include filters:", err.Error()) + + _, err = newProcessScraper(&Config{Exclude: MatchConfig{Names: []string{"test"}}}) + require.Error(t, err) + require.Regexp(t, "^error creating process exclude filters:", err.Error()) +} + +func TestScrapeMetrics_GetProcessesError(t *testing.T) { + skipTestOnUnsupportedOS(t) + + scraper, err := newProcessScraper(&Config{}) + require.NoError(t, err, "Failed to create process scraper: %v", err) + + scraper.getProcessHandles = func() (processHandles, error) { return nil, errors.New("err1") } + + err = scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize process scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) + assert.EqualError(t, err, "err1") + assert.Equal(t, 0, metrics.Len()) + assert.False(t, consumererror.IsPartialScrapeError(err)) +} + +type processHandlesMock struct { + handles []*processHandleMock +} + +func (p *processHandlesMock) Pid(index int) int32 { + return 1 +} + +func (p *processHandlesMock) At(index int) processHandle { + 
return p.handles[index] +} + +func (p *processHandlesMock) Len() int { + return len(p.handles) +} + +type processHandleMock struct { + mock.Mock +} + +func (p *processHandleMock) Name() (ret string, err error) { + args := p.MethodCalled("Name") + return args.String(0), args.Error(1) +} + +func (p *processHandleMock) Exe() (string, error) { + args := p.MethodCalled("Exe") + return args.String(0), args.Error(1) +} + +func (p *processHandleMock) Username() (string, error) { + args := p.MethodCalled("Username") + return args.String(0), args.Error(1) +} + +func (p *processHandleMock) Cmdline() (string, error) { + args := p.MethodCalled("Cmdline") + return args.String(0), args.Error(1) +} + +func (p *processHandleMock) CmdlineSlice() ([]string, error) { + args := p.MethodCalled("CmdlineSlice") + return args.Get(0).([]string), args.Error(1) +} + +func (p *processHandleMock) Times() (*cpu.TimesStat, error) { + args := p.MethodCalled("Times") + return args.Get(0).(*cpu.TimesStat), args.Error(1) +} + +func (p *processHandleMock) MemoryInfo() (*process.MemoryInfoStat, error) { + args := p.MethodCalled("MemoryInfo") + return args.Get(0).(*process.MemoryInfoStat), args.Error(1) +} + +func (p *processHandleMock) IOCounters() (*process.IOCountersStat, error) { + args := p.MethodCalled("IOCounters") + return args.Get(0).(*process.IOCountersStat), args.Error(1) +} + +func newDefaultHandleMock() *processHandleMock { + handleMock := &processHandleMock{} + handleMock.On("Username").Return("username", nil) + handleMock.On("Cmdline").Return("cmdline", nil) + handleMock.On("CmdlineSlice").Return([]string{"cmdline"}, nil) + handleMock.On("Times").Return(&cpu.TimesStat{}, nil) + handleMock.On("MemoryInfo").Return(&process.MemoryInfoStat{}, nil) + handleMock.On("IOCounters").Return(&process.IOCountersStat{}, nil) + return handleMock +} + +func TestScrapeMetrics_Filtered(t *testing.T) { + skipTestOnUnsupportedOS(t) + + type testCase struct { + name string + names []string + include []string + exclude []string + expectedNames []string + } + + testCases := []testCase{ + { + name: "No Filter", + names: []string{"test1", "test2"}, + expectedNames: []string{"test1", "test2"}, + }, + { + name: "Include All", + names: []string{"test1", "test2"}, + include: []string{"test*"}, + expectedNames: []string{"test1", "test2"}, + }, + { + name: "Include One", + names: []string{"test1", "test2"}, + include: []string{"test1"}, + expectedNames: []string{"test1"}, + }, + { + name: "Exclude All", + names: []string{"test1", "test2"}, + exclude: []string{"test*"}, + expectedNames: []string{}, + }, + { + name: "Include & Exclude", + names: []string{"test1", "test2"}, + include: []string{"test*"}, + exclude: []string{"test2"}, + expectedNames: []string{"test1"}, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + config := &Config{} + + if len(test.include) > 0 { + config.Include = MatchConfig{ + Names: test.include, + Config: filterset.Config{MatchType: filterset.Regexp}, + } + } + if len(test.exclude) > 0 { + config.Exclude = MatchConfig{ + Names: test.exclude, + Config: filterset.Config{MatchType: filterset.Regexp}, + } + } + + scraper, err := newProcessScraper(config) + require.NoError(t, err, "Failed to create process scraper: %v", err) + err = scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize process scraper: %v", err) + + handles := make([]*processHandleMock, 0, len(test.names)) + for _, name := range
test.names { + handleMock := newDefaultHandleMock() + handleMock.On("Name").Return(name, nil) + handleMock.On("Exe").Return(name, nil) + handles = append(handles, handleMock) + } + + scraper.getProcessHandles = func() (processHandles, error) { + return &processHandlesMock{handles: handles}, nil + } + + resourceMetrics, err := scraper.scrape(context.Background()) + require.NoError(t, err) + + assert.Equal(t, len(test.expectedNames), resourceMetrics.Len()) + for i, expectedName := range test.expectedNames { + rm := resourceMetrics.At(i) + name, _ := rm.Resource().Attributes().Get(conventions.AttributeProcessExecutableName) + assert.Equal(t, expectedName, name.StringVal()) + } + }) + } +} + +func TestScrapeMetrics_ProcessErrors(t *testing.T) { + skipTestOnUnsupportedOS(t) + + type testCase struct { + name string + osFilter string + nameError error + exeError error + usernameError error + cmdlineError error + timesError error + memoryInfoError error + ioCountersError error + expectedError string + } + + testCases := []testCase{ + { + name: "Name Error", + osFilter: "windows", + nameError: errors.New("err1"), + expectedError: `error reading process name for pid 1: err1`, + }, + { + name: "Exe Error", + exeError: errors.New("err1"), + expectedError: `error reading process name for pid 1: err1`, + }, + { + name: "Cmdline Error", + cmdlineError: errors.New("err2"), + expectedError: `error reading command for process "test" (pid 1): err2`, + }, + { + name: "Username Error", + usernameError: errors.New("err3"), + expectedError: `error reading username for process "test" (pid 1): err3`, + }, + { + name: "Times Error", + timesError: errors.New("err4"), + expectedError: `error reading cpu times for process "test" (pid 1): err4`, + }, + { + name: "Memory Info Error", + memoryInfoError: errors.New("err5"), + expectedError: `error reading memory info for process "test" (pid 1): err5`, + }, + { + name: "IO Counters Error", + ioCountersError: errors.New("err6"), + expectedError: `error reading disk usage for process "test" (pid 1): err6`, + }, + { + name: "Multiple Errors", + cmdlineError: errors.New("err2"), + usernameError: errors.New("err3"), + timesError: errors.New("err4"), + memoryInfoError: errors.New("err5"), + ioCountersError: errors.New("err6"), + expectedError: `[[error reading command for process "test" (pid 1): err2; ` + + `error reading username for process "test" (pid 1): err3]; ` + + `error reading cpu times for process "test" (pid 1): err4; ` + + `error reading memory info for process "test" (pid 1): err5; ` + + `error reading disk usage for process "test" (pid 1): err6]`, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + if test.osFilter == runtime.GOOS { + t.Skipf("skipping test %v on %v", test.name, runtime.GOOS) + } + + scraper, err := newProcessScraper(&Config{}) + require.NoError(t, err, "Failed to create process scraper: %v", err) + err = scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize process scraper: %v", err) + + username := "username" + if test.usernameError != nil { + username = "" + } + + handleMock := &processHandleMock{} + handleMock.On("Name").Return("test", test.nameError) + handleMock.On("Exe").Return("test", test.exeError) + handleMock.On("Username").Return(username, test.usernameError) + handleMock.On("Cmdline").Return("cmdline", test.cmdlineError) + handleMock.On("CmdlineSlice").Return([]string{"cmdline"}, test.cmdlineError) + 
handleMock.On("Times").Return(&cpu.TimesStat{}, test.timesError) + handleMock.On("MemoryInfo").Return(&process.MemoryInfoStat{}, test.memoryInfoError) + handleMock.On("IOCounters").Return(&process.IOCountersStat{}, test.ioCountersError) + + scraper.getProcessHandles = func() (processHandles, error) { + return &processHandlesMock{handles: []*processHandleMock{handleMock}}, nil + } + + resourceMetrics, err := scraper.scrape(context.Background()) + + md := pdata.NewMetrics() + resourceMetrics.MoveAndAppendTo(md.ResourceMetrics()) + expectedResourceMetricsLen, expectedMetricsLen := getExpectedLengthOfReturnedMetrics(test.nameError, test.exeError, test.timesError, test.memoryInfoError, test.ioCountersError) + assert.Equal(t, expectedResourceMetricsLen, md.ResourceMetrics().Len()) + assert.Equal(t, expectedMetricsLen, md.MetricCount()) + + assert.EqualError(t, err, test.expectedError) + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + expectedFailures := getExpectedScrapeFailures(test.nameError, test.exeError, test.timesError, test.memoryInfoError, test.ioCountersError) + assert.Equal(t, expectedFailures, err.(consumererror.PartialScrapeError).Failed) + } + }) + } +} + +func getExpectedLengthOfReturnedMetrics(nameError, exeError, timeError, memError, diskError error) (int, int) { + if nameError != nil || exeError != nil { + return 0, 0 + } + + expectedLen := 0 + if timeError == nil { + expectedLen += cpuMetricsLen + } + if memError == nil { + expectedLen += memoryMetricsLen + } + if diskError == nil { + expectedLen += diskMetricsLen + } + return 1, expectedLen +} + +func getExpectedScrapeFailures(nameError, exeError, timeError, memError, diskError error) int { + expectedResourceMetricsLen, expectedMetricsLen := getExpectedLengthOfReturnedMetrics(nameError, exeError, timeError, memError, diskError) + if expectedResourceMetricsLen == 0 { + return 1 + } + + return metricsLen - expectedMetricsLen +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go new file mode 100644 index 00000000000..45d0dfd345c --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build windows + +package processscraper + +import ( + "path/filepath" + "regexp" + + "github.com/shirou/gopsutil/cpu" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const cpuStatesLen = 2 + +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { + initializeCPUTimeDataPoint(ddps.At(0), startTime, now, cpuTime.User, userStateLabelValue) + initializeCPUTimeDataPoint(ddps.At(1), startTime, now, cpuTime.System, systemStateLabelValue) +} + +func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, value float64, stateLabel string) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(stateLabelName, stateLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func getProcessExecutable(proc processHandle) (*executableMetadata, error) { + exe, err := proc.Exe() + if err != nil { + return nil, err + } + + name := filepath.Base(exe) + executable := &executableMetadata{name: name, path: exe} + return executable, nil +} + +// matches the first argument before an unquoted space or slash +var cmdRegex = regexp.MustCompile(`^((?:[^"]*?"[^"]*?")*?[^"]*?)(?:[ \/]|$)`) + +func getProcessCommand(proc processHandle) (*commandMetadata, error) { + cmdline, err := proc.Cmdline() + if err != nil { + return nil, err + } + + cmd := cmdline + match := cmdRegex.FindStringSubmatch(cmdline) + if match != nil { + cmd = match[1] + } + + command := &commandMetadata{command: cmd, commandLine: cmdline} + return command, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/config.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/config.go new file mode 100644 index 00000000000..dddf062ccb9 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/config.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swapscraper + +import "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + +// Config relating to Swap Metric Scraper. +type Config struct { + internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/factory.go new file mode 100644 index 00000000000..88ce7250b6c --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/factory.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swapscraper + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +// This file implements Factory for Swap scraper. + +const ( + // The value of "type" key in configuration. + TypeStr = "swap" +) + +// Factory is the Factory for scraper. +type Factory struct { +} + +// CreateDefaultConfig creates the default configuration for the Scraper. +func (f *Factory) CreateDefaultConfig() internal.Config { + return &Config{} +} + +// CreateMetricsScraper creates a scraper based on provided config. +func (f *Factory) CreateMetricsScraper( + ctx context.Context, + _ *zap.Logger, + config internal.Config, +) (scraperhelper.MetricsScraper, error) { + cfg := config.(*Config) + s := newSwapScraper(ctx, cfg) + + ms := scraperhelper.NewMetricsScraper( + TypeStr, + s.scrape, + scraperhelper.WithStart(s.start), + ) + + return ms, nil +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/factory_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/factory_test.go new file mode 100644 index 00000000000..a3668fdcf8d --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/factory_test.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swapscraper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := &Factory{} + cfg := factory.CreateDefaultConfig() + assert.IsType(t, &Config{}, cfg) +} + +func TestCreateMetricsScraper(t *testing.T) { + factory := &Factory{} + cfg := &Config{} + + scraper, err := factory.CreateMetricsScraper(context.Background(), zap.NewNop(), cfg) + assert.NoError(t, err) + assert.NotNil(t, scraper) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/pagefile_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/pagefile_windows.go new file mode 100644 index 00000000000..e25b1d9620a --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/pagefile_windows.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package swapscraper + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modKernel32 = windows.NewLazySystemDLL("kernel32.dll") + modPsapi = windows.NewLazySystemDLL("psapi.dll") + + procGetNativeSystemInfo = modKernel32.NewProc("GetNativeSystemInfo") + procEnumPageFilesW = modPsapi.NewProc("EnumPageFilesW") +) + +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +func getPageSize() uint64 { + var sysInfo systemInfo + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sysInfo))) + return uint64(sysInfo.dwPageSize) +} + +type pageFileData struct { + name string + usedPages uint64 + totalPages uint64 +} + +// system type as defined in https://docs.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-enum_page_file_information +type enumPageFileInformation struct { + cb uint32 + reserved uint32 + totalSize uint64 + totalInUse uint64 + peakUsage uint64 +} + +func getPageFileStats() ([]*pageFileData, error) { + // the following system call invokes the supplied callback function once for each page file before returning + // see https://docs.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-enumpagefilesw + var pageFiles []*pageFileData + result, _, _ := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&pageFiles))) + if result == 0 { + return nil, windows.GetLastError() + } + + return pageFiles, nil +} + +// system callback as defined in https://docs.microsoft.com/en-us/windows/win32/api/psapi/nc-psapi-penum_page_file_callbackw +func pEnumPageFileCallbackW(pageFiles *[]*pageFileData, enumPageFileInfo *enumPageFileInformation, lpFilenamePtr *[syscall.MAX_LONG_PATH]uint16) *bool { + pageFileName := syscall.UTF16ToString((*lpFilenamePtr)[:]) + + pfData := &pageFileData{ + name: pageFileName, + usedPages: enumPageFileInfo.totalInUse, + totalPages: enumPageFileInfo.totalSize, + } + + *pageFiles = append(*pageFiles, pfData) + + // return true to continue enumerating page files + ret := true + return &ret +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go new file mode 100644 index 00000000000..649b14c1042 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swapscraper + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// labels + +const ( + deviceLabelName = "device" + directionLabelName = "direction" + stateLabelName = "state" + typeLabelName = "type" +) + +// direction label values + +const ( + inDirectionLabelValue = "page_in" + outDirectionLabelValue = "page_out" +) + +// state label values + +const ( + cachedLabelValue = "cached" + freeLabelValue = "free" + usedLabelValue = "used" +) + +// type label values + +const ( + majorTypeLabelValue = "major" + minorTypeLabelValue = "minor" +) + +var swapUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.swap.usage") + metric.SetDescription("Swap (unix) or pagefile (windows) usage.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var swapPagingDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.swap.paging_ops") + metric.SetDescription("The number of paging operations.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() + +var swapPageFaultsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.SetName("system.swap.page_faults") + metric.SetDescription("The number of page faults.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +}() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go new file mode 100644 index 00000000000..1de7e3afb77 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go @@ -0,0 +1,160 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
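// Editor's note: a hedged note on how the state label values above are
// populated on non-Windows platforms (this mirrors initializeSwapUsageMetric
// in the file that follows). gopsutil only reports totals, so the "used"
// state is derived rather than read directly:
//
//	used   = SwapTotal - SwapFree - SwapCached
//	free   = SwapFree
//	cached = SwapCached
//
// For example, with SwapTotal = 8 GiB, SwapFree = 6 GiB and SwapCached = 1 GiB
// the scraper emits used = 1 GiB, free = 6 GiB and cached = 1 GiB.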
+ +// +build !windows + +package swapscraper + +import ( + "context" + "time" + + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/mem" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +const ( + swapUsageMetricsLen = 1 + pagingMetricsLen = 2 +) + +// scraper for Swap Metrics +type scraper struct { + config *Config + startTime pdata.TimestampUnixNano + + // for mocking + bootTime func() (uint64, error) + virtualMemory func() (*mem.VirtualMemoryStat, error) + swapMemory func() (*mem.SwapMemoryStat, error) +} + +// newSwapScraper creates a Swap Scraper +func newSwapScraper(_ context.Context, cfg *Config) *scraper { + return &scraper{config: cfg, bootTime: host.BootTime, virtualMemory: mem.VirtualMemory, swapMemory: mem.SwapMemory} +} + +func (s *scraper) start(context.Context, component.Host) error { + bootTime, err := s.bootTime() + if err != nil { + return err + } + + s.startTime = pdata.TimestampUnixNano(bootTime * 1e9) + return nil +} + +func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + var errors []error + + err := s.scrapeAndAppendSwapUsageMetric(metrics) + if err != nil { + errors = append(errors, err) + } + + err = s.scrapeAndAppendPagingMetrics(metrics) + if err != nil { + errors = append(errors, err) + } + + return metrics, scraperhelper.CombineScrapeErrors(errors) +} + +func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics pdata.MetricSlice) error { + now := internal.TimeToUnixNano(time.Now()) + vmem, err := s.virtualMemory() + if err != nil { + return consumererror.NewPartialScrapeError(err, swapUsageMetricsLen) + } + + idx := metrics.Len() + metrics.Resize(idx + swapUsageMetricsLen) + initializeSwapUsageMetric(metrics.At(idx), now, vmem) + return nil +} + +func initializeSwapUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, vmem *mem.VirtualMemoryStat) { + swapUsageDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(3) + initializeSwapUsageDataPoint(idps.At(0), now, usedLabelValue, int64(vmem.SwapTotal-vmem.SwapFree-vmem.SwapCached)) + initializeSwapUsageDataPoint(idps.At(1), now, freeLabelValue, int64(vmem.SwapFree)) + initializeSwapUsageDataPoint(idps.At(2), now, cachedLabelValue, int64(vmem.SwapCached)) +} + +func initializeSwapUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(stateLabelName, stateLabel) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func (s *scraper) scrapeAndAppendPagingMetrics(metrics pdata.MetricSlice) error { + now := internal.TimeToUnixNano(time.Now()) + swap, err := s.swapMemory() + if err != nil { + return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + } + + idx := metrics.Len() + metrics.Resize(idx + pagingMetricsLen) + initializePagingMetric(metrics.At(idx+0), s.startTime, now, swap) + initializePageFaultsMetric(metrics.At(idx+1), s.startTime, now, swap) + return nil +} + +func initializePagingMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, swap *mem.SwapMemoryStat) { + swapPagingDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(4) + initializePagingDataPoint(idps.At(0), startTime, now, 
majorTypeLabelValue, inDirectionLabelValue, int64(swap.Sin)) + initializePagingDataPoint(idps.At(1), startTime, now, majorTypeLabelValue, outDirectionLabelValue, int64(swap.Sout)) + initializePagingDataPoint(idps.At(2), startTime, now, minorTypeLabelValue, inDirectionLabelValue, int64(swap.PgIn)) + initializePagingDataPoint(idps.At(3), startTime, now, minorTypeLabelValue, outDirectionLabelValue, int64(swap.PgOut)) +} + +func initializePagingDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, typeLabel string, directionLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(typeLabelName, typeLabel) + labelsMap.Insert(directionLabelName, directionLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func initializePageFaultsMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, swap *mem.SwapMemoryStat) { + swapPageFaultsDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(1) + initializePageFaultDataPoint(idps.At(0), startTime, now, minorTypeLabelValue, int64(swap.PgFault)) + // TODO add swap.PgMajFault once available in gopsutil +} + +func initializePageFaultDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, typeLabel string, value int64) { + dataPoint.LabelsMap().Insert(typeLabelName, typeLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others_test.go new file mode 100644 index 00000000000..b4b908feca7 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others_test.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
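// Editor's note: a hedged sketch of the error-combining behavior the tests
// below rely on: each failed sub-scrape contributes its declared metric count
// to the combined PartialScrapeError's Failed total.
//
//	errs := []error{
//		consumererror.NewPartialScrapeError(errors.New("err1"), swapUsageMetricsLen),
//		consumererror.NewPartialScrapeError(errors.New("err2"), pagingMetricsLen),
//	}
//	combined := scraperhelper.CombineScrapeErrors(errs)
//	// consumererror.IsPartialScrapeError(combined) == true
//	// combined.(consumererror.PartialScrapeError).Failed ==
//	//	swapUsageMetricsLen + pagingMetricsLen (1 + 2 = 3 on non-Windows)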
+ +// +build !windows + +package swapscraper + +import ( + "context" + "errors" + "testing" + + "github.com/shirou/gopsutil/mem" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +func TestScrape_Errors(t *testing.T) { + type testCase struct { + name string + virtualMemoryFunc func() (*mem.VirtualMemoryStat, error) + swapMemoryFunc func() (*mem.SwapMemoryStat, error) + expectedError string + expectedErrCount int + } + + testCases := []testCase{ + { + name: "virtualMemoryError", + virtualMemoryFunc: func() (*mem.VirtualMemoryStat, error) { return nil, errors.New("err1") }, + expectedError: "err1", + expectedErrCount: swapUsageMetricsLen, + }, + { + name: "swapMemoryError", + swapMemoryFunc: func() (*mem.SwapMemoryStat, error) { return nil, errors.New("err2") }, + expectedError: "err2", + expectedErrCount: pagingMetricsLen, + }, + { + name: "multipleErrors", + virtualMemoryFunc: func() (*mem.VirtualMemoryStat, error) { return nil, errors.New("err1") }, + swapMemoryFunc: func() (*mem.SwapMemoryStat, error) { return nil, errors.New("err2") }, + expectedError: "[err1; err2]", + expectedErrCount: swapUsageMetricsLen + pagingMetricsLen, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newSwapScraper(context.Background(), &Config{}) + if test.virtualMemoryFunc != nil { + scraper.virtualMemory = test.virtualMemoryFunc + } + if test.swapMemoryFunc != nil { + scraper.swapMemory = test.swapMemoryFunc + } + + err := scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize swap scraper: %v", err) + + _, err = scraper.scrape(context.Background()) + assert.EqualError(t, err, test.expectedError) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, test.expectedErrCount, err.(consumererror.PartialScrapeError).Failed) + } + }) + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go new file mode 100644 index 00000000000..4331fedc285 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
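// Editor's note: a hedged worked example of the start-time conversion used
// throughout these scrapers. host.BootTime returns seconds since the Unix
// epoch, while pdata timestamps are nanoseconds, hence the *1e9:
//
//	bootTime := uint64(100)                              // seconds since epoch
//	startTime := pdata.TimestampUnixNano(bootTime * 1e9) // 100_000_000_000 ns
//
// which is exactly the expectedStartTime (100 * 1e9) asserted by the
// "Validate Start Time" case in the test that follows.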
+ +package swapscraper + +import ( + "context" + "errors" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" +) + +func TestScrape(t *testing.T) { + type testCase struct { + name string + bootTimeFunc func() (uint64, error) + expectedStartTime pdata.TimestampUnixNano + initializationErr string + } + + testCases := []testCase{ + { + name: "Standard", + }, + { + name: "Validate Start Time", + bootTimeFunc: func() (uint64, error) { return 100, nil }, + expectedStartTime: 100 * 1e9, + }, + { + name: "Boot Time Error", + bootTimeFunc: func() (uint64, error) { return 0, errors.New("err1") }, + initializationErr: "err1", + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newSwapScraper(context.Background(), &Config{}) + if test.bootTimeFunc != nil { + scraper.bootTime = test.bootTimeFunc + } + + err := scraper.start(context.Background(), componenttest.NewNopHost()) + if test.initializationErr != "" { + assert.EqualError(t, err, test.initializationErr) + return + } + require.NoError(t, err, "Failed to initialize swap scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) + require.NoError(t, err) + + // expect 3 metrics (windows does not currently support page_faults metric) + expectedMetrics := 3 + if runtime.GOOS == "windows" { + expectedMetrics = 2 + } + assert.Equal(t, expectedMetrics, metrics.Len()) + + assertSwapUsageMetricValid(t, metrics.At(0)) + internal.AssertSameTimeStampForMetrics(t, metrics, 0, 1) + + assertPagingMetricValid(t, metrics.At(1), test.expectedStartTime) + if runtime.GOOS != "windows" { + assertPageFaultsMetricValid(t, metrics.At(2), test.expectedStartTime) + } + internal.AssertSameTimeStampForMetrics(t, metrics, 1, metrics.Len()) + }) + } +} + +func assertSwapUsageMetricValid(t *testing.T, hostSwapUsageMetric pdata.Metric) { + internal.AssertDescriptorEqual(t, swapUsageDescriptor, hostSwapUsageMetric) + + // it's valid for a system to have no swap space / paging file, so if no data points were returned, do no validation + if hostSwapUsageMetric.IntSum().DataPoints().Len() == 0 { + return + } + + // expect at least used, free & cached datapoint + expectedDataPoints := 3 + // windows does not return a cached datapoint + if runtime.GOOS == "windows" { + expectedDataPoints = 2 + } + + assert.GreaterOrEqual(t, hostSwapUsageMetric.IntSum().DataPoints().Len(), expectedDataPoints) + internal.AssertIntSumMetricLabelHasValue(t, hostSwapUsageMetric, 0, stateLabelName, usedLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, hostSwapUsageMetric, 1, stateLabelName, freeLabelValue) + // on non-windows, also expect a cached state label + if runtime.GOOS != "windows" { + internal.AssertIntSumMetricLabelHasValue(t, hostSwapUsageMetric, 2, stateLabelName, cachedLabelValue) + } + // on windows, also expect the page file device name label + if runtime.GOOS == "windows" { + internal.AssertIntSumMetricLabelExists(t, hostSwapUsageMetric, 0, deviceLabelName) + internal.AssertIntSumMetricLabelExists(t, hostSwapUsageMetric, 1, deviceLabelName) + } +} + +func assertPagingMetricValid(t *testing.T, pagingMetric pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, swapPagingDescriptor, pagingMetric) + if startTime != 0 { + 
internal.AssertIntSumMetricStartTimeEquals(t, pagingMetric, startTime) + } + + // expect an in & out datapoint, for both major and minor paging types (windows does not currently support minor paging data) + expectedDataPoints := 4 + if runtime.GOOS == "windows" { + expectedDataPoints = 2 + } + assert.Equal(t, expectedDataPoints, pagingMetric.IntSum().DataPoints().Len()) + + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 0, typeLabelName, majorTypeLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 0, directionLabelName, inDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 1, typeLabelName, majorTypeLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 1, directionLabelName, outDirectionLabelValue) + if runtime.GOOS != "windows" { + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 2, typeLabelName, minorTypeLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 2, directionLabelName, inDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 3, typeLabelName, minorTypeLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 3, directionLabelName, outDirectionLabelValue) + } +} + +func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, swapPageFaultsDescriptor, pageFaultsMetric) + if startTime != 0 { + internal.AssertIntSumMetricStartTimeEquals(t, pageFaultsMetric, startTime) + } + + assert.Equal(t, 1, pageFaultsMetric.IntSum().DataPoints().Len()) + internal.AssertIntSumMetricLabelHasValue(t, pageFaultsMetric, 0, typeLabelName, minorTypeLabelValue) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go new file mode 100644 index 00000000000..a48baae482e --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
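// Editor's note (hedged): on Windows the paging metric below is built from
// the "Page Reads/sec" and "Page Writes/sec" performance counters. Despite
// the "/sec" names, the raw values scraped here are cumulative counts (rates
// are normally derived by sampling twice), which is why they feed a
// monotonic, cumulative IntSum with a boot-time start timestamp.
//
//	// hypothetical two-sample rate derivation by a downstream consumer,
//	// where c1/c2 are raw counter values observed at times t1/t2:
//	ratePerSec := float64(c2-c1) / t2.Sub(t1).Seconds()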
+ +// +build windows + +package swapscraper + +import ( + "context" + "sync" + "time" + + "github.com/shirou/gopsutil/host" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +const ( + swapUsageMetricsLen = 1 + pagingMetricsLen = 1 + + memory = "Memory" + + pageReadsPerSec = "Page Reads/sec" + pageWritesPerSec = "Page Writes/sec" +) + +// scraper for Swap Metrics +type scraper struct { + config *Config + startTime pdata.TimestampUnixNano + + pageSize uint64 + + perfCounterScraper perfcounters.PerfCounterScraper + + // for mocking + bootTime func() (uint64, error) + pageFileStats func() ([]*pageFileData, error) +} + +var ( + once sync.Once + pageSize uint64 +) + +// newSwapScraper creates a Swap Scraper +func newSwapScraper(_ context.Context, cfg *Config) *scraper { + once.Do(func() { pageSize = getPageSize() }) + + return &scraper{config: cfg, pageSize: pageSize, perfCounterScraper: &perfcounters.PerfLibScraper{}, bootTime: host.BootTime, pageFileStats: getPageFileStats} +} + +func (s *scraper) start(context.Context, component.Host) error { + bootTime, err := s.bootTime() + if err != nil { + return err + } + + s.startTime = pdata.TimestampUnixNano(bootTime * 1e9) + + return s.perfCounterScraper.Initialize(memory) +} + +func (s *scraper) scrape(context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() + + var errors []error + + err := s.scrapeAndAppendSwapUsageMetric(metrics) + if err != nil { + errors = append(errors, err) + } + + err = s.scrapeAndAppendPagingMetric(metrics) + if err != nil { + errors = append(errors, err) + } + + return metrics, scraperhelper.CombineScrapeErrors(errors) +} + +func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics pdata.MetricSlice) error { + now := internal.TimeToUnixNano(time.Now()) + pageFiles, err := s.pageFileStats() + if err != nil { + return consumererror.NewPartialScrapeError(err, swapUsageMetricsLen) + } + + idx := metrics.Len() + metrics.Resize(idx + swapUsageMetricsLen) + s.initializeSwapUsageMetric(metrics.At(idx), now, pageFiles) + return nil +} + +func (s *scraper) initializeSwapUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, pageFiles []*pageFileData) { + swapUsageDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2 * len(pageFiles)) + + idx := 0 + for _, pageFile := range pageFiles { + initializeSwapUsageDataPoint(idps.At(idx+0), now, pageFile.name, usedLabelValue, int64(pageFile.usedPages*s.pageSize)) + initializeSwapUsageDataPoint(idps.At(idx+1), now, pageFile.name, freeLabelValue, int64((pageFile.totalPages-pageFile.usedPages)*s.pageSize)) + idx += 2 + } +} + +func initializeSwapUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, stateLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(deviceLabelName, deviceLabel) + labelsMap.Insert(stateLabelName, stateLabel) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} + +func (s *scraper) scrapeAndAppendPagingMetric(metrics pdata.MetricSlice) error { + now := internal.TimeToUnixNano(time.Now()) + + counters, err := s.perfCounterScraper.Scrape() + if err != nil { + return consumererror.NewPartialScrapeError(err, 
pagingMetricsLen) + } + + memoryObject, err := counters.GetObject(memory) + if err != nil { + return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + } + + memoryCounterValues, err := memoryObject.GetValues(pageReadsPerSec, pageWritesPerSec) + if err != nil { + return consumererror.NewPartialScrapeError(err, pagingMetricsLen) + } + + if len(memoryCounterValues) > 0 { + idx := metrics.Len() + metrics.Resize(idx + pagingMetricsLen) + initializePagingMetric(metrics.At(idx), s.startTime, now, memoryCounterValues[0]) + } + + return nil +} + +func initializePagingMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, memoryCounterValues *perfcounters.CounterValues) { + swapPagingDescriptor.CopyTo(metric) + + idps := metric.IntSum().DataPoints() + idps.Resize(2) + initializePagingDataPoint(idps.At(0), startTime, now, inDirectionLabelValue, memoryCounterValues.Values[pageReadsPerSec]) + initializePagingDataPoint(idps.At(1), startTime, now, outDirectionLabelValue, memoryCounterValues.Values[pageWritesPerSec]) +} + +func initializePagingDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, directionLabel string, value int64) { + labelsMap := dataPoint.LabelsMap() + labelsMap.Insert(typeLabelName, majorTypeLabelValue) + labelsMap.Insert(directionLabelName, directionLabel) + dataPoint.SetStartTime(startTime) + dataPoint.SetTimestamp(now) + dataPoint.SetValue(value) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows_test.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows_test.go new file mode 100644 index 00000000000..08ddbbcc177 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows_test.go @@ -0,0 +1,127 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build windows + +package swapscraper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" +) + +func TestScrape_Errors(t *testing.T) { + type testCase struct { + name string + pageSize uint64 + getPageFileStats func() ([]*pageFileData, error) + scrapeErr error + getObjectErr error + getValuesErr error + expectedErr string + expectedErrCount int + expectedUsedValue int64 + expectedFreeValue int64 + } + + testPageSize := uint64(4096) + testPageFileData := &pageFileData{usedPages: 100, totalPages: 300} + + testCases := []testCase{ + { + name: "standard", + pageSize: testPageSize, + getPageFileStats: func() ([]*pageFileData, error) { + return []*pageFileData{testPageFileData}, nil + }, + expectedUsedValue: int64(testPageFileData.usedPages * testPageSize), + expectedFreeValue: int64((testPageFileData.totalPages - testPageFileData.usedPages) * testPageSize), + }, + { + name: "pageFileError", + getPageFileStats: func() ([]*pageFileData, error) { return nil, errors.New("err1") }, + expectedErr: "err1", + expectedErrCount: swapUsageMetricsLen, + }, + { + name: "scrapeError", + scrapeErr: errors.New("err1"), + expectedErr: "err1", + expectedErrCount: pagingMetricsLen, + }, + { + name: "getObjectErr", + getObjectErr: errors.New("err1"), + expectedErr: "err1", + expectedErrCount: pagingMetricsLen, + }, + { + name: "getValuesErr", + getValuesErr: errors.New("err1"), + expectedErr: "err1", + expectedErrCount: pagingMetricsLen, + }, + { + name: "multipleErrors", + getPageFileStats: func() ([]*pageFileData, error) { return nil, errors.New("err1") }, + getObjectErr: errors.New("err2"), + expectedErr: "[err1; err2]", + expectedErrCount: swapUsageMetricsLen + pagingMetricsLen, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + scraper := newSwapScraper(context.Background(), &Config{}) + if test.getPageFileStats != nil { + scraper.pageFileStats = test.getPageFileStats + } + if test.pageSize > 0 { + scraper.pageSize = test.pageSize + } else { + assert.Greater(t, pageSize, uint64(0)) + assert.Zero(t, pageSize%4096) // page size on Windows should always be a multiple of 4KB + } + scraper.perfCounterScraper = perfcounters.NewMockPerfCounterScraperError(test.scrapeErr, test.getObjectErr, test.getValuesErr) + + err := scraper.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to initialize swap scraper: %v", err) + + metrics, err := scraper.scrape(context.Background()) + if test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + + isPartial := consumererror.IsPartialScrapeError(err) + assert.True(t, isPartial) + if isPartial { + assert.Equal(t, test.expectedErrCount, err.(consumererror.PartialScrapeError).Failed) + } + + return + } + + swapUsageMetric := metrics.At(0) + assert.Equal(t, test.expectedUsedValue, swapUsageMetric.IntSum().DataPoints().At(0).Value()) + assert.Equal(t, test.expectedFreeValue, swapUsageMetric.IntSum().DataPoints().At(1).Value()) + }) + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go new file mode 100644 index 00000000000..ccab4cca48f --- /dev/null +++ 
b/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go @@ -0,0 +1,110 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func AssertContainsAttribute(t *testing.T, attr pdata.AttributeMap, key string) { + _, ok := attr.Get(key) + assert.True(t, ok) +} + +func AssertDescriptorEqual(t *testing.T, expected pdata.Metric, actual pdata.Metric) { + assert.Equal(t, expected.Name(), actual.Name()) + assert.Equal(t, expected.Description(), actual.Description()) + assert.Equal(t, expected.Unit(), actual.Unit()) + assert.Equal(t, expected.DataType(), actual.DataType()) +} + +func AssertIntSumMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) { + val, ok := metric.IntSum().DataPoints().At(index).LabelsMap().Get(labelName) + assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) + assert.Equal(t, expectedVal, val) +} + +func AssertIntGaugeMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) { + val, ok := metric.IntGauge().DataPoints().At(index).LabelsMap().Get(labelName) + assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) + assert.Equal(t, expectedVal, val) +} + +func AssertDoubleSumMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) { + val, ok := metric.DoubleSum().DataPoints().At(index).LabelsMap().Get(labelName) + assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) + assert.Equal(t, expectedVal, val) +} + +func AssertIntSumMetricLabelExists(t *testing.T, metric pdata.Metric, index int, labelName string) { + _, ok := metric.IntSum().DataPoints().At(index).LabelsMap().Get(labelName) + assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) +} + +func AssertDoubleSumMetricLabelExists(t *testing.T, metric pdata.Metric, index int, labelName string) { + _, ok := metric.DoubleSum().DataPoints().At(index).LabelsMap().Get(labelName) + assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) +} + +func AssertIntSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.TimestampUnixNano) { + idps := metric.IntSum().DataPoints() + for i := 0; i < idps.Len(); i++ { + require.Equal(t, startTime, idps.At(i).StartTime()) + } +} + +func AssertDoubleSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.TimestampUnixNano) { + ddps := metric.DoubleSum().DataPoints() + for i := 0; i < ddps.Len(); i++ { + require.Equal(t, startTime, ddps.At(i).StartTime()) + } +} + +func AssertSameTimeStampForAllMetrics(t *testing.T, metrics pdata.MetricSlice) { + AssertSameTimeStampForMetrics(t, metrics, 0, metrics.Len()) +} + +func AssertSameTimeStampForMetrics(t 
*testing.T, metrics pdata.MetricSlice, startIdx, endIdx int) { + var ts pdata.TimestampUnixNano + for i := startIdx; i < endIdx; i++ { + metric := metrics.At(i) + + dt := metric.DataType() + if dt == pdata.MetricDataTypeIntSum { + idps := metric.IntSum().DataPoints() + for j := 0; j < idps.Len(); j++ { + if ts == 0 { + ts = idps.At(j).Timestamp() + } + require.Equalf(t, ts, idps.At(j).Timestamp(), "metrics contained different end timestamp values") + } + } + + if dt == pdata.MetricDataTypeDoubleSum { + ddps := metric.DoubleSum().DataPoints() + for j := 0; j < ddps.Len(); j++ { + if ts == 0 { + ts = ddps.At(j).Timestamp() + } + require.Equalf(t, ts, ddps.At(j).Timestamp(), "metrics contained different end timestamp values") + } + } + } +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/utils.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/utils.go new file mode 100644 index 00000000000..a4d9cd29d1f --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/utils.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TimeToUnixNano(t time.Time) pdata.TimestampUnixNano { + return pdata.TimestampUnixNano(uint64(t.UnixNano())) +} diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml b/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml new file mode 100644 index 00000000000..1233137be8a --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml @@ -0,0 +1,34 @@ +name: hostmetricsreceiver + +labels: + cpu: + description: CPU number starting at 0. + + cpu.state: + value: state + description: Breakdown of CPU usage by type. + enum: [idle, interrupt, nice, softirq, steal, system, user, wait] + + mem.state: + value: state + description: Breakdown of memory usage by type. + enum: [buffered, cached, inactive, free, slab_reclaimable, slab_unreclaimable, used] + +metrics: + system.cpu.time: + description: Total CPU seconds broken down by different states. + unit: s + data: + type: double sum + aggregation: cumulative + monotonic: true + labels: [cpu.state] + + system.memory.usage: + description: Bytes of memory in use. 
+    unit: By
+    labels: [mem.state]
+    data:
+      type: int sum
+      aggregation: cumulative
+      monotonic: false
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config-invalidscraperkey.yaml b/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config-invalidscraperkey.yaml
new file mode 100644
index 00000000000..6640f1d1e26
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config-invalidscraperkey.yaml
@@ -0,0 +1,18 @@
+receivers:
+  hostmetrics:
+    scrapers:
+      invalidscraperkey:
+
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    metrics:
+      receivers: [hostmetrics]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config-noscrapers.yaml b/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config-noscrapers.yaml
new file mode 100644
index 00000000000..94e60d478a4
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config-noscrapers.yaml
@@ -0,0 +1,15 @@
+receivers:
+  hostmetrics:
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    metrics:
+      receivers: [hostmetrics]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config.yaml b/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config.yaml
new file mode 100644
index 00000000000..9e9f2b0e15c
--- /dev/null
+++ b/internal/otel_collector/receiver/hostmetricsreceiver/testdata/config.yaml
@@ -0,0 +1,35 @@
+receivers:
+  hostmetrics:
+    scrapers:
+      cpu:
+  hostmetrics/customname:
+    collection_interval: 30s
+    scrapers:
+      cpu:
+      disk:
+      load:
+      filesystem:
+      memory:
+      network:
+        include:
+          interfaces: ["test1"]
+          match_type: "strict"
+      processes:
+      swap:
+      process:
+        include:
+          names: ["test2", "test3"]
+          match_type: "regexp"
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    metrics:
+      receivers: [hostmetrics]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/jaegerreceiver/README.md b/internal/otel_collector/receiver/jaegerreceiver/README.md
new file mode 100644
index 00000000000..782716ee8c9
--- /dev/null
+++ b/internal/otel_collector/receiver/jaegerreceiver/README.md
@@ -0,0 +1,96 @@
+# Jaeger Receiver
+
+Receives trace data in [Jaeger](https://www.jaegertracing.io/) format.
+
+Supported pipeline types: traces
+
+## Getting Started
+
+By default, the Jaeger receiver will not serve any protocol. A protocol must be
+named under the `protocols` object for the Jaeger receiver to start. The
+protocols below are supported; each supports an optional `endpoint`
+configuration parameter.
+
+- `grpc` (default `endpoint` = 0.0.0.0:14250)
+- `thrift_binary` (default `endpoint` = 0.0.0.0:6832)
+- `thrift_compact` (default `endpoint` = 0.0.0.0:6831)
+- `thrift_http` (default `endpoint` = 0.0.0.0:14268)
+
+Examples:
+
+```yaml
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+  jaeger/withendpoint:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:14260
+```
+
+## Advanced Configuration
+
+UDP protocols (currently `thrift_binary` and `thrift_compact`) allow setting additional
+server options:
+
+- `queue_size` (default 1000) sets the maximum number of requests held in the server queue awaiting processing
+- `max_packet_size` (default 65_000) sets the maximum UDP packet size
+- `workers` (default 10) sets the number of workers consuming the server queue
+- `socket_buffer_size` (default 0, i.e. no buffer) sets the buffer size of the connection socket, in bytes
+
+Examples:
+
+```yaml
+protocols:
+  thrift_binary:
+    endpoint: 0.0.0.0:6832
+    queue_size: 5_000
+    max_packet_size: 131_072
+    workers: 50
+    socket_buffer_size: 8_388_608
+```
+
+Several helper files provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md) including CORS
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+
+## Remote Sampling
+
+The Jaeger receiver also supports fetching sampling configuration from a remote
+collector. It works by proxying client requests for remote sampling
+configuration to the configured collector.
+
+   +------------+                  +-----------+              +---------------+
+   |            |       get        |           |    proxy     |               |
+   |   client   +--- sampling ---->+   agent   +------------->+   collector   |
+   |            |     strategy     |           |              |               |
+   +------------+                  +-----------+              +---------------+
+
+Proxying of remote sampling requests can be enabled by adding the following
+lines to the Jaeger receiver config:
+
+```yaml
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+    remote_sampling:
+      endpoint: "jaeger-collector:1234"
+```
+
+Remote sampling strategies can also be served directly by the collector by
+providing a sampling JSON file:
+
+```yaml
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+    remote_sampling:
+      strategy_file: "/etc/strategy.json"
+```
+
+Note: the `grpc` protocol must be enabled for this to work, as Jaeger serves its
+remote sampling strategies over gRPC.
diff --git a/internal/otel_collector/receiver/jaegerreceiver/config.go b/internal/otel_collector/receiver/jaegerreceiver/config.go
new file mode 100644
index 00000000000..8df7bee3fe8
--- /dev/null
+++ b/internal/otel_collector/receiver/jaegerreceiver/config.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
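+
+// For illustration (a sketch assuming standard mapstructure decoding; the
+// struct tags below are authoritative), a receiver entry such as
+//
+//	jaeger:
+//	  protocols:
+//	    thrift_compact:
+//	      endpoint: "0.0.0.0:6831"
+//	      queue_size: 5000
+//	  remote_sampling:
+//	    strategy_file: "/etc/strategy.json"
+//
+// decodes into Config: `protocols` fills Protocols, each UDP protocol fills a
+// ProtocolUDP (with ServerConfigUDP squashed into it), and `remote_sampling`
+// fills RemoteSamplingConfig.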
+ +package jaegerreceiver + +import ( + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" +) + +const ( + // The config field name to load the protocol map from + protocolsFieldName = "protocols" + + // Default UDP server options + defaultQueueSize = 1_000 + defaultMaxPacketSize = 65_000 + defaultServerWorkers = 10 + defaultSocketBufferSize = 0 +) + +// RemoteSamplingConfig defines config key for remote sampling fetch endpoint +type RemoteSamplingConfig struct { + HostEndpoint string `mapstructure:"host_endpoint"` + StrategyFile string `mapstructure:"strategy_file"` + configgrpc.GRPCClientSettings `mapstructure:",squash"` +} + +type Protocols struct { + GRPC *configgrpc.GRPCServerSettings `mapstructure:"grpc"` + ThriftHTTP *confighttp.HTTPServerSettings `mapstructure:"thrift_http"` + ThriftBinary *ProtocolUDP `mapstructure:"thrift_binary"` + ThriftCompact *ProtocolUDP `mapstructure:"thrift_compact"` +} + +type ProtocolUDP struct { + Endpoint string `mapstructure:"endpoint"` + ServerConfigUDP `mapstructure:",squash"` +} + +type ServerConfigUDP struct { + QueueSize int `mapstructure:"queue_size"` + MaxPacketSize int `mapstructure:"max_packet_size"` + Workers int `mapstructure:"workers"` + SocketBufferSize int `mapstructure:"socket_buffer_size"` +} + +func DefaultServerConfigUDP() ServerConfigUDP { + return ServerConfigUDP{ + QueueSize: defaultQueueSize, + MaxPacketSize: defaultMaxPacketSize, + Workers: defaultServerWorkers, + SocketBufferSize: defaultSocketBufferSize, + } +} + +// Config defines configuration for Jaeger receiver. +type Config struct { + configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + Protocols `mapstructure:"protocols"` + RemoteSampling *RemoteSamplingConfig `mapstructure:"remote_sampling"` +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/config_test.go b/internal/otel_collector/receiver/jaegerreceiver/config_test.go new file mode 100644 index 00000000000..50dbd717098 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/config_test.go @@ -0,0 +1,185 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
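+
+// A minimal sketch (not one of the upstream tests) of constructing a Config in
+// code with only the gRPC protocol enabled, as several factory tests do:
+//
+//	cfg := &Config{
+//		ReceiverSettings: configmodels.ReceiverSettings{TypeVal: typeStr, NameVal: typeStr},
+//		Protocols: Protocols{
+//			GRPC: &configgrpc.GRPCServerSettings{
+//				NetAddr: confignet.NetAddr{Endpoint: "0.0.0.0:14250", Transport: "tcp"},
+//			},
+//		},
+//	}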
+ +package jaegerreceiver + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/config/configtls" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 4) + + r1 := cfg.Receivers["jaeger/customname"].(*Config) + assert.Equal(t, r1, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "jaeger/customname", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:9876", + Transport: "tcp", + }, + }, + ThriftHTTP: &confighttp.HTTPServerSettings{ + Endpoint: ":3456", + }, + ThriftCompact: &ProtocolUDP{ + Endpoint: "0.0.0.0:456", + ServerConfigUDP: ServerConfigUDP{ + QueueSize: 100_000, + MaxPacketSize: 131_072, + Workers: 100, + SocketBufferSize: 65_536, + }, + }, + ThriftBinary: &ProtocolUDP{ + Endpoint: "0.0.0.0:789", + ServerConfigUDP: ServerConfigUDP{ + QueueSize: 1_000, + MaxPacketSize: 65_536, + Workers: 5, + SocketBufferSize: 0, + }, + }, + }, + RemoteSampling: &RemoteSamplingConfig{ + HostEndpoint: "0.0.0.0:5778", + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: "jaeger-collector:1234", + }, + StrategyFile: "/etc/strategies.json", + }, + }) + + rDefaults := cfg.Receivers["jaeger/defaults"].(*Config) + assert.Equal(t, rDefaults, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "jaeger/defaults", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCBindEndpoint, + Transport: "tcp", + }, + }, + ThriftHTTP: &confighttp.HTTPServerSettings{ + Endpoint: defaultHTTPBindEndpoint, + }, + ThriftCompact: &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + ServerConfigUDP: DefaultServerConfigUDP(), + }, + ThriftBinary: &ProtocolUDP{ + Endpoint: defaultThriftBinaryBindEndpoint, + ServerConfigUDP: DefaultServerConfigUDP(), + }, + }, + }) + + rMixed := cfg.Receivers["jaeger/mixed"].(*Config) + assert.Equal(t, rMixed, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "jaeger/mixed", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:9876", + Transport: "tcp", + }, + }, + ThriftCompact: &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + ServerConfigUDP: DefaultServerConfigUDP(), + }, + }, + }) + + tlsConfig := cfg.Receivers["jaeger/tls"].(*Config) + + assert.Equal(t, tlsConfig, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "jaeger/tls", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:9876", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + 
CertFile: "/test.crt", + KeyFile: "/test.key", + }, + }, + }, + ThriftHTTP: &confighttp.HTTPServerSettings{ + Endpoint: ":3456", + }, + }, + }) +} + +func TestFailedLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_typo_default_proto_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for jaeger: unknown protocols in the Jaeger receiver") + + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_proto_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for jaeger: 1 error(s) decoding:\n\n* 'protocols' has invalid keys: thrift_htttp") + + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_no_proto_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for jaeger: must specify at least one protocol when using the Jaeger receiver") + + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_empty_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for jaeger: empty config for Jaeger receiver") +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/errors.go b/internal/otel_collector/receiver/jaegerreceiver/errors.go new file mode 100644 index 00000000000..41d41846b5a --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/errors.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaegerreceiver + +type httpError struct { + msg string + statusCode int +} + +func (h httpError) Error() string { + return h.msg +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/factory.go b/internal/otel_collector/receiver/jaegerreceiver/factory.go new file mode 100644 index 00000000000..162e2f536f5 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/factory.go @@ -0,0 +1,255 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaegerreceiver + +// This file implements factory for Jaeger receiver. 
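+//
+// Typical use, mirroring the tests in factory_test.go (ctx, params, and
+// nextConsumer are supplied by the caller):
+//
+//	factory := NewFactory()
+//	cfg := factory.CreateDefaultConfig()
+//	receiver, err := factory.CreateTracesReceiver(ctx, params, cfg, nextConsumer)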
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+
+	"github.com/spf13/viper"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/confignet"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/receiver/receiverhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr = "jaeger"
+
+	// Protocol values.
+	protoGRPC          = "grpc"
+	protoThriftHTTP    = "thrift_http"
+	protoThriftBinary  = "thrift_binary"
+	protoThriftCompact = "thrift_compact"
+
+	// Default endpoints to bind to.
+	defaultGRPCBindEndpoint            = "0.0.0.0:14250"
+	defaultHTTPBindEndpoint            = "0.0.0.0:14268"
+	defaultThriftCompactBindEndpoint   = "0.0.0.0:6831"
+	defaultThriftBinaryBindEndpoint    = "0.0.0.0:6832"
+	defaultAgentRemoteSamplingHTTPPort = 5778
+)
+
+// NewFactory creates a factory for the Jaeger receiver.
+func NewFactory() component.ReceiverFactory {
+	return receiverhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		receiverhelper.WithTraces(createTraceReceiver),
+		receiverhelper.WithCustomUnmarshaler(customUnmarshaler))
+}
+
+// customUnmarshaler is used to add defaults for named but empty protocols.
+func customUnmarshaler(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	if componentViperSection == nil || len(componentViperSection.AllKeys()) == 0 {
+		return fmt.Errorf("empty config for Jaeger receiver")
+	}
+
+	componentViperSection.SetConfigType("yaml")
+
+	// UnmarshalExact will not set struct properties to nil even if no key is provided,
+	// so set the protocol structs to nil where the keys were omitted.
+	err := componentViperSection.UnmarshalExact(intoCfg)
+	if err != nil {
+		return err
+	}
+
+	receiverCfg := intoCfg.(*Config)
+
+	protocols := componentViperSection.GetStringMap(protocolsFieldName)
+	if len(protocols) == 0 {
+		return fmt.Errorf("must specify at least one protocol when using the Jaeger receiver")
+	}
+
+	knownProtocols := 0
+	if _, ok := protocols[protoGRPC]; !ok {
+		receiverCfg.GRPC = nil
+	} else {
+		knownProtocols++
+	}
+	if _, ok := protocols[protoThriftHTTP]; !ok {
+		receiverCfg.ThriftHTTP = nil
+	} else {
+		knownProtocols++
+	}
+	if _, ok := protocols[protoThriftBinary]; !ok {
+		receiverCfg.ThriftBinary = nil
+	} else {
+		knownProtocols++
+	}
+	if _, ok := protocols[protoThriftCompact]; !ok {
+		receiverCfg.ThriftCompact = nil
+	} else {
+		knownProtocols++
+	}
+	// UnmarshalExact ignores empty entries, such as a protocol key with no values,
+	// so a typo in a protocol name would otherwise silently leave the intended
+	// protocol disabled. Check that the configured protocols include only known
+	// protocols.
+	if len(protocols) != knownProtocols {
+		return fmt.Errorf("unknown protocols in the Jaeger receiver")
+	}
+	return nil
+}
+
+// createDefaultConfig creates the default configuration for the Jaeger receiver.
+func createDefaultConfig() configmodels.Receiver { + return &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCBindEndpoint, + Transport: "tcp", + }, + }, + ThriftHTTP: &confighttp.HTTPServerSettings{ + Endpoint: defaultHTTPBindEndpoint, + }, + ThriftBinary: &ProtocolUDP{ + Endpoint: defaultThriftBinaryBindEndpoint, + ServerConfigUDP: DefaultServerConfigUDP(), + }, + ThriftCompact: &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + ServerConfigUDP: DefaultServerConfigUDP(), + }, + }, + } +} + +// createTraceReceiver creates a trace receiver based on provided config. +func createTraceReceiver( + _ context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.TracesConsumer, +) (component.TracesReceiver, error) { + + // Convert settings in the source config to configuration struct + // that Jaeger receiver understands. + + rCfg := cfg.(*Config) + remoteSamplingConfig := rCfg.RemoteSampling + + var config configuration + // Set ports + if rCfg.Protocols.GRPC != nil { + var err error + config.CollectorGRPCPort, err = extractPortFromEndpoint(rCfg.Protocols.GRPC.NetAddr.Endpoint) + if err != nil { + return nil, fmt.Errorf("unable to extract port for GRPC: %w", err) + } + + config.CollectorGRPCOptions, err = rCfg.Protocols.GRPC.ToServerOption() + if err != nil { + return nil, err + } + } + + if rCfg.Protocols.ThriftHTTP != nil { + var err error + config.CollectorHTTPPort, err = extractPortFromEndpoint(rCfg.Protocols.ThriftHTTP.Endpoint) + if err != nil { + return nil, fmt.Errorf("unable to extract port for ThriftHTTP: %w", err) + } + } + + if rCfg.Protocols.ThriftBinary != nil { + config.AgentBinaryThriftConfig = rCfg.ThriftBinary.ServerConfigUDP + var err error + config.AgentBinaryThriftPort, err = extractPortFromEndpoint(rCfg.Protocols.ThriftBinary.Endpoint) + if err != nil { + return nil, fmt.Errorf("unable to extract port for ThriftBinary: %w", err) + } + } + + if rCfg.Protocols.ThriftCompact != nil { + config.AgentCompactThriftConfig = rCfg.ThriftCompact.ServerConfigUDP + var err error + config.AgentCompactThriftPort, err = extractPortFromEndpoint(rCfg.Protocols.ThriftCompact.Endpoint) + if err != nil { + return nil, fmt.Errorf("unable to extract port for ThriftCompact: %w", err) + } + } + + if remoteSamplingConfig != nil { + config.RemoteSamplingClientSettings = remoteSamplingConfig.GRPCClientSettings + if len(config.RemoteSamplingClientSettings.Endpoint) == 0 { + config.RemoteSamplingClientSettings.Endpoint = defaultGRPCBindEndpoint + } + + if len(remoteSamplingConfig.HostEndpoint) == 0 { + config.AgentHTTPPort = defaultAgentRemoteSamplingHTTPPort + } else { + var err error + config.AgentHTTPPort, err = extractPortFromEndpoint(remoteSamplingConfig.HostEndpoint) + if err != nil { + return nil, err + } + } + + // strategies are served over grpc so if grpc is not enabled and strategies are present return an error + if len(remoteSamplingConfig.StrategyFile) != 0 { + if config.CollectorGRPCPort == 0 { + return nil, fmt.Errorf("strategy file requires the GRPC protocol to be enabled") + } + + config.RemoteSamplingStrategyFile = remoteSamplingConfig.StrategyFile + } + } + + if (rCfg.Protocols.GRPC == nil && rCfg.Protocols.ThriftHTTP == nil && rCfg.Protocols.ThriftBinary == nil && rCfg.Protocols.ThriftCompact == nil) || + (config.CollectorGRPCPort == 0 && 
config.CollectorHTTPPort == 0 && config.CollectorThriftPort == 0 && config.AgentBinaryThriftPort == 0 && config.AgentCompactThriftPort == 0) { + err := fmt.Errorf("either GRPC(%v), ThriftHTTP(%v), ThriftCompact(%v), or ThriftBinary(%v) protocol endpoint with non-zero port must be enabled for %s receiver", + rCfg.Protocols.GRPC, + rCfg.Protocols.ThriftHTTP, + rCfg.Protocols.ThriftCompact, + rCfg.Protocols.ThriftBinary, + typeStr, + ) + return nil, err + } + + // Create the receiver. + return newJaegerReceiver(rCfg.Name(), &config, nextConsumer, params), nil +} + +// extract the port number from string in "address:port" format. If the +// port number cannot be extracted returns an error. +func extractPortFromEndpoint(endpoint string) (int, error) { + _, portStr, err := net.SplitHostPort(endpoint) + if err != nil { + return 0, fmt.Errorf("endpoint is not formatted correctly: %s", err.Error()) + } + port, err := strconv.ParseInt(portStr, 10, 0) + if err != nil { + return 0, fmt.Errorf("endpoint port is not a number: %s", err.Error()) + } + if port < 1 || port > 65535 { + return 0, fmt.Errorf("port number must be between 1 and 65535") + } + return int(port), nil +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/factory_test.go b/internal/otel_collector/receiver/jaegerreceiver/factory_test.go new file mode 100644 index 00000000000..728b83674d6 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/factory_test.go @@ -0,0 +1,352 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
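+
+// For reference, extractPortFromEndpoint (factory.go) accepts "host:port" with
+// 1 <= port <= 65535; the tests below exercise its failure modes:
+//
+//	extractPortFromEndpoint("0.0.0.0:14250")   // 14250, nil
+//	extractPortFromEndpoint("localhost:")      // error: port is not a number
+//	extractPortFromEndpoint("localhost:65536") // error: port out of range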
+ +package jaegerreceiver + +import ( + "context" + "fmt" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/config/configtls" +) + +func TestTypeStr(t *testing.T) { + factory := NewFactory() + + assert.Equal(t, "jaeger", string(factory.Type())) +} + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + // have to enable at least one protocol for the jaeger receiver to be created + cfg.(*Config).Protocols.GRPC = &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCBindEndpoint, + Transport: "tcp", + }, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + tReceiver, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.NoError(t, err, "receiver creation failed") + assert.NotNil(t, tReceiver, "receiver creation failed") + + mReceiver, err := factory.CreateMetricsReceiver(context.Background(), params, cfg, nil) + assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) + assert.Nil(t, mReceiver) +} + +func TestCreateReceiverGeneralConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + require.NoError(t, err) + require.NotNil(t, cfg) + + rCfg, ok := cfg.Receivers["jaeger/customname"] + require.True(t, ok) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + tReceiver, err := factory.CreateTracesReceiver(context.Background(), params, rCfg, nil) + assert.NoError(t, err, "receiver creation failed") + assert.NotNil(t, tReceiver, "receiver creation failed") + + mReceiver, err := factory.CreateMetricsReceiver(context.Background(), params, rCfg, nil) + assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) + assert.Nil(t, mReceiver) +} + +// default ports retrieved from https://www.jaegertracing.io/docs/1.16/deployment/ +func TestCreateDefaultGRPCEndpoint(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.GRPC = &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCBindEndpoint, + Transport: "tcp", + }, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.NoError(t, err, "unexpected error creating receiver") + assert.Equal(t, 14250, r.(*jReceiver).config.CollectorGRPCPort, "grpc port should be default") +} + +func TestCreateTLSGPRCEndpoint(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.GRPC = 
&configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCBindEndpoint, + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "./testdata/server.crt", + KeyFile: "./testdata/server.key", + }, + }, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.NoError(t, err, "tls-enabled receiver creation failed") +} + +func TestCreateInvalidHTTPEndpoint(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftHTTP = &confighttp.HTTPServerSettings{ + Endpoint: defaultHTTPBindEndpoint, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.NoError(t, err, "unexpected error creating receiver") + assert.Equal(t, 14268, r.(*jReceiver).config.CollectorHTTPPort, "http port should be default") +} + +func TestCreateInvalidThriftBinaryEndpoint(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftBinary = &ProtocolUDP{ + Endpoint: defaultThriftBinaryBindEndpoint, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.NoError(t, err, "unexpected error creating receiver") + assert.Equal(t, 6832, r.(*jReceiver).config.AgentBinaryThriftPort, "thrift port should be default") +} + +func TestCreateInvalidThriftCompactEndpoint(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftCompact = &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.NoError(t, err, "unexpected error creating receiver") + assert.Equal(t, 6831, r.(*jReceiver).config.AgentCompactThriftPort, "thrift port should be default") +} + +func TestDefaultAgentRemoteSamplingEndpointAndPort(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + rCfg := cfg.(*Config) + + rCfg.Protocols.ThriftCompact = &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + } + rCfg.RemoteSampling = &RemoteSamplingConfig{} + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.NoError(t, err, "create trace receiver should not error") + assert.Equal(t, defaultGRPCBindEndpoint, r.(*jReceiver).config.RemoteSamplingClientSettings.Endpoint) + assert.Equal(t, defaultAgentRemoteSamplingHTTPPort, r.(*jReceiver).config.AgentHTTPPort, "agent http port should be default") +} + +func TestAgentRemoteSamplingEndpoint(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + rCfg := cfg.(*Config) + + endpoint := "localhost:1234" + rCfg.Protocols.ThriftCompact = &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + } + rCfg.RemoteSampling = &RemoteSamplingConfig{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + }, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.NoError(t, err, "create trace receiver should not error") + assert.Equal(t, 
endpoint, r.(*jReceiver).config.RemoteSamplingClientSettings.Endpoint) + assert.Equal(t, defaultAgentRemoteSamplingHTTPPort, r.(*jReceiver).config.AgentHTTPPort, "agent http port should be default") +} + +func TestCreateNoPort(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftHTTP = &confighttp.HTTPServerSettings{ + Endpoint: "localhost:", + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.Error(t, err, "receiver creation with no port number must fail") +} + +func TestCreateLargePort(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftHTTP = &confighttp.HTTPServerSettings{ + Endpoint: "localhost:65536", + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.Error(t, err, "receiver creation with too large port number must fail") +} + +func TestCreateInvalidHost(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.GRPC = &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "1234", + Transport: "tcp", + }, + } + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.Error(t, err, "receiver creation with bad hostname must fail") +} + +func TestCreateNoProtocols(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols = Protocols{} + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.Error(t, err, "receiver creation with no protocols must fail") +} + +func TestThriftBinaryBadPort(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftBinary = &ProtocolUDP{ + Endpoint: "localhost:65536", + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.Error(t, err, "receiver creation with a bad thrift binary port must fail") +} + +func TestThriftCompactBadPort(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + cfg.(*Config).Protocols.ThriftCompact = &ProtocolUDP{ + Endpoint: "localhost:65536", + } + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + assert.Error(t, err, "receiver creation with a bad thrift compact port must fail") +} + +func TestRemoteSamplingConfigPropagation(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + rCfg := cfg.(*Config) + + hostPort := 5778 + endpoint := "localhost:1234" + strategyFile := "strategies.json" + rCfg.Protocols.GRPC = &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCBindEndpoint, + Transport: "tcp", + }, + } + rCfg.RemoteSampling = &RemoteSamplingConfig{ + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: endpoint, + }, + HostEndpoint: fmt.Sprintf("localhost:%d", hostPort), + StrategyFile: strategyFile, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + r, err := factory.CreateTracesReceiver(context.Background(), 
params, cfg, nil) + + assert.NoError(t, err, "create trace receiver should not error") + assert.Equal(t, endpoint, r.(*jReceiver).config.RemoteSamplingClientSettings.Endpoint) + assert.Equal(t, hostPort, r.(*jReceiver).config.AgentHTTPPort, "agent http port should be configured value") + assert.Equal(t, strategyFile, r.(*jReceiver).config.RemoteSamplingStrategyFile) +} + +func TestRemoteSamplingFileRequiresGRPC(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + rCfg := cfg.(*Config) + + // Remove all default protocols + rCfg.Protocols = Protocols{} + rCfg.Protocols.ThriftCompact = &ProtocolUDP{ + Endpoint: defaultThriftCompactBindEndpoint, + } + rCfg.RemoteSampling = &RemoteSamplingConfig{ + StrategyFile: "strategies.json", + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, nil) + + assert.Error(t, err, "create trace receiver should error") +} + +func TestCustomUnmarshalErrors(t *testing.T) { + factory := NewFactory() + + fu, ok := factory.(component.ConfigUnmarshaler) + assert.True(t, ok) + + err := fu.Unmarshal(config.NewViper(), nil) + assert.Error(t, err, "should not have been able to marshal to a nil config") + + err = fu.Unmarshal(config.NewViper(), &RemoteSamplingConfig{}) + assert.Error(t, err, "should not have been able to marshal to a non-jaegerreceiver config") +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/jaeger_agent_test.go b/internal/otel_collector/receiver/jaegerreceiver/jaeger_agent_test.go new file mode 100644 index 00000000000..3e7a12a1526 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/jaeger_agent_test.go @@ -0,0 +1,253 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
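+
+// Note: with AgentHTTPPort configured, the receiver serves agent HTTP
+// endpoints such as GET /sampling?service=<name>, answering them by querying
+// the configured collector over gRPC (see TestJaegerHTTP below).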
+ +package jaegerreceiver + +import ( + "context" + "fmt" + "net" + "net/http" + "testing" + + "github.com/apache/thrift/lib/go/thrift" + "github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp" + "github.com/jaegertracing/jaeger/model" + jaegerconvert "github.com/jaegertracing/jaeger/model/converter/thrift/jaeger" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" + "github.com/jaegertracing/jaeger/thrift-gen/agent" + jaegerthrift "github.com/jaegertracing/jaeger/thrift-gen/jaeger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/testutil" + "go.opentelemetry.io/collector/translator/conventions" + "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +const jaegerAgent = "jaeger_agent_test" + +func TestJaegerAgentUDP_ThriftCompact(t *testing.T) { + port := testutil.GetAvailablePort(t) + addrForClient := fmt.Sprintf(":%d", port) + testJaegerAgent(t, addrForClient, &configuration{ + AgentCompactThriftPort: int(port), + AgentCompactThriftConfig: DefaultServerConfigUDP(), + }) +} + +func TestJaegerAgentUDP_ThriftCompact_InvalidPort(t *testing.T) { + port := 999999 + + config := &configuration{ + AgentCompactThriftPort: port, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerAgent, config, nil, params) + + assert.Error(t, jr.Start(context.Background(), componenttest.NewNopHost()), "should not have been able to startTraceReception") + + jr.Shutdown(context.Background()) +} + +func TestJaegerAgentUDP_ThriftBinary(t *testing.T) { + port := testutil.GetAvailablePort(t) + addrForClient := fmt.Sprintf(":%d", port) + testJaegerAgent(t, addrForClient, &configuration{ + AgentBinaryThriftPort: int(port), + AgentBinaryThriftConfig: DefaultServerConfigUDP(), + }) +} + +func TestJaegerAgentUDP_ThriftBinary_PortInUse(t *testing.T) { + // This test confirms that the thrift binary port is opened correctly. This is all we can test at the moment. See above. 
+ port := testutil.GetAvailablePort(t) + + config := &configuration{ + AgentBinaryThriftPort: int(port), + AgentBinaryThriftConfig: DefaultServerConfigUDP(), + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerAgent, config, nil, params) + + assert.NoError(t, jr.startAgent(componenttest.NewNopHost()), "Start failed") + defer jr.Shutdown(context.Background()) + + l, err := net.Listen("udp", fmt.Sprintf("localhost:%d", port)) + assert.Error(t, err, "should not have been able to listen to the port") + + if l != nil { + l.Close() + } +} + +func TestJaegerAgentUDP_ThriftBinary_InvalidPort(t *testing.T) { + port := 999999 + + config := &configuration{ + AgentBinaryThriftPort: port, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerAgent, config, nil, params) + + assert.Error(t, jr.Start(context.Background(), componenttest.NewNopHost()), "should not have been able to startTraceReception") + + jr.Shutdown(context.Background()) +} + +func initializeGRPCTestServer(t *testing.T, beforeServe func(server *grpc.Server), opts ...grpc.ServerOption) (*grpc.Server, net.Addr) { + server := grpc.NewServer(opts...) + lis, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + beforeServe(server) + go func() { + err := server.Serve(lis) + require.NoError(t, err) + }() + return server, lis.Addr() +} + +type mockSamplingHandler struct { +} + +func (*mockSamplingHandler) GetSamplingStrategy(context.Context, *api_v2.SamplingStrategyParameters) (*api_v2.SamplingStrategyResponse, error) { + return &api_v2.SamplingStrategyResponse{StrategyType: api_v2.SamplingStrategyType_PROBABILISTIC}, nil +} + +func TestJaegerHTTP(t *testing.T) { + s, addr := initializeGRPCTestServer(t, func(s *grpc.Server) { + api_v2.RegisterSamplingManagerServer(s, &mockSamplingHandler{}) + }) + defer s.GracefulStop() + + port := testutil.GetAvailablePort(t) + config := &configuration{ + AgentHTTPPort: int(port), + RemoteSamplingClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: addr.String(), + TLSSetting: configtls.TLSClientSetting{ + Insecure: true, + }, + }, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerAgent, config, nil, params) + defer jr.Shutdown(context.Background()) + + assert.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost()), "Start failed") + + // allow http server to start + assert.NoError(t, testutil.WaitForPort(t, port), "WaitForPort failed") + + resp, err := http.Get(fmt.Sprintf("http://localhost:%d/sampling?service=test", port)) + assert.NoError(t, err, "should not have failed to make request") + if resp != nil { + assert.Equal(t, 200, resp.StatusCode, "should have returned 200") + } + + resp, err = http.Get(fmt.Sprintf("http://localhost:%d/sampling?service=test", port)) + assert.NoError(t, err, "should not have failed to make request") + if resp != nil { + assert.Equal(t, 200, resp.StatusCode, "should have returned 200") + } + + resp, err = http.Get(fmt.Sprintf("http://localhost:%d/baggageRestrictions?service=test", port)) + assert.NoError(t, err, "should not have failed to make request") + if resp != nil { + assert.Equal(t, 200, resp.StatusCode, "should have returned 200") + } +} + +func testJaegerAgent(t *testing.T, agentEndpoint string, receiverConfig *configuration) { + // 1. 
Create the Jaeger receiver aka "server" + sink := new(consumertest.TracesSink) + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerAgent, receiverConfig, sink, params) + defer jr.Shutdown(context.Background()) + + assert.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost()), "Start failed") + + // 2. Then send spans to the Jaeger receiver. + jexp, err := newClientUDP(agentEndpoint, jr.agentBinaryThriftEnabled()) + assert.NoError(t, err, "Failed to create the Jaeger OpenCensus exporter for the live application") + + // 3. Now finally send some spans + td := generateTraceData() + batches, err := jaeger.InternalTracesToJaegerProto(td) + require.NoError(t, err) + for _, batch := range batches { + require.NoError(t, jexp.EmitBatch(context.Background(), modelToThrift(batch))) + } + + testutil.WaitFor(t, func() bool { + return sink.SpansCount() > 0 + }) + + gotTraces := sink.AllTraces() + require.Equal(t, 1, len(gotTraces)) + assert.EqualValues(t, td, gotTraces[0]) +} + +func newClientUDP(hostPort string, binary bool) (*agent.AgentClient, error) { + clientTransport, err := thriftudp.NewTUDPClientTransport(hostPort, "") + if err != nil { + return nil, err + } + var protocolFactory thrift.TProtocolFactory + if binary { + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + } else { + protocolFactory = thrift.NewTCompactProtocolFactory() + } + return agent.NewAgentClientFactory(clientTransport, protocolFactory), nil +} + +// Cannot use the testdata because timestamps are nanoseconds. +func generateTraceData() pdata.Traces { + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + td.ResourceSpans().At(0).Resource().Attributes().UpsertString(conventions.AttributeServiceName, "test") + td.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + span.SetSpanID(pdata.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7})) + span.SetTraceID(pdata.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 2, 1, 0})) + span.SetStartTime(1581452772000000000) + span.SetEndTime(1581452773000000000) + return td +} + +func modelToThrift(batch *model.Batch) *jaegerthrift.Batch { + return &jaegerthrift.Batch{ + Process: processModelToThrift(batch.Process), + Spans: jaegerconvert.FromDomain(batch.Spans), + } +} + +func processModelToThrift(process *model.Process) *jaegerthrift.Process { + if process == nil { + return nil + } + return &jaegerthrift.Process{ + ServiceName: process.ServiceName, + } +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_empty_config.yaml b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_empty_config.yaml new file mode 100644 index 00000000000..a2ff3c2c9f7 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_empty_config.yaml @@ -0,0 +1,15 @@ +receivers: + jaeger: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [jaeger] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_no_proto_config.yaml b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_no_proto_config.yaml new file mode 100644 index 00000000000..b213ac63248 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_no_proto_config.yaml @@ -0,0 +1,16 @@ 
+receivers: + jaeger: + protocols: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [jaeger] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_proto_config.yaml b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_proto_config.yaml new file mode 100644 index 00000000000..a87240a9303 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_proto_config.yaml @@ -0,0 +1,19 @@ +receivers: + # The following demonstrates how to enable protocols with defaults + jaeger: + protocols: + thrift_htttp: + endpoint: "127.0.0.1:123" + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [jaeger] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_typo_default_proto_config.yaml b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_typo_default_proto_config.yaml new file mode 100644 index 00000000000..753510fd62d --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/bad_typo_default_proto_config.yaml @@ -0,0 +1,20 @@ +receivers: + # The following demonstrates how to enable protocols with defaults + jaeger: + protocols: + grpc: + endpoint: "127.0.0.1:123" + thrift_htttp: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [jaeger] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/ca.crt b/internal/otel_collector/receiver/jaegerreceiver/testdata/ca.crt new file mode 100644 index 00000000000..c8d6524ebad --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQDq+ERthElKtzANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB +VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM +CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIwMDkyMjA1MjIx +MFoXDTMwMDkyMDA1MjIxMFowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry +YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV +BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANv/RHAB8f8VbGG5Wq9mZzqLREoLTfNG8pRCPFvD+UEbl7ldLrzz5T92s4VX7HjA +BsGDTrK7VgO1GZGQXV1fBlbFcAmkISiYWYCmIxD1BfEN+Sh/9OVfKXZJVSInvs/I +nLYvXiymxbtOh/C+/hlcZW9VA2IUkbTUb/qd7SK0pVpOK0KMdpVq5t1HqAP+ssB/ +ZtbWFL1Ai057HNbki+s7LfMeiPya9hY/CRk6ei3oSrxLqQCXUeJAtS/iMzUDyq7u +btDA7sNMUqYvG7nWF9AkUXRqp8DVsIJKGk4hN/aKvkJaJfHe66kirKeJWQXYp5Hh +632EDi8ku4dOVae7w50YnbsCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAwEEc13Oi +wvnz6tfhGUVUTfLauNn9qTdXBjNwgQV9z0QZrw9puGAAc1oRs8cmPx+TMROSPzXM +PnRrkFYanh4beg21j4iRVm0rYm796q8IaicerkpN5XzFSeyzwnwMauyOA9cXsMfB +vza8RH+GgUpQ5eZRTuBD03Ic0kfz39bz0gPod6/CWo7ONRV6AoEwVi1vsULLUbA0 +hL/XsjlihXU0XLtEx1DB5lKrATyFPCxR+kq6Q+EdfDq3r+B7rg+gyv6mCzaf5LZY +0+r7s/no+cWzm2LrRebvp00i0RfeqSu3Uwr51oEidkLeBQftQm9Xvkt4Z3O+LJjw +bf40dGXtFmgflw== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/client.crt b/internal/otel_collector/receiver/jaegerreceiver/testdata/client.crt new file mode 100644 index 00000000000..53e9005fdf3 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/client.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMddMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV 
+BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjExWhcNMzAwOTIwMDUyMjExWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAzumNUxAx4hbTbru52IxI4OE/SvhKaqbJQffrOY5MFUnblOzIAz1VaaZh +J8nXG+T234KgcZAyW8CCnPyB+LhUxpDVLmeoxbOYM/7ZTsRbUph8yZjkySXii2Qu +pk7L01Q81ESSaSKGBgyc1Go8N/SSwi5XH79Ng/CtDfOLJZdE0gQdoQZFkaX/UZ+t +RFuUhmbOrAk31QnpccKGKOf+8sa16Voy8uv+jdP2QPyARGs5i/1OXGuQ/GnouaFb +o2hgklRPs8or5cLgmcczKoCNDw/Hvw5hys5Isei/e89/IRdtXiHICkIUrAYXOcMi +yZwxN5Gr+A0TvR6rPZqEzoqpFy2S2wIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQC6 +A8DEtVOKTdOXmT0b8eN65w7LJUUPLadEF3YF+osnu2DOBM6GLBcXyX88Km2mvss6 +Yc4H4iBlG7khXBScxPO+ofNC3j3HyklcdUWZNO2GMsnFMhmBXNrN7nBWkLNz67o7 +C1S6Xb+ZbYRTD0VxQ1HDIjYxMSAVxLahl2qvbC0vC3Z7SaDIzkJR2MfHD+d/9CKy +7WkohAs0P1uF7S4DgWjp/c39GRGY4aNgdxnXgbluuho96BlQAnoOaTNaXww6B5mu +7w/kd97Fw7aH5rPLxrs7TkkTDzreW8p7V8VxT7aS3kG7p8k617gVQJNEPLczvGt4 +Dp0mk+luCnY9EzClbAZc +-----END CERTIFICATE----- diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/client.key b/internal/otel_collector/receiver/jaegerreceiver/testdata/client.key new file mode 100644 index 00000000000..8895820fc25 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAzumNUxAx4hbTbru52IxI4OE/SvhKaqbJQffrOY5MFUnblOzI +Az1VaaZhJ8nXG+T234KgcZAyW8CCnPyB+LhUxpDVLmeoxbOYM/7ZTsRbUph8yZjk +ySXii2Qupk7L01Q81ESSaSKGBgyc1Go8N/SSwi5XH79Ng/CtDfOLJZdE0gQdoQZF +kaX/UZ+tRFuUhmbOrAk31QnpccKGKOf+8sa16Voy8uv+jdP2QPyARGs5i/1OXGuQ +/GnouaFbo2hgklRPs8or5cLgmcczKoCNDw/Hvw5hys5Isei/e89/IRdtXiHICkIU +rAYXOcMiyZwxN5Gr+A0TvR6rPZqEzoqpFy2S2wIDAQABAoIBAQCBWynrYBiPjE2q +0NojM7DsRmXXbNq5SoRQJbp9RSTzujT5Kf7xZ4rafhYHVfyRh9d/bJ754HhbPENr ++cEXycXWTf25AT1WPC1PdGhPAhtFy+dX8ao2xuSW2I8BkgmDmQpeenA+IkM/zOrP +MYtsQA7wLyBwzJyde/301umLGsL/uEs3VipWCJ9bk3GuTL6ir4gRdISYd+Rm8tRw +7aeM/uraIv4oK2LInih76xNr+i3vkMDXXzJ+P59Tu6qjt2ksBBjLTuqy+ZkXs2SU +BJ8g9Px/vqSxwTF9aX3nnasA5WyODPx2OTTaHnQ4MTaascG3Sp3Hq+kurPIwnCWG +bZwcuW5hAoGBAOb15byeEg3UssIaXg2HTtp9gShTxDEdoVmTjMTOGrsgZy6ajH6s +AMlOPTG91PH4FNkvxiqdIyN8R5zh+NHKt9pSnbhzFNfvxJG3whrYaE2LwJFhjNTD +rYaTWYS0HaDA/DgKhwEnUmxZaxyaY1ScXcJhAulg3TYU9RnDfVjDgCQtAoGBAOVY +N74ieH4e3EpmdRyH/WtdnTEJ+q/DbUF0LZAAZFoAHyCRMl6KuDObubzMoaoQIT0Q +p3AJCdp8ycwn0WYFL2kP2Wmp/JxDQyj2Uqh2dbU0G+0M3vYgTEvrqZ8pUWO5Dc3C +w2EswcYTq3tzX/kEMegGskkvZ9DlrRpAMRzHAlAnAoGARI0fz0grm6dSF4Kz/9f1 +c6xktY+HX/ync2r0EUYLcRdBCPgeU0rCQP3T8/ugROGZbo1biDJzx4iPyOTZcYt1 +3ns/DQw7V4x3D7k3B7jL3JhqY7xMjKo3ywXZQCYl1Rzyv7+AKrt9H2O7AxZf/TEc +MyGQN6zke7TkuuznO31rf1ECgYEAghw2I4vyx7pCR4Mw1Wrg/lQxpWx/58764LNE +Vfmi9Nw0zIkTBke0kLK8ALwmyxAziy0zkH/QMz+wTD4ascInT3dKZIOnaA9QvqBf +7GqoBJD3dthidUeFgVzE8iLCpcyKZD0mEq8Nj44BLxwZSnByz6tc4eAfCYgDWG0q +b6UHTukCgYEAol2Nv4sVOGMF799/O0rlqb3zDNTBUjeOiLPbhKqz1IFxT36eWfFO +kIkzp5ubqoHGO/ODZ+rH5H6ENxCp69mgd9cPp+Wf7NRkX94C5myYbSFQJTYJ5mh9 +Y0xlCu3V0sAcH9qrHqreBwInAvtWhwCcrURxSvCKfDScn3rAOtC2ePc= +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/config.yaml b/internal/otel_collector/receiver/jaegerreceiver/testdata/config.yaml new file mode 100644 index 00000000000..ca27938c2d4 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/config.yaml @@ -0,0 +1,66 @@ +receivers: + # The following demonstrates specifying 
different endpoints. + # The Jaeger receiver connects to ports on all available network interfaces. + # Ex: `endpoint: "9876"` is incorrect. + # Ex: `endpoint: "1.2.3.4:9876"` and ":9876" is correct. + jaeger/customname: + protocols: + grpc: + endpoint: "localhost:9876" + thrift_http: + endpoint: ":3456" + thrift_compact: + endpoint: "0.0.0.0:456" + queue_size: 100_000 + max_packet_size: 131_072 + workers: 100 + socket_buffer_size: 65_536 + thrift_binary: + endpoint: "0.0.0.0:789" + queue_size: 1_000 + max_packet_size: 65_536 + workers: 5 + socket_buffer_size: 0 + remote_sampling: + host_endpoint: "0.0.0.0:5778" + endpoint: "jaeger-collector:1234" + strategy_file: "/etc/strategies.json" + # The following demonstrates how to enable protocols with defaults. + jaeger/defaults: + protocols: + grpc: + thrift_http: + thrift_compact: + thrift_binary: + # The following demonstrates only enabling certain protocols with defaults/overrides. + jaeger/mixed: + protocols: + grpc: + endpoint: "localhost:9876" + thrift_compact: + # The following demonstrates specifying different endpoints. + # The Jaeger receiver connects to ports on all available network interfaces. + # Ex: `endpoint: "9876"` is incorrect. + # Ex: `endpoint: "1.2.3.4:9876"` and ":9876" is correct. + jaeger/tls: + protocols: + grpc: + tls_settings: + cert_file: /test.crt + key_file: /test.key + endpoint: "localhost:9876" + thrift_http: + endpoint: ":3456" + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [jaeger/defaults] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/server.crt b/internal/otel_collector/receiver/jaegerreceiver/testdata/server.crt new file mode 100644 index 00000000000..c9e8b95d39a --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdcMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjExWhcNMzAwOTIwMDUyMjExWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAur2w74htbzpU9gJW8Jmqso0r/0etrwSlijWYyxH9FX72vaXBV8ZYwPtX +NQq8vYiez9MBhjGF3MLa62Sc4ATtobKKdNPatZh47P/lS3ugWXse5mh6k9I1I1oQ +ikDvD1zwJmFEamQztU9WZ/VAKNHNf6nBNujfU0UMzJ4R1KG7bXR00+XA/oYyil0H +CV//1Y3FPdw2znj1ulwF7Odfcsfjo4mkMAl+Ep4aU/BkA9elV4hJqCOynNoHJ2zG +CPNYCi9dh3UpPfC8Lk2MqYvAIWIoJbqddaMoWJfNCCxIQ8uPYHIcK+Clqak+N1lM +ans/2NLNJg5QSOGN331QWkRAcBumXQIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQDN +ogqQ5hcH3F0B0imuaYD2CgeosAswtjxvQvz4Z7R/WSzl+yMfkoC45r6CUBUOOP3N +lN1ZD7D7S46sEwRxqOZIwHMvXGVvBPuXuxZfP90t8J4H6IdxQfpzQC5SwNO+s9Pw +YVKA+2FqQp1xvulJVr5BNruMv6v6ZZYDXh3E0W5m9kXiVjU6gOI8AAjO9OfFTsDL +2WB2+jxJ7tVuoTq9+sBPvR9SI5RUk7+0SauZQ8Y/QXNlAJ10BoZcNSHABo3D7WR5 +m2Qu38Sh5fz3Ae37QS/o76JKPsruCwsBQ2kbduYma+0G6Z+fMxyPT7C6b3moHiMs +tdrNBEJM4vbiD5nk0WWt +-----END CERTIFICATE----- diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/server.key b/internal/otel_collector/receiver/jaegerreceiver/testdata/server.key new file mode 100644 index 00000000000..707f20e1113 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEowIBAAKCAQEAur2w74htbzpU9gJW8Jmqso0r/0etrwSlijWYyxH9FX72vaXB +V8ZYwPtXNQq8vYiez9MBhjGF3MLa62Sc4ATtobKKdNPatZh47P/lS3ugWXse5mh6 +k9I1I1oQikDvD1zwJmFEamQztU9WZ/VAKNHNf6nBNujfU0UMzJ4R1KG7bXR00+XA +/oYyil0HCV//1Y3FPdw2znj1ulwF7Odfcsfjo4mkMAl+Ep4aU/BkA9elV4hJqCOy +nNoHJ2zGCPNYCi9dh3UpPfC8Lk2MqYvAIWIoJbqddaMoWJfNCCxIQ8uPYHIcK+Cl +qak+N1lMans/2NLNJg5QSOGN331QWkRAcBumXQIDAQABAoIBAGJqVFR49wu2l04r +v3v/0GlXY0eflBZ4AXJMeuES8umgRxN9xt6mhuH11Gj85qmQ1fm7P8NkWCMXgl6q +YM7jagVc3gFiIZKw75If7s0QM1rVO1y81VUQZLbj4XGrdRIIrfvcKT1U37d/P498 +PjsFDyNn6I8yvXfaHTu9VrJUJj4xSaeUDX6I033nljbWFzPAHdpIGouPN0u69X5X +c2Y3DSC5P5vraD0v1rhnIz/pmUZGkLSXbxA0k4be7tD4Ek32TV91pGHj8AbJyu0Q +JCGJHpi7EiD27j89USgQppGpDouq7AgxdwKMFJs/8mZY7DGvQE3FhgsMPW18x2eD +fWyhppUCgYEA7PWvkX+/HkgBeZ9qFa9gN/HOFuVj80UiiCTy0mKzUNaQr6p3E1Rg +irQMpUrhB7heW2bS72QRbBgP+1DMvAZTuFj6H8ss8wxrJa1MFCVVGSPC2+qjKxPQ +GtOZt4mJjQukklz5dMHQphp3tlbLoDQeu3KRpa7KQy3dnbX4jpEMNiMCgYEAyb7+ +r0Hk0qxXB76O0nRVjK+1aas9JWuYOKfF7pk/z9576evmkhTGIjNjQTOQnJH24KkY +lTYaabcxwrj9O+8o0MmgapW5clkr0cMtL2C/VDTWlNOfFZ2T9yfRa8qtkblhCZ5M +rGU5rJyZYcqFSkiGi5b/Mu6wPY32rWoJ8bd1OX8CgYAlSOM/OaKQ2TOiN3sxvk6d +fua2o5F+jbpJQccTY4Rji99oRKJH4FbwfDQhLg8Kb/Ao4Zz/Hfe/0mlxWd1dGIHD +1/xDVGzWMXYKj6IQ6W7ibcYTZHAYLx3nmrPtNS73ioVyoj5+KKD0AeYkQrP3mTvc +ssJIF6CrwWPFlQRvKlOJkQKBgQC+hoV3db9nxiIayjePQRZZ2sZ0mKcSY95KAwfG +ISxGX1Kew43of33uZqFhvhTgCGkiGg+BOqsPE1cHEjT1GRNuujuo7OVJvDTJ0I0n +pTKLj6rmukQO4dYPH6eDKNFqQawGrVyzopUpEms4E051rLCDu4Ie05PVTfCcLPxf +LmaieQKBgDpgiJ3YRHD9SeEn0FREHludu3IAg2Wh57+rEMu/ZjEOHbjAKOmuHrGH +L1kXIS2uVB2BmDQlu1dNtPk2rJMi2onrlbD92A7xF/NDIJiOKQ/27YWQTF0Sj80a +4DyhFAHHBnr2MqFhPrdgoG0UgCE8NMEyKNxQBCI2vfOOg252kxUy +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/receiver/jaegerreceiver/testdata/strategies.json b/internal/otel_collector/receiver/jaegerreceiver/testdata/strategies.json new file mode 100644 index 00000000000..4e8eff73f3f --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/testdata/strategies.json @@ -0,0 +1,30 @@ +{ + "service_strategies": [ + { + "service": "foo", + "type": "probabilistic", + "param": 0.8, + "operation_strategies": [ + { + "operation": "op1", + "type": "probabilistic", + "param": 0.2 + }, + { + "operation": "op2", + "type": "probabilistic", + "param": 0.4 + } + ] + }, + { + "service": "bar", + "type": "ratelimiting", + "param": 5 + } + ], + "default_strategy": { + "type": "probabilistic", + "param": 0.5 + } + } \ No newline at end of file diff --git a/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go b/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go new file mode 100644 index 00000000000..c88307b29ca --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go @@ -0,0 +1,505 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaegerreceiver + +import ( + "context" + "fmt" + "io/ioutil" + "mime" + "net" + "net/http" + "sync" + + apacheThrift "github.com/apache/thrift/lib/go/thrift" + "github.com/gorilla/mux" + "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager" + jSamplingConfig "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc" + "github.com/jaegertracing/jaeger/cmd/agent/app/httpserver" + "github.com/jaegertracing/jaeger/cmd/agent/app/processors" + "github.com/jaegertracing/jaeger/cmd/agent/app/servers" + "github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp" + "github.com/jaegertracing/jaeger/cmd/collector/app/handler" + collectorSampling "github.com/jaegertracing/jaeger/cmd/collector/app/sampling" + staticStrategyStore "github.com/jaegertracing/jaeger/plugin/sampling/strategystore/static" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" + "github.com/jaegertracing/jaeger/thrift-gen/agent" + "github.com/jaegertracing/jaeger/thrift-gen/baggage" + "github.com/jaegertracing/jaeger/thrift-gen/jaeger" + "github.com/jaegertracing/jaeger/thrift-gen/sampling" + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + "github.com/uber/jaeger-lib/metrics" + "go.uber.org/zap" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/obsreport" + jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +// configuration defines the behavior and the ports that +// the Jaeger receiver will use. +type configuration struct { + CollectorThriftPort int + CollectorHTTPPort int + CollectorGRPCPort int + CollectorGRPCOptions []grpc.ServerOption + + AgentCompactThriftPort int + AgentCompactThriftConfig ServerConfigUDP + AgentBinaryThriftPort int + AgentBinaryThriftConfig ServerConfigUDP + AgentHTTPPort int + RemoteSamplingClientSettings configgrpc.GRPCClientSettings + RemoteSamplingStrategyFile string +} + +// Receiver type is used to receive spans that were originally intended to be sent to Jaeger. +// This receiver is basically a Jaeger collector. +type jReceiver struct { + // mu protects the fields of this type + mu sync.Mutex + + nextConsumer consumer.TracesConsumer + instanceName string + + startOnce sync.Once + stopOnce sync.Once + + config *configuration + + grpc *grpc.Server + collectorServer *http.Server + + agentSamplingManager *jSamplingConfig.SamplingManager + agentProcessors []processors.Processor + agentServer *http.Server + + logger *zap.Logger +} + +const ( + agentTransportBinary = "udp_thrift_binary" + agentTransportCompact = "udp_thrift_compact" + collectorHTTPTransport = "collector_http" + grpcTransport = "grpc" + + thriftFormat = "thrift" + protobufFormat = "protobuf" +) + +var ( + acceptedThriftFormats = map[string]struct{}{ + "application/x-thrift": {}, + "application/vnd.apache.thrift.binary": {}, + } +) + +// newJaegerReceiver creates a TracesReceiver that receives traffic as a Jaeger collector, and +// also as a Jaeger agent. 
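+// A port of zero in the configuration leaves the corresponding collector or
+// agent endpoint disabled; see the *Enabled helpers below.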
+func newJaegerReceiver(
+	instanceName string,
+	config *configuration,
+	nextConsumer consumer.TracesConsumer,
+	params component.ReceiverCreateParams,
+) *jReceiver {
+	return &jReceiver{
+		config:       config,
+		nextConsumer: nextConsumer,
+		instanceName: instanceName,
+		logger:       params.Logger,
+	}
+}
+
+func (jr *jReceiver) agentCompactThriftAddr() string {
+	var port int
+	if jr.config != nil {
+		port = jr.config.AgentCompactThriftPort
+	}
+	return fmt.Sprintf(":%d", port)
+}
+
+func (jr *jReceiver) agentCompactThriftEnabled() bool {
+	return jr.config != nil && jr.config.AgentCompactThriftPort > 0
+}
+
+func (jr *jReceiver) agentBinaryThriftAddr() string {
+	var port int
+	if jr.config != nil {
+		port = jr.config.AgentBinaryThriftPort
+	}
+	return fmt.Sprintf(":%d", port)
+}
+
+func (jr *jReceiver) agentBinaryThriftEnabled() bool {
+	return jr.config != nil && jr.config.AgentBinaryThriftPort > 0
+}
+
+func (jr *jReceiver) agentHTTPAddr() string {
+	var port int
+	if jr.config != nil {
+		port = jr.config.AgentHTTPPort
+	}
+	return fmt.Sprintf(":%d", port)
+}
+
+func (jr *jReceiver) agentHTTPEnabled() bool {
+	return jr.config != nil && jr.config.AgentHTTPPort > 0
+}
+
+func (jr *jReceiver) collectorGRPCAddr() string {
+	var port int
+	if jr.config != nil {
+		port = jr.config.CollectorGRPCPort
+	}
+	return fmt.Sprintf(":%d", port)
+}
+
+func (jr *jReceiver) collectorGRPCEnabled() bool {
+	return jr.config != nil && jr.config.CollectorGRPCPort > 0
+}
+
+func (jr *jReceiver) collectorHTTPAddr() string {
+	var port int
+	if jr.config != nil {
+		port = jr.config.CollectorHTTPPort
+	}
+	return fmt.Sprintf(":%d", port)
+}
+
+func (jr *jReceiver) collectorHTTPEnabled() bool {
+	return jr.config != nil && jr.config.CollectorHTTPPort > 0
+}
+
+func (jr *jReceiver) Start(_ context.Context, host component.Host) error {
+	jr.mu.Lock()
+	defer jr.mu.Unlock()
+
+	var err = componenterror.ErrAlreadyStarted
+	jr.startOnce.Do(func() {
+		if err = jr.startAgent(host); err != nil && err != componenterror.ErrAlreadyStarted {
+			return
+		}
+
+		if err = jr.startCollector(host); err != nil && err != componenterror.ErrAlreadyStarted {
+			return
+		}
+
+		err = nil
+	})
+	return err
+}
+
+func (jr *jReceiver) Shutdown(context.Context) error {
+	var err = componenterror.ErrAlreadyStopped
+	jr.stopOnce.Do(func() {
+		jr.mu.Lock()
+		defer jr.mu.Unlock()
+		var errs []error
+
+		if jr.agentServer != nil {
+			if aerr := jr.agentServer.Close(); aerr != nil {
+				errs = append(errs, aerr)
+			}
+			jr.agentServer = nil
+		}
+		for _, processor := range jr.agentProcessors {
+			processor.Stop()
+		}
+
+		if jr.collectorServer != nil {
+			if cerr := jr.collectorServer.Close(); cerr != nil {
+				errs = append(errs, cerr)
+			}
+			jr.collectorServer = nil
+		}
+		if jr.grpc != nil {
+			jr.grpc.Stop()
+			jr.grpc = nil
+		}
+		err = componenterror.CombineErrors(errs)
+	})
+
+	return err
+}
+
+func consumeTraces(ctx context.Context, batch *jaeger.Batch, consumer consumer.TracesConsumer) (int, error) {
+	if batch == nil {
+		return 0, nil
+	}
+	td := jaegertranslator.ThriftBatchToInternalTraces(batch)
+	return len(batch.Spans), consumer.ConsumeTraces(ctx, td)
+}
+
+var _ agent.Agent = (*agentHandler)(nil)
+var _ api_v2.CollectorServiceServer = (*jReceiver)(nil)
+var _ configmanager.ClientConfigManager = (*jReceiver)(nil)
+
+type agentHandler struct {
+	name         string
+	transport    string
+	nextConsumer consumer.TracesConsumer
+}
+
+// EmitZipkinBatch is an unsupported method of the agent interface; Zipkin
+// spans are not handled by this receiver, so it always panics.
+func (h *agentHandler) EmitZipkinBatch(context.Context, []*zipkincore.Span) (err error) {
panic("unsupported receiver") +} + +// EmitBatch implements thrift-gen/agent/Agent and it forwards +// Jaeger spans received by the Jaeger agent processor. +func (h *agentHandler) EmitBatch(ctx context.Context, batch *jaeger.Batch) error { + ctx = obsreport.ReceiverContext(ctx, h.name, h.transport) + ctx = obsreport.StartTraceDataReceiveOp(ctx, h.name, h.transport) + + numSpans, err := consumeTraces(ctx, batch, h.nextConsumer) + obsreport.EndTraceDataReceiveOp(ctx, thriftFormat, numSpans, err) + return err +} + +func (jr *jReceiver) GetSamplingStrategy(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) { + return jr.agentSamplingManager.GetSamplingStrategy(ctx, serviceName) +} + +func (jr *jReceiver) GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) { + br, err := jr.agentSamplingManager.GetBaggageRestrictions(ctx, serviceName) + if err != nil { + // Baggage restrictions are not yet implemented - refer to - https://github.com/jaegertracing/jaeger/issues/373 + // As of today, GetBaggageRestrictions() always returns an error. + // However, we `return nil, nil` here in order to serve a valid `200 OK` response. + return nil, nil + } + return br, nil +} + +func (jr *jReceiver) PostSpans(ctx context.Context, r *api_v2.PostSpansRequest) (*api_v2.PostSpansResponse, error) { + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = obsreport.ReceiverContext(ctx, jr.instanceName, grpcTransport) + ctx = obsreport.StartTraceDataReceiveOp(ctx, jr.instanceName, grpcTransport) + + td := jaegertranslator.ProtoBatchToInternalTraces(r.GetBatch()) + + err := jr.nextConsumer.ConsumeTraces(ctx, td) + obsreport.EndTraceDataReceiveOp(ctx, protobufFormat, len(r.GetBatch().Spans), err) + if err != nil { + return nil, err + } + + return &api_v2.PostSpansResponse{}, nil +} + +func (jr *jReceiver) startAgent(_ component.Host) error { + if !jr.agentBinaryThriftEnabled() && !jr.agentCompactThriftEnabled() && !jr.agentHTTPEnabled() { + return nil + } + + if jr.agentBinaryThriftEnabled() { + h := &agentHandler{ + name: jr.instanceName, + transport: agentTransportBinary, + nextConsumer: jr.nextConsumer, + } + processor, err := jr.buildProcessor(jr.agentBinaryThriftAddr(), jr.config.AgentBinaryThriftConfig, apacheThrift.NewTBinaryProtocolFactoryDefault(), h) + if err != nil { + return err + } + jr.agentProcessors = append(jr.agentProcessors, processor) + } + + if jr.agentCompactThriftEnabled() { + h := &agentHandler{ + name: jr.instanceName, + transport: agentTransportCompact, + nextConsumer: jr.nextConsumer, + } + processor, err := jr.buildProcessor(jr.agentCompactThriftAddr(), jr.config.AgentCompactThriftConfig, apacheThrift.NewTCompactProtocolFactory(), h) + if err != nil { + return err + } + jr.agentProcessors = append(jr.agentProcessors, processor) + } + + for _, processor := range jr.agentProcessors { + go processor.Serve() + } + + // Start upstream grpc client before serving sampling endpoints over HTTP + if jr.config.RemoteSamplingClientSettings.Endpoint != "" { + grpcOpts, err := jr.config.RemoteSamplingClientSettings.ToDialOptions() + if err != nil { + jr.logger.Error("Error creating grpc dial options for remote sampling endpoint", zap.Error(err)) + return err + } + conn, err := grpc.Dial(jr.config.RemoteSamplingClientSettings.Endpoint, grpcOpts...) 
+ if err != nil { + jr.logger.Error("Error creating grpc connection to jaeger remote sampling endpoint", zap.String("endpoint", jr.config.RemoteSamplingClientSettings.Endpoint)) + return err + } + + jr.agentSamplingManager = jSamplingConfig.NewConfigManager(conn) + } + + if jr.agentHTTPEnabled() { + jr.agentServer = httpserver.NewHTTPServer(jr.agentHTTPAddr(), jr, metrics.NullFactory) + + go func() { + if err := jr.agentServer.ListenAndServe(); err != nil { + jr.logger.Error("http server failure", zap.Error(err)) + } + }() + } + + return nil +} + +func (jr *jReceiver) buildProcessor(address string, cfg ServerConfigUDP, factory apacheThrift.TProtocolFactory, a agent.Agent) (processors.Processor, error) { + handler := agent.NewAgentProcessor(a) + transport, err := thriftudp.NewTUDPServerTransport(address) + if err != nil { + return nil, err + } + if cfg.SocketBufferSize > 0 { + if err = transport.SetSocketBufferSize(cfg.SocketBufferSize); err != nil { + return nil, err + } + } + server, err := servers.NewTBufferedServer(transport, cfg.QueueSize, cfg.MaxPacketSize, metrics.NullFactory) + if err != nil { + return nil, err + } + processor, err := processors.NewThriftProcessor(server, cfg.Workers, metrics.NullFactory, factory, handler, jr.logger) + if err != nil { + return nil, err + } + return processor, nil +} + +func (jr *jReceiver) decodeThriftHTTPBody(r *http.Request) (*jaeger.Batch, *httpError) { + bodyBytes, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return nil, &httpError{ + handler.UnableToReadBodyErrFormat, + http.StatusInternalServerError, + } + } + + contentType, _, err := mime.ParseMediaType(r.Header.Get("Content-Type")) + if err != nil { + return nil, &httpError{ + fmt.Sprintf("Cannot parse content type: %v", err), + http.StatusBadRequest, + } + } + if _, ok := acceptedThriftFormats[contentType]; !ok { + return nil, &httpError{ + fmt.Sprintf("Unsupported content type: %v", contentType), + http.StatusBadRequest, + } + } + + tdes := apacheThrift.NewTDeserializer() + batch := &jaeger.Batch{} + if err = tdes.Read(batch, bodyBytes); err != nil { + return nil, &httpError{ + fmt.Sprintf(handler.UnableToReadBodyErrFormat, err), + http.StatusBadRequest, + } + } + return batch, nil +} + +// HandleThriftHTTPBatch implements Jaeger HTTP Thrift handler. 
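+// It accepts a Thrift-serialized span batch POSTed to /api/traces with a
+// Content-Type of application/x-thrift or application/vnd.apache.thrift.binary
+// (see acceptedThriftFormats), and replies with 202 Accepted on success.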
+func (jr *jReceiver) HandleThriftHTTPBatch(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if c, ok := client.FromHTTP(r); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = obsreport.ReceiverContext(ctx, jr.instanceName, collectorHTTPTransport) + ctx = obsreport.StartTraceDataReceiveOp(ctx, jr.instanceName, collectorHTTPTransport) + + batch, hErr := jr.decodeThriftHTTPBody(r) + if hErr != nil { + http.Error(w, hErr.msg, hErr.statusCode) + obsreport.EndTraceDataReceiveOp(ctx, thriftFormat, 0, hErr) + return + } + + numSpans, err := consumeTraces(ctx, batch, jr.nextConsumer) + if err != nil { + http.Error(w, fmt.Sprintf("Cannot submit Jaeger batch: %v", err), http.StatusInternalServerError) + } else { + w.WriteHeader(http.StatusAccepted) + } + obsreport.EndTraceDataReceiveOp(ctx, thriftFormat, numSpans, err) +} + +func (jr *jReceiver) startCollector(host component.Host) error { + if !jr.collectorGRPCEnabled() && !jr.collectorHTTPEnabled() { + return nil + } + + if jr.collectorHTTPEnabled() { + // Now the collector that runs over HTTP + caddr := jr.collectorHTTPAddr() + cln, cerr := net.Listen("tcp", caddr) + if cerr != nil { + return fmt.Errorf("failed to bind to Collector address %q: %v", caddr, cerr) + } + + nr := mux.NewRouter() + nr.HandleFunc("/api/traces", jr.HandleThriftHTTPBatch).Methods(http.MethodPost) + jr.collectorServer = &http.Server{Handler: nr} + go func() { + _ = jr.collectorServer.Serve(cln) + }() + } + + if jr.collectorGRPCEnabled() { + jr.grpc = grpc.NewServer(jr.config.CollectorGRPCOptions...) + gaddr := jr.collectorGRPCAddr() + gln, gerr := net.Listen("tcp", gaddr) + if gerr != nil { + return fmt.Errorf("failed to bind to gRPC address %q: %v", gaddr, gerr) + } + + api_v2.RegisterCollectorServiceServer(jr.grpc, jr) + + // init and register sampling strategy store + ss, gerr := staticStrategyStore.NewStrategyStore(staticStrategyStore.Options{ + StrategiesFile: jr.config.RemoteSamplingStrategyFile, + }, jr.logger) + if gerr != nil { + return fmt.Errorf("failed to create collector strategy store: %v", gerr) + } + api_v2.RegisterSamplingManagerServer(jr.grpc, collectorSampling.NewGRPCHandler(ss)) + + go func() { + if err := jr.grpc.Serve(gln); err != nil { + host.ReportFatalError(err) + } + }() + } + + return nil +} diff --git a/internal/otel_collector/receiver/jaegerreceiver/trace_receiver_test.go b/internal/otel_collector/receiver/jaegerreceiver/trace_receiver_test.go new file mode 100644 index 00000000000..407a8d70c89 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/trace_receiver_test.go @@ -0,0 +1,589 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaegerreceiver + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "path" + "testing" + "time" + + "github.com/apache/thrift/lib/go/thrift" + collectorSampling "github.com/jaegertracing/jaeger/cmd/collector/app/sampling" + "github.com/jaegertracing/jaeger/model" + staticStrategyStore "github.com/jaegertracing/jaeger/plugin/sampling/strategystore/static" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" + jaegerthrift "github.com/jaegertracing/jaeger/thrift-gen/jaeger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/testutil" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" + "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +const jaegerReceiver = "jaeger_receiver_test" + +func TestTraceSource(t *testing.T) { + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, &configuration{}, nil, params) + require.NotNil(t, jr) +} + +type traceConsumer struct { + cb func(context.Context, pdata.Traces) +} + +func (t traceConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error { + go t.cb(ctx, td) + return nil +} + +func jaegerBatchToHTTPBody(b *jaegerthrift.Batch) (*http.Request, error) { + body, err := thrift.NewTSerializer().Write(context.Background(), b) + if err != nil { + return nil, err + } + r := httptest.NewRequest("POST", "/api/traces", bytes.NewReader(body)) + r.Header.Add("content-type", "application/x-thrift") + return r, nil +} + +func TestThriftHTTPBodyDecode(t *testing.T) { + jr := jReceiver{} + batch := &jaegerthrift.Batch{ + Process: jaegerthrift.NewProcess(), + Spans: []*jaegerthrift.Span{jaegerthrift.NewSpan()}, + } + r, err := jaegerBatchToHTTPBody(batch) + require.NoError(t, err, "failed to prepare http body") + + gotBatch, hErr := jr.decodeThriftHTTPBody(r) + require.Nil(t, hErr, "failed to decode http body") + assert.Equal(t, batch, gotBatch) +} + +func TestClientIPDetection(t *testing.T) { + ch := make(chan context.Context) + jr := jReceiver{ + nextConsumer: traceConsumer{ + func(ctx context.Context, _ pdata.Traces) { + ch <- ctx + }, + }, + } + batch := &jaegerthrift.Batch{ + Process: jaegerthrift.NewProcess(), + Spans: []*jaegerthrift.Span{jaegerthrift.NewSpan()}, + } + r, err := jaegerBatchToHTTPBody(batch) + require.NoError(t, err) + + wantClient, ok := client.FromHTTP(r) + assert.True(t, ok) + jr.HandleThriftHTTPBatch(httptest.NewRecorder(), r) + + select { + case ctx := <-ch: + gotClient, ok := client.FromContext(ctx) + assert.True(t, ok, "must get client back from context") + assert.Equal(t, wantClient, gotClient) + break + case <-time.After(time.Second * 2): + t.Error("next consumer did not receive the batch") + } +} + +func TestReception(t *testing.T) { + port := testutil.GetAvailablePort(t) + // 1. 
Create the Jaeger receiver aka "server" + config := &configuration{ + CollectorHTTPPort: int(port), // that's the only one used by this test + } + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + + t.Log("Starting") + + require.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost())) + + t.Log("Start") + + // 2. Then send spans to the Jaeger receiver. + collectorAddr := fmt.Sprintf("http://localhost:%d/api/traces", port) + td := generateTraceData() + batches, err := jaeger.InternalTracesToJaegerProto(td) + require.NoError(t, err) + for _, batch := range batches { + require.NoError(t, sendToCollector(collectorAddr, modelToThrift(batch))) + } + + assert.NoError(t, err, "should not have failed to create the Jaeger OpenCensus exporter") + + gotTraces := sink.AllTraces() + assert.Equal(t, 1, len(gotTraces)) + + assert.EqualValues(t, td, gotTraces[0]) +} + +func TestPortsNotOpen(t *testing.T) { + // an empty config should result in no open ports + config := &configuration{} + + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + + require.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost())) + + // there is a race condition here that we're ignoring. + // this test may occasionally pass incorrectly, but it will not fail incorrectly + // TODO: consider adding a way for a receiver to asynchronously signal that is ready to receive spans to eliminate races/arbitrary waits + l, err := net.Listen("tcp", "localhost:14250") + assert.NoError(t, err, "should have been able to listen on 14250. jaeger receiver incorrectly started grpc") + if l != nil { + l.Close() + } + + l, err = net.Listen("tcp", "localhost:14268") + assert.NoError(t, err, "should have been able to listen on 14268. 
jaeger receiver incorrectly started thrift_http") + if l != nil { + l.Close() + } +} + +func TestGRPCReception(t *testing.T) { + // prepare + config := &configuration{ + CollectorGRPCPort: 14250, // that's the only one used by this test + } + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + + require.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost())) + + conn, err := grpc.Dial(fmt.Sprintf("0.0.0.0:%d", config.CollectorGRPCPort), grpc.WithInsecure()) + require.NoError(t, err) + defer conn.Close() + + cl := api_v2.NewCollectorServiceClient(conn) + + now := time.Unix(1542158650, 536343000).UTC() + d10min := 10 * time.Minute + d2sec := 2 * time.Second + nowPlus10min := now.Add(d10min) + nowPlus10min2sec := now.Add(d10min).Add(d2sec) + + // test + req := grpcFixture(now, d10min, d2sec) + resp, err := cl.PostSpans(context.Background(), req, grpc.WaitForReady(true)) + + // verify + assert.NoError(t, err, "should not have failed to post spans") + assert.NotNil(t, resp, "response should not have been nil") + + gotTraces := sink.AllTraces() + assert.Equal(t, 1, len(gotTraces)) + want := expectedTraceData(now, nowPlus10min, nowPlus10min2sec) + + assert.Len(t, req.Batch.Spans, want.SpanCount(), "got a conflicting amount of spans") + + assert.EqualValues(t, want, gotTraces[0]) +} + +func TestGRPCReceptionWithTLS(t *testing.T) { + // prepare + var grpcServerOptions []grpc.ServerOption + tlsCreds := configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: path.Join(".", "testdata", "server.crt"), + KeyFile: path.Join(".", "testdata", "server.key"), + }, + } + + tlsCfg, err := tlsCreds.LoadTLSConfig() + assert.NoError(t, err) + grpcServerOptions = append(grpcServerOptions, grpc.Creds(credentials.NewTLS(tlsCfg))) + + port := testutil.GetAvailablePort(t) + config := &configuration{ + CollectorGRPCPort: int(port), + CollectorGRPCOptions: grpcServerOptions, + } + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + + require.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost())) + + creds, err := credentials.NewClientTLSFromFile(path.Join(".", "testdata", "server.crt"), "localhost") + require.NoError(t, err) + conn, err := grpc.Dial(jr.collectorGRPCAddr(), grpc.WithTransportCredentials(creds)) + require.NoError(t, err) + defer conn.Close() + + cl := api_v2.NewCollectorServiceClient(conn) + + now := time.Now() + d10min := 10 * time.Minute + d2sec := 2 * time.Second + nowPlus10min := now.Add(d10min) + nowPlus10min2sec := now.Add(d10min).Add(d2sec) + + // test + req := grpcFixture(now, d10min, d2sec) + resp, err := cl.PostSpans(context.Background(), req, grpc.WaitForReady(true)) + + // verify + assert.NoError(t, err, "should not have failed to post spans") + assert.NotNil(t, resp, "response should not have been nil") + + gotTraces := sink.AllTraces() + assert.Equal(t, 1, len(gotTraces)) + want := expectedTraceData(now, nowPlus10min, nowPlus10min2sec) + + assert.Len(t, req.Batch.Spans, want.SpanCount(), "got a conflicting amount of spans") + assert.EqualValues(t, want, gotTraces[0]) +} + +func expectedTraceData(t1, t2, t3 time.Time) pdata.Traces { + traceID := pdata.NewTraceID( + [16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 
0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}) + parentSpanID := pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18}) + childSpanID := pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8}) + + traces := pdata.NewTraces() + traces.ResourceSpans().Resize(1) + rs := traces.ResourceSpans().At(0) + rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, "issaTest") + rs.Resource().Attributes().InsertBool("bool", true) + rs.Resource().Attributes().InsertString("string", "yes") + rs.Resource().Attributes().InsertInt("int64", 10000000) + rs.InstrumentationLibrarySpans().Resize(1) + rs.InstrumentationLibrarySpans().At(0).Spans().Resize(2) + + span0 := rs.InstrumentationLibrarySpans().At(0).Spans().At(0) + span0.SetSpanID(childSpanID) + span0.SetParentSpanID(parentSpanID) + span0.SetTraceID(traceID) + span0.SetName("DBSearch") + span0.SetStartTime(pdata.TimestampUnixNano(uint64(t1.UnixNano()))) + span0.SetEndTime(pdata.TimestampUnixNano(uint64(t2.UnixNano()))) + span0.Status().SetCode(pdata.StatusCodeError) + span0.Status().SetMessage("Stale indices") + + span1 := rs.InstrumentationLibrarySpans().At(0).Spans().At(1) + span1.SetSpanID(parentSpanID) + span1.SetTraceID(traceID) + span1.SetName("ProxyFetch") + span1.SetStartTime(pdata.TimestampUnixNano(uint64(t2.UnixNano()))) + span1.SetEndTime(pdata.TimestampUnixNano(uint64(t3.UnixNano()))) + span1.Status().SetCode(pdata.StatusCodeError) + span1.Status().SetMessage("Frontend crash") + + return traces +} + +func grpcFixture(t1 time.Time, d1, d2 time.Duration) *api_v2.PostSpansRequest { + traceID := model.TraceID{} + traceID.Unmarshal([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}) + parentSpanID := model.NewSpanID(binary.BigEndian.Uint64([]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) + childSpanID := model.NewSpanID(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) + + return &api_v2.PostSpansRequest{ + Batch: model.Batch{ + Process: &model.Process{ + ServiceName: "issaTest", + Tags: []model.KeyValue{ + model.Bool("bool", true), + model.String("string", "yes"), + model.Int64("int64", 1e7), + }, + }, + Spans: []*model.Span{ + { + TraceID: traceID, + SpanID: childSpanID, + OperationName: "DBSearch", + StartTime: t1, + Duration: d1, + Tags: []model.KeyValue{ + model.String(tracetranslator.TagStatusMsg, "Stale indices"), + model.Int64(tracetranslator.TagStatusCode, int64(pdata.StatusCodeError)), + model.Bool("error", true), + }, + References: []model.SpanRef{ + { + TraceID: traceID, + SpanID: parentSpanID, + RefType: model.SpanRefType_CHILD_OF, + }, + }, + }, + { + TraceID: traceID, + SpanID: parentSpanID, + OperationName: "ProxyFetch", + StartTime: t1.Add(d1), + Duration: d2, + Tags: []model.KeyValue{ + model.String(tracetranslator.TagStatusMsg, "Frontend crash"), + model.Int64(tracetranslator.TagStatusCode, int64(pdata.StatusCodeError)), + model.Bool("error", true), + }, + }, + }, + }, + } +} + +func TestSampling(t *testing.T) { + port := testutil.GetAvailablePort(t) + config := &configuration{ + CollectorGRPCPort: int(port), + RemoteSamplingStrategyFile: "testdata/strategies.json", + } + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + + require.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost())) + t.Log("Start") + + conn, err 
:= grpc.Dial(fmt.Sprintf("localhost:%d", config.CollectorGRPCPort), grpc.WithInsecure()) + assert.NoError(t, err) + defer conn.Close() + + cl := api_v2.NewSamplingManagerClient(conn) + + expected := &api_v2.SamplingStrategyResponse{ + StrategyType: api_v2.SamplingStrategyType_PROBABILISTIC, + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 0.8, + }, + OperationSampling: &api_v2.PerOperationSamplingStrategies{ + DefaultSamplingProbability: 0.8, + PerOperationStrategies: []*api_v2.OperationSamplingStrategy{ + { + Operation: "op1", + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 0.2, + }, + }, + { + Operation: "op2", + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 0.4, + }, + }, + }, + }, + } + + resp, err := cl.GetSamplingStrategy(context.Background(), &api_v2.SamplingStrategyParameters{ + ServiceName: "foo", + }) + assert.NoError(t, err) + assert.Equal(t, expected, resp) +} + +func TestSamplingFailsOnNotConfigured(t *testing.T) { + port := testutil.GetAvailablePort(t) + // prepare + config := &configuration{ + CollectorGRPCPort: int(port), + } + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + + require.NoError(t, jr.Start(context.Background(), componenttest.NewNopHost())) + t.Log("Start") + + conn, err := grpc.Dial(fmt.Sprintf("localhost:%d", config.CollectorGRPCPort), grpc.WithInsecure()) + assert.NoError(t, err) + defer conn.Close() + + cl := api_v2.NewSamplingManagerClient(conn) + + response, err := cl.GetSamplingStrategy(context.Background(), &api_v2.SamplingStrategyParameters{ + ServiceName: "nothing", + }) + require.NoError(t, err) + assert.Equal(t, 0.001, response.GetProbabilisticSampling().GetSamplingRate()) +} + +func TestSamplingFailsOnBadFile(t *testing.T) { + port := testutil.GetAvailablePort(t) + // prepare + config := &configuration{ + CollectorGRPCPort: int(port), + RemoteSamplingStrategyFile: "does-not-exist", + } + sink := new(consumertest.TracesSink) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr := newJaegerReceiver(jaegerReceiver, config, sink, params) + defer jr.Shutdown(context.Background()) + assert.Error(t, jr.Start(context.Background(), componenttest.NewNopHost())) +} + +func TestSamplingStrategiesMutualTLS(t *testing.T) { + caPath := path.Join(".", "testdata", "ca.crt") + serverCertPath := path.Join(".", "testdata", "server.crt") + serverKeyPath := path.Join(".", "testdata", "server.key") + clientCertPath := path.Join(".", "testdata", "client.crt") + clientKeyPath := path.Join(".", "testdata", "client.key") + + // start gRPC server that serves sampling strategies + tlsCfgOpts := configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: caPath, + CertFile: serverCertPath, + KeyFile: serverKeyPath, + }, + } + tlsCfg, err := tlsCfgOpts.LoadTLSConfig() + require.NoError(t, err) + server, serverAddr := initializeGRPCTestServer(t, func(s *grpc.Server) { + ss, serr := staticStrategyStore.NewStrategyStore(staticStrategyStore.Options{ + StrategiesFile: path.Join(".", "testdata", "strategies.json"), + }, zap.NewNop()) + require.NoError(t, serr) + api_v2.RegisterSamplingManagerServer(s, collectorSampling.NewGRPCHandler(ss)) + }, grpc.Creds(credentials.NewTLS(tlsCfg))) + defer server.GracefulStop() + + // Create sampling strategies receiver + port := testutil.GetAvailablePort(t) + 
require.NoError(t, err)
+	hostEndpoint := fmt.Sprintf("localhost:%d", port)
+	factory := NewFactory()
+	cfg := factory.CreateDefaultConfig().(*Config)
+	cfg.RemoteSampling = &RemoteSamplingConfig{
+		GRPCClientSettings: configgrpc.GRPCClientSettings{
+			TLSSetting: configtls.TLSClientSetting{
+				TLSSetting: configtls.TLSSetting{
+					CAFile:   caPath,
+					CertFile: clientCertPath,
+					KeyFile:  clientKeyPath,
+				},
+				Insecure:   false,
+				ServerName: "localhost",
+			},
+			Endpoint: serverAddr.String(),
+		},
+		HostEndpoint: hostEndpoint,
+	}
+	// at least one protocol has to be enabled
+	thriftHTTPPort := testutil.GetAvailablePort(t)
+	require.NoError(t, err)
+	cfg.Protocols.ThriftHTTP = &confighttp.HTTPServerSettings{
+		Endpoint: fmt.Sprintf("localhost:%d", thriftHTTPPort),
+	}
+	exp, err := factory.CreateTracesReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, consumertest.NewTracesNop())
+	require.NoError(t, err)
+	host := &componenttest.ErrorWaitingHost{}
+	err = exp.Start(context.Background(), host)
+	require.NoError(t, err)
+	defer exp.Shutdown(context.Background())
+	_, err = host.WaitForFatalError(200 * time.Millisecond)
+	require.NoError(t, err)
+
+	resp, err := http.Get(fmt.Sprintf("http://%s?service=bar", hostEndpoint))
+	require.NoError(t, err)
+	bodyBytes, err := ioutil.ReadAll(resp.Body)
+	require.NoError(t, err)
+	assert.Contains(t, string(bodyBytes), "{\"strategyType\":1,\"rateLimitingSampling\":{\"maxTracesPerSecond\":5}}")
+}
+
+func TestConsumeThriftTrace(t *testing.T) {
+	tests := []struct {
+		batch    *jaegerthrift.Batch
+		numSpans int
+	}{
+		{
+			batch: nil,
+		},
+		{
+			batch:    &jaegerthrift.Batch{Spans: []*jaegerthrift.Span{{}}},
+			numSpans: 1,
+		},
+	}
+	for _, test := range tests {
+		numSpans, err := consumeTraces(context.Background(), test.batch, consumertest.NewTracesNop())
+		require.NoError(t, err)
+		assert.Equal(t, test.numSpans, numSpans)
+	}
+}
+
+func sendToCollector(endpoint string, batch *jaegerthrift.Batch) error {
+	buf, err := thrift.NewTSerializer().Write(context.Background(), batch)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(buf))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/x-thrift")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	io.Copy(ioutil.Discard, resp.Body)
+	resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode)
+	}
+	return nil
+}
diff --git a/internal/otel_collector/receiver/kafkareceiver/README.md b/internal/otel_collector/receiver/kafkareceiver/README.md
new file mode 100644
index 00000000000..f371ce08df3
--- /dev/null
+++ b/internal/otel_collector/receiver/kafkareceiver/README.md
@@ -0,0 +1,63 @@
+# Kafka Receiver
+
+The Kafka receiver receives traces from Kafka. Message payload encoding is configurable.
+
+Supported pipeline types: traces
+
+## Getting Started
+
+The following settings are required:
+
+- `protocol_version` (no default): Kafka protocol version, e.g. 2.0.0
+
+The following settings can be optionally configured:
+
+- `brokers` (default = localhost:9092): The list of kafka brokers
+- `topic` (default = otlp_spans): The name of the kafka topic to consume from
+- `encoding` (default = otlp_proto): The encoding of the payloads read from kafka. Available encodings:
+  - `otlp_proto`: the payload is deserialized to `ExportTraceServiceRequest`.
+  - `jaeger_proto`: the payload is deserialized to a single Jaeger proto `Span`.
+  - `jaeger_json`: the payload is deserialized to a single Jaeger JSON Span using `jsonpb`.
+  - `zipkin_proto`: the payload is deserialized into a list of Zipkin proto spans.
+  - `zipkin_json`: the payload is deserialized into a list of Zipkin V2 JSON spans.
+  - `zipkin_thrift`: the payload is deserialized into a list of Zipkin Thrift spans.
+- `group_id` (default = otel-collector): The consumer group that the receiver will consume messages from
+- `client_id` (default = otel-collector): The consumer client ID that the receiver will use
+- `auth`
+  - `plain_text`
+    - `username`: The username to use.
+    - `password`: The password to use.
+  - `tls`
+    - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should
+      only be used if `insecure` is set to false.
+    - `cert_file`: path to the TLS cert to use for TLS required connections. Should
+      only be used if `insecure` is set to false.
+    - `key_file`: path to the TLS key to use for TLS required connections. Should
+      only be used if `insecure` is set to false.
+    - `insecure` (default = false): Disable verifying the server's certificate
+      chain and host name (`InsecureSkipVerify` in the tls config)
+    - `server_name_override`: ServerName indicates the name of the server requested by the client
+      in order to support virtual hosting.
+  - `kerberos`
+    - `service_name`: Kerberos service name
+    - `realm`: Kerberos realm
+    - `use_keytab`: Use a keytab instead of a password; if true, the keytab file is used instead of the password
+    - `username`: The Kerberos username used to authenticate with the KDC
+    - `password`: The Kerberos password used to authenticate with the KDC
+    - `config_file`: Path to the Kerberos configuration, e.g. /etc/krb5.conf
+    - `keytab_file`: Path to the keytab file, e.g. /etc/security/kafka.keytab
+- `metadata`
+  - `full` (default = true): Whether to maintain a full set of metadata. When
+    disabled the client does not make the initial request to the broker at startup.
+  - `retry`
+    - `max` (default = 3): The number of retries to get metadata
+    - `backoff` (default = 250ms): How long to wait between metadata retries
+
+Example:
+
+```yaml
+receivers:
+  kafka:
+    protocol_version: 2.0.0
+```
diff --git a/internal/otel_collector/receiver/kafkareceiver/config.go b/internal/otel_collector/receiver/kafkareceiver/config.go
new file mode 100644
index 00000000000..fe03de1bb45
--- /dev/null
+++ b/internal/otel_collector/receiver/kafkareceiver/config.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkareceiver
+
+import (
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/exporter/kafkaexporter"
+)
+
+// Config defines configuration for Kafka receiver.
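+// For illustration, a YAML sketch that maps onto these fields via the
+// mapstructure tags below (protocol_version follows the README example; the
+// remaining values are the defaults from factory.go):
+//
+//	receivers:
+//	  kafka:
+//	    protocol_version: 2.0.0
+//	    brokers: ["localhost:9092"]
+//	    topic: otlp_spans
+//	    encoding: otlp_proto
+//	    group_id: otel-collector
+//	    client_id: otel-collector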
+type Config struct {
+	configmodels.ReceiverSettings `mapstructure:",squash"`
+	// The list of kafka brokers (default localhost:9092)
+	Brokers []string `mapstructure:"brokers"`
+	// Kafka protocol version
+	ProtocolVersion string `mapstructure:"protocol_version"`
+	// The name of the kafka topic to consume from (default "otlp_spans")
+	Topic string `mapstructure:"topic"`
+	// Encoding of the messages (default "otlp_proto")
+	Encoding string `mapstructure:"encoding"`
+	// The consumer group that the receiver consumes messages from (default "otel-collector")
+	GroupID string `mapstructure:"group_id"`
+	// The consumer client ID that the receiver will use (default "otel-collector")
+	ClientID string `mapstructure:"client_id"`
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata kafkaexporter.Metadata `mapstructure:"metadata"`
+
+	Authentication kafkaexporter.Authentication `mapstructure:"auth"`
+}
diff --git a/internal/otel_collector/receiver/kafkareceiver/config_test.go b/internal/otel_collector/receiver/kafkareceiver/config_test.go
new file mode 100644
index 00000000000..7e044ef42f2
--- /dev/null
+++ b/internal/otel_collector/receiver/kafkareceiver/config_test.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkareceiver
+
+import (
+	"path"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/configtest"
+	"go.opentelemetry.io/collector/config/configtls"
+	"go.opentelemetry.io/collector/exporter/kafkaexporter"
+)
+
+func TestLoadConfig(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	factory := NewFactory()
+	factories.Receivers[typeStr] = factory
+	cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(cfg.Receivers))
+
+	r := cfg.Receivers[typeStr].(*Config)
+	assert.Equal(t, &Config{
+		ReceiverSettings: configmodels.ReceiverSettings{
+			NameVal: typeStr,
+			TypeVal: typeStr,
+		},
+		Topic:    "spans",
+		Encoding: "otlp_proto",
+		Brokers:  []string{"foo:123", "bar:456"},
+		ClientID: "otel-collector",
+		GroupID:  "otel-collector",
+		Authentication: kafkaexporter.Authentication{
+			TLS: &configtls.TLSClientSetting{
+				TLSSetting: configtls.TLSSetting{
+					CAFile:   "ca.pem",
+					CertFile: "cert.pem",
+					KeyFile:  "key.pem",
+				},
+			},
+		},
+		Metadata: kafkaexporter.Metadata{
+			Full: true,
+			Retry: kafkaexporter.MetadataRetry{
+				Max:     10,
+				Backoff: time.Second * 5,
+			},
+		},
+	}, r)
+}
diff --git a/internal/otel_collector/receiver/kafkareceiver/factory.go b/internal/otel_collector/receiver/kafkareceiver/factory.go
new file mode 100644
index 00000000000..f6cd6ee65ad
--- /dev/null
+++ b/internal/otel_collector/receiver/kafkareceiver/factory.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkareceiver
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/exporter/kafkaexporter"
+	"go.opentelemetry.io/collector/receiver/receiverhelper"
+)
+
+const (
+	typeStr         = "kafka"
+	defaultTopic    = "otlp_spans"
+	defaultEncoding = "otlp_proto"
+	defaultBroker   = "localhost:9092"
+	defaultClientID = "otel-collector"
+	defaultGroupID  = defaultClientID
+
+	// default from sarama.NewConfig()
+	defaultMetadataRetryMax = 3
+	// default from sarama.NewConfig()
+	defaultMetadataRetryBackoff = time.Millisecond * 250
+	// default from sarama.NewConfig()
+	defaultMetadataFull = true
+)
+
+// FactoryOption applies changes to kafkaReceiverFactory.
+type FactoryOption func(factory *kafkaReceiverFactory)
+
+// WithAddUnmarshallers adds unmarshallers, keyed by encoding name.
+func WithAddUnmarshallers(encodingMarshaller map[string]Unmarshaller) FactoryOption {
+	return func(factory *kafkaReceiverFactory) {
+		for encoding, unmarshaller := range encodingMarshaller {
+			factory.unmarshalers[encoding] = unmarshaller
+		}
+	}
+}
+
+// NewFactory creates Kafka receiver factory.
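+//
+// A typical call site looks like the sketch below; "my_encoding" and
+// myUnmarshaller are hypothetical stand-ins for a custom encoding:
+//
+//	factory := NewFactory(WithAddUnmarshallers(map[string]Unmarshaller{
+//		"my_encoding": myUnmarshaller,
+//	}))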
+func NewFactory(options ...FactoryOption) component.ReceiverFactory { + f := &kafkaReceiverFactory{ + unmarshalers: defaultUnmarshallers(), + } + for _, o := range options { + o(f) + } + return receiverhelper.NewFactory( + typeStr, + createDefaultConfig, + receiverhelper.WithTraces(f.createTraceReceiver)) +} + +func createDefaultConfig() configmodels.Receiver { + return &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Topic: defaultTopic, + Encoding: defaultEncoding, + Brokers: []string{defaultBroker}, + ClientID: defaultClientID, + GroupID: defaultGroupID, + Metadata: kafkaexporter.Metadata{ + Full: defaultMetadataFull, + Retry: kafkaexporter.MetadataRetry{ + Max: defaultMetadataRetryMax, + Backoff: defaultMetadataRetryBackoff, + }, + }, + } +} + +type kafkaReceiverFactory struct { + unmarshalers map[string]Unmarshaller +} + +func (f *kafkaReceiverFactory) createTraceReceiver( + _ context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.TracesConsumer, +) (component.TracesReceiver, error) { + c := cfg.(*Config) + r, err := newReceiver(*c, params, f.unmarshalers, nextConsumer) + if err != nil { + return nil, err + } + return r, nil +} diff --git a/internal/otel_collector/receiver/kafkareceiver/factory_test.go b/internal/otel_collector/receiver/kafkareceiver/factory_test.go new file mode 100644 index 00000000000..4417776a9ef --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/factory_test.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package kafkareceiver
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configcheck"
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+func TestCreateDefaultConfig(t *testing.T) {
+	cfg := createDefaultConfig().(*Config)
+	assert.NotNil(t, cfg, "failed to create default config")
+	assert.NoError(t, configcheck.ValidateConfig(cfg))
+	assert.Equal(t, []string{defaultBroker}, cfg.Brokers)
+	assert.Equal(t, defaultTopic, cfg.Topic)
+	assert.Equal(t, defaultGroupID, cfg.GroupID)
+	assert.Equal(t, defaultClientID, cfg.ClientID)
+}
+
+func TestCreateTraceReceiver(t *testing.T) {
+	cfg := createDefaultConfig().(*Config)
+	cfg.Brokers = []string{"invalid:9092"}
+	cfg.ProtocolVersion = "2.0.0"
+	f := kafkaReceiverFactory{unmarshalers: defaultUnmarshallers()}
+	r, err := f.createTraceReceiver(context.Background(), component.ReceiverCreateParams{}, cfg, nil)
+	// no available broker
+	require.Error(t, err)
+	assert.Nil(t, r)
+}
+
+func TestCreateTraceReceiver_error(t *testing.T) {
+	cfg := createDefaultConfig().(*Config)
+	cfg.ProtocolVersion = "2.0.0"
+	// disable contacting broker at startup
+	cfg.Metadata.Full = false
+	f := kafkaReceiverFactory{unmarshalers: defaultUnmarshallers()}
+	r, err := f.createTraceReceiver(context.Background(), component.ReceiverCreateParams{}, cfg, nil)
+	require.NoError(t, err)
+	assert.NotNil(t, r)
+}
+
+func TestWithUnmarshallers(t *testing.T) {
+	unmarshaller := &customUnmarshaller{}
+	f := NewFactory(WithAddUnmarshallers(map[string]Unmarshaller{unmarshaller.Encoding(): unmarshaller}))
+	cfg := createDefaultConfig().(*Config)
+	// disable contacting broker
+	cfg.Metadata.Full = false
+	cfg.ProtocolVersion = "2.0.0"
+
+	t.Run("custom_encoding", func(t *testing.T) {
+		cfg.Encoding = unmarshaller.Encoding()
+		receiver, err := f.CreateTracesReceiver(context.Background(), component.ReceiverCreateParams{}, cfg, nil)
+		require.NoError(t, err)
+		require.NotNil(t, receiver)
+	})
+	t.Run("default_encoding", func(t *testing.T) {
+		cfg.Encoding = new(otlpProtoUnmarshaller).Encoding()
+		receiver, err := f.CreateTracesReceiver(context.Background(), component.ReceiverCreateParams{}, cfg, nil)
+		require.NoError(t, err)
+		assert.NotNil(t, receiver)
+	})
+}
+
+type customUnmarshaller struct {
+}
+
+var _ Unmarshaller = (*customUnmarshaller)(nil)
+
+func (c customUnmarshaller) Unmarshal(bytes []byte) (pdata.Traces, error) {
+	panic("implement me")
+}
+
+func (c customUnmarshaller) Encoding() string {
+	return "custom"
+}
diff --git a/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaller.go b/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaller.go
new file mode 100644
index 00000000000..579f7f6e1d5
--- /dev/null
+++ b/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaller.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkareceiver + +import ( + "bytes" + + "github.com/gogo/protobuf/jsonpb" + jaegerproto "github.com/jaegertracing/jaeger/model" + + "go.opentelemetry.io/collector/consumer/pdata" + jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +type jaegerProtoSpanUnmarshaller struct { +} + +var _ Unmarshaller = (*jaegerProtoSpanUnmarshaller)(nil) + +func (j jaegerProtoSpanUnmarshaller) Unmarshal(bytes []byte) (pdata.Traces, error) { + span := &jaegerproto.Span{} + err := span.Unmarshal(bytes) + if err != nil { + return pdata.NewTraces(), err + } + return jaegerSpanToTraces(span), nil +} + +func (j jaegerProtoSpanUnmarshaller) Encoding() string { + return "jaeger_proto" +} + +type jaegerJSONSpanUnmarshaller struct { +} + +var _ Unmarshaller = (*jaegerJSONSpanUnmarshaller)(nil) + +func (j jaegerJSONSpanUnmarshaller) Unmarshal(data []byte) (pdata.Traces, error) { + span := &jaegerproto.Span{} + err := jsonpb.Unmarshal(bytes.NewReader(data), span) + if err != nil { + return pdata.NewTraces(), err + } + return jaegerSpanToTraces(span), nil +} + +func (j jaegerJSONSpanUnmarshaller) Encoding() string { + return "jaeger_json" +} + +func jaegerSpanToTraces(span *jaegerproto.Span) pdata.Traces { + batch := jaegerproto.Batch{ + Spans: []*jaegerproto.Span{span}, + Process: span.Process, + } + return jaegertranslator.ProtoBatchToInternalTraces(batch) +} diff --git a/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaller_test.go b/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaller_test.go new file mode 100644 index 00000000000..274651cacbc --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaller_test.go @@ -0,0 +1,87 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkareceiver + +import ( + "bytes" + "testing" + + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" +) + +func TestUnmarshallJaeger(t *testing.T) { + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetName("foo") + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetStartTime(pdata.TimestampUnixNano(10)) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetEndTime(pdata.TimestampUnixNano(20)) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + batches, err := jaegertranslator.InternalTracesToJaegerProto(td) + require.NoError(t, err) + + protoBytes, err := batches[0].Spans[0].Marshal() + require.NoError(t, err) + + jsonMarshaller := &jsonpb.Marshaler{} + jsonBytes := new(bytes.Buffer) + jsonMarshaller.Marshal(jsonBytes, batches[0].Spans[0]) + + tests := []struct { + unmarshaller Unmarshaller + encoding string + bytes []byte + }{ + { + unmarshaller: jaegerProtoSpanUnmarshaller{}, + encoding: "jaeger_proto", + bytes: protoBytes, + }, + { + unmarshaller: jaegerJSONSpanUnmarshaller{}, + encoding: "jaeger_json", + bytes: jsonBytes.Bytes(), + }, + } + for _, test := range tests { + t.Run(test.encoding, func(t *testing.T) { + got, err := test.unmarshaller.Unmarshal(test.bytes) + require.NoError(t, err) + assert.Equal(t, td, got) + assert.Equal(t, test.encoding, test.unmarshaller.Encoding()) + }) + } +} + +func TestUnmarshallJaegerProto_error(t *testing.T) { + p := jaegerProtoSpanUnmarshaller{} + got, err := p.Unmarshal([]byte("+$%")) + assert.Equal(t, pdata.NewTraces(), got) + assert.Error(t, err) +} + +func TestUnmarshallJaegerJSON_error(t *testing.T) { + p := jaegerJSONSpanUnmarshaller{} + got, err := p.Unmarshal([]byte("+$%")) + assert.Equal(t, pdata.NewTraces(), got) + assert.Error(t, err) +} diff --git a/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go b/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go new file mode 100644 index 00000000000..8703305c7ae --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go @@ -0,0 +1,181 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkareceiver + +import ( + "context" + "fmt" + "sync" + + "github.com/Shopify/sarama" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter/kafkaexporter" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + transport = "kafka" +) + +var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding") + +// kafkaConsumer uses sarama to consume and handle messages from kafka. +type kafkaConsumer struct { + name string + consumerGroup sarama.ConsumerGroup + nextConsumer consumer.TracesConsumer + topics []string + cancelConsumeLoop context.CancelFunc + unmarshaller Unmarshaller + + logger *zap.Logger +} + +var _ component.Receiver = (*kafkaConsumer)(nil) + +func newReceiver(config Config, params component.ReceiverCreateParams, unmarshalers map[string]Unmarshaller, nextConsumer consumer.TracesConsumer) (*kafkaConsumer, error) { + unmarshaller := unmarshalers[config.Encoding] + if unmarshaller == nil { + return nil, errUnrecognizedEncoding + } + + c := sarama.NewConfig() + c.ClientID = config.ClientID + c.Metadata.Full = config.Metadata.Full + c.Metadata.Retry.Max = config.Metadata.Retry.Max + c.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff + if config.ProtocolVersion != "" { + version, err := sarama.ParseKafkaVersion(config.ProtocolVersion) + if err != nil { + return nil, err + } + c.Version = version + } + if err := kafkaexporter.ConfigureAuthentication(config.Authentication, c); err != nil { + return nil, err + } + client, err := sarama.NewConsumerGroup(config.Brokers, config.GroupID, c) + if err != nil { + return nil, err + } + return &kafkaConsumer{ + name: config.Name(), + consumerGroup: client, + topics: []string{config.Topic}, + nextConsumer: nextConsumer, + unmarshaller: unmarshaller, + logger: params.Logger, + }, nil +} + +func (c *kafkaConsumer) Start(context.Context, component.Host) error { + ctx, cancel := context.WithCancel(context.Background()) + c.cancelConsumeLoop = cancel + consumerGroup := &consumerGroupHandler{ + name: c.name, + logger: c.logger, + unmarshaller: c.unmarshaller, + nextConsumer: c.nextConsumer, + ready: make(chan bool), + } + go c.consumeLoop(ctx, consumerGroup) + <-consumerGroup.ready + return nil +} + +func (c *kafkaConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { + for { + // `Consume` should be called inside an infinite loop, when a + // server-side rebalance happens, the consumer session will need to be + // recreated to get the new claims + if err := c.consumerGroup.Consume(ctx, c.topics, handler); err != nil { + c.logger.Error("Error from consumer", zap.Error(err)) + } + // check if context was cancelled, signaling that the consumer should stop + if ctx.Err() != nil { + c.logger.Info("Consumer stopped", zap.Error(ctx.Err())) + return ctx.Err() + } + } +} + +func (c *kafkaConsumer) Shutdown(context.Context) error { + c.cancelConsumeLoop() + return c.consumerGroup.Close() +} + +type consumerGroupHandler struct { + name string + unmarshaller Unmarshaller + nextConsumer consumer.TracesConsumer + ready chan bool + readyCloser sync.Once + + logger *zap.Logger +} + +var _ sarama.ConsumerGroupHandler = (*consumerGroupHandler)(nil) + +func (c *consumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { + c.readyCloser.Do(func() { + close(c.ready) + }) + statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.name)} + _ = 
stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + return nil +} + +func (c *consumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { + statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.name)} + _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + return nil +} + +func (c *consumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + c.logger.Info("Starting consumer group", zap.Int32("partition", claim.Partition())) + for message := range claim.Messages() { + c.logger.Debug("Kafka message claimed", + zap.String("value", string(message.Value)), + zap.Time("timestamp", message.Timestamp), + zap.String("topic", message.Topic)) + session.MarkMessage(message, "") + + ctx := obsreport.ReceiverContext(session.Context(), c.name, transport) + ctx = obsreport.StartTraceDataReceiveOp(ctx, c.name, transport) + statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.name)} + _ = stats.RecordWithTags(ctx, statsTags, + statMessageCount.M(1), + statMessageOffset.M(message.Offset), + statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + + traces, err := c.unmarshaller.Unmarshal(message.Value) + if err != nil { + c.logger.Error("failed to unmarshall message", zap.Error(err)) + return err + } + + err = c.nextConsumer.ConsumeTraces(session.Context(), traces) + obsreport.EndTraceDataReceiveOp(ctx, c.unmarshaller.Encoding(), traces.SpanCount(), err) + if err != nil { + return err + } + } + return nil +} diff --git a/internal/otel_collector/receiver/kafkareceiver/kafka_receiver_test.go b/internal/otel_collector/receiver/kafkareceiver/kafka_receiver_test.go new file mode 100644 index 00000000000..f865970186e --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/kafka_receiver_test.go @@ -0,0 +1,334 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkareceiver + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/kafkaexporter" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" +) + +func TestNewReceiver_version_err(t *testing.T) { + c := Config{ + Encoding: defaultEncoding, + ProtocolVersion: "none", + } + r, err := newReceiver(c, component.ReceiverCreateParams{}, defaultUnmarshallers(), consumertest.NewTracesNop()) + assert.Error(t, err) + assert.Nil(t, r) +} + +func TestNewReceiver_encoding_err(t *testing.T) { + c := Config{ + Encoding: "foo", + } + r, err := newReceiver(c, component.ReceiverCreateParams{}, defaultUnmarshallers(), consumertest.NewTracesNop()) + require.Error(t, err) + assert.Nil(t, r) + assert.EqualError(t, err, errUnrecognizedEncoding.Error()) +} + +func TestNewExporter_err_auth_type(t *testing.T) { + c := Config{ + ProtocolVersion: "2.0.0", + Authentication: kafkaexporter.Authentication{ + TLS: &configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/doesnotexist", + }, + }, + }, + Encoding: defaultEncoding, + Metadata: kafkaexporter.Metadata{ + Full: false, + }, + } + r, err := newReceiver(c, component.ReceiverCreateParams{}, defaultUnmarshallers(), consumertest.NewTracesNop()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to load TLS config") + assert.Nil(t, r) +} + +func TestReceiverStart(t *testing.T) { + testClient := testConsumerGroup{once: &sync.Once{}} + c := kafkaConsumer{ + nextConsumer: consumertest.NewTracesNop(), + logger: zap.NewNop(), + consumerGroup: testClient, + } + + err := c.Start(context.Background(), nil) + require.NoError(t, err) + c.Shutdown(context.Background()) +} + +func TestReceiverStartConsume(t *testing.T) { + testClient := testConsumerGroup{once: &sync.Once{}} + c := kafkaConsumer{ + nextConsumer: consumertest.NewTracesNop(), + logger: zap.NewNop(), + consumerGroup: testClient, + } + ctx, cancelFunc := context.WithCancel(context.Background()) + c.cancelConsumeLoop = cancelFunc + c.Shutdown(context.Background()) + err := c.consumeLoop(ctx, &consumerGroupHandler{ + ready: make(chan bool), + }) + assert.EqualError(t, err, context.Canceled.Error()) +} + +func TestReceiver_error(t *testing.T) { + zcore, logObserver := observer.New(zapcore.ErrorLevel) + logger := zap.New(zcore) + + expectedErr := fmt.Errorf("handler error") + testClient := testConsumerGroup{once: &sync.Once{}, err: expectedErr} + c := kafkaConsumer{ + nextConsumer: consumertest.NewTracesNop(), + logger: logger, + consumerGroup: testClient, + } + + err := c.Start(context.Background(), nil) + require.NoError(t, err) + c.Shutdown(context.Background()) + waitUntil(func() bool { + return logObserver.FilterField(zap.Error(expectedErr)).Len() > 0 + }, 100, time.Millisecond*100) + assert.True(t, logObserver.FilterField(zap.Error(expectedErr)).Len() > 0) +} + +func TestConsumerGroupHandler(t *testing.T) { + views := MetricViews() + view.Register(views...) + defer view.Unregister(views...) 
+ + c := consumerGroupHandler{ + unmarshaller: &otlpProtoUnmarshaller{}, + logger: zap.NewNop(), + ready: make(chan bool), + nextConsumer: consumertest.NewTracesNop(), + } + + testSession := testConsumerGroupSession{} + err := c.Setup(testSession) + require.NoError(t, err) + _, ok := <-c.ready + assert.False(t, ok) + viewData, err := view.RetrieveData(statPartitionStart.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData := viewData[0].Data.(*view.SumData) + assert.Equal(t, float64(1), distData.Value) + + err = c.Cleanup(testSession) + require.NoError(t, err) + viewData, err = view.RetrieveData(statPartitionClose.Name()) + require.NoError(t, err) + assert.Equal(t, 1, len(viewData)) + distData = viewData[0].Data.(*view.SumData) + assert.Equal(t, float64(1), distData.Value) + + groupClaim := testConsumerGroupClaim{ + messageChan: make(chan *sarama.ConsumerMessage), + } + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + err = c.ConsumeClaim(testSession, groupClaim) + require.NoError(t, err) + wg.Done() + }() + + groupClaim.messageChan <- &sarama.ConsumerMessage{} + close(groupClaim.messageChan) + wg.Wait() +} + +func TestConsumerGroupHandler_error_unmarshall(t *testing.T) { + c := consumerGroupHandler{ + unmarshaller: &otlpProtoUnmarshaller{}, + logger: zap.NewNop(), + ready: make(chan bool), + nextConsumer: consumertest.NewTracesNop(), + } + + wg := sync.WaitGroup{} + wg.Add(1) + groupClaim := &testConsumerGroupClaim{ + messageChan: make(chan *sarama.ConsumerMessage), + } + go func() { + err := c.ConsumeClaim(testConsumerGroupSession{}, groupClaim) + require.Error(t, err) + wg.Done() + }() + groupClaim.messageChan <- &sarama.ConsumerMessage{Value: []byte("!@#")} + close(groupClaim.messageChan) + wg.Wait() +} + +func TestConsumerGroupHandler_error_nextConsumer(t *testing.T) { + nextConsumer := new(consumertest.TracesSink) + consumerError := fmt.Errorf("failed to consumer") + nextConsumer.SetConsumeError(consumerError) + c := consumerGroupHandler{ + unmarshaller: &otlpProtoUnmarshaller{}, + logger: zap.NewNop(), + ready: make(chan bool), + nextConsumer: nextConsumer, + } + + wg := sync.WaitGroup{} + wg.Add(1) + groupClaim := &testConsumerGroupClaim{ + messageChan: make(chan *sarama.ConsumerMessage), + } + go func() { + e := c.ConsumeClaim(testConsumerGroupSession{}, groupClaim) + assert.EqualError(t, e, consumerError.Error()) + wg.Done() + }() + + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + request := &otlptrace.ExportTraceServiceRequest{ + ResourceSpans: pdata.TracesToOtlp(td), + } + bts, err := request.Marshal() + require.NoError(t, err) + groupClaim.messageChan <- &sarama.ConsumerMessage{Value: bts} + close(groupClaim.messageChan) + wg.Wait() +} + +type testConsumerGroupClaim struct { + messageChan chan *sarama.ConsumerMessage +} + +var _ sarama.ConsumerGroupClaim = (*testConsumerGroupClaim)(nil) + +const ( + testTopic = "otlp_spans" + testPartition = 5 + testInitialOffset = 6 + testHighWatermarkOffset = 4 +) + +func (t testConsumerGroupClaim) Topic() string { + return testTopic +} + +func (t testConsumerGroupClaim) Partition() int32 { + return testPartition +} + +func (t testConsumerGroupClaim) InitialOffset() int64 { + return testInitialOffset +} + +func (t testConsumerGroupClaim) HighWaterMarkOffset() int64 { + return testHighWatermarkOffset +} + +func (t testConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { + return t.messageChan +} + +type testConsumerGroupSession struct { +} + +func (t testConsumerGroupSession) Commit() { + 
panic("implement me") +} + +var _ sarama.ConsumerGroupSession = (*testConsumerGroupSession)(nil) + +func (t testConsumerGroupSession) Claims() map[string][]int32 { + panic("implement me") +} + +func (t testConsumerGroupSession) MemberID() string { + panic("implement me") +} + +func (t testConsumerGroupSession) GenerationID() int32 { + panic("implement me") +} + +func (t testConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("implement me") +} + +func (t testConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("implement me") +} + +func (t testConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { +} + +func (t testConsumerGroupSession) Context() context.Context { + return context.Background() +} + +type testConsumerGroup struct { + once *sync.Once + err error +} + +var _ sarama.ConsumerGroup = (*testConsumerGroup)(nil) + +func (t testConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { + t.once.Do(func() { + handler.Setup(testConsumerGroupSession{}) + }) + return t.err +} + +func (t testConsumerGroup) Errors() <-chan error { + panic("implement me") +} + +func (t testConsumerGroup) Close() error { + return nil +} + +func waitUntil(f func() bool, iterations int, sleepInterval time.Duration) { + for i := 0; i < iterations; i++ { + if f() { + return + } + time.Sleep(sleepInterval) + } +} diff --git a/internal/otel_collector/receiver/kafkareceiver/metrics.go b/internal/otel_collector/receiver/kafkareceiver/metrics.go new file mode 100644 index 00000000000..41b0e960c8e --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/metrics.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkareceiver + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + tagInstanceName, _ = tag.NewKey("name") + + statMessageCount = stats.Int64("kafka_receiver_messages", "Number of received messages", stats.UnitDimensionless) + statMessageOffset = stats.Int64("kafka_receiver_current_offset", "Current message offset", stats.UnitDimensionless) + statMessageOffsetLag = stats.Int64("kafka_receiver_offset_lag", "Current offset lag", stats.UnitDimensionless) + + statPartitionStart = stats.Int64("kafka_receiver_partition_start", "Number of started partitions", stats.UnitDimensionless) + statPartitionClose = stats.Int64("kafka_receiver_partition_close", "Number of finished partitions", stats.UnitDimensionless) +) + +// MetricViews return metric views for Kafka receiver. 
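+//
+// The views are not registered automatically; a caller is expected to do so,
+// e.g. (sketch):
+//
+//	if err := view.Register(MetricViews()...); err != nil {
+//		// handle registration failure
+//	}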
+func MetricViews() []*view.View { + tagKeys := []tag.Key{tagInstanceName} + + countMessages := &view.View{ + Name: statMessageCount.Name(), + Measure: statMessageCount, + Description: statMessageCount.Description(), + TagKeys: tagKeys, + Aggregation: view.Sum(), + } + + lastValueOffset := &view.View{ + Name: statMessageOffset.Name(), + Measure: statMessageOffset, + Description: statMessageOffset.Description(), + TagKeys: tagKeys, + Aggregation: view.LastValue(), + } + + lastValueOffsetLag := &view.View{ + Name: statMessageOffsetLag.Name(), + Measure: statMessageOffsetLag, + Description: statMessageOffsetLag.Description(), + TagKeys: tagKeys, + Aggregation: view.LastValue(), + } + + countPartitionStart := &view.View{ + Name: statPartitionStart.Name(), + Measure: statPartitionStart, + Description: statPartitionStart.Description(), + TagKeys: tagKeys, + Aggregation: view.Sum(), + } + + countPartitionClose := &view.View{ + Name: statPartitionClose.Name(), + Measure: statPartitionClose, + Description: statPartitionClose.Description(), + TagKeys: tagKeys, + Aggregation: view.Sum(), + } + + return []*view.View{ + countMessages, + lastValueOffset, + lastValueOffsetLag, + countPartitionStart, + countPartitionClose, + } +} diff --git a/internal/otel_collector/receiver/kafkareceiver/metrics_test.go b/internal/otel_collector/receiver/kafkareceiver/metrics_test.go new file mode 100644 index 00000000000..1d67d522cbc --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/metrics_test.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkareceiver + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMetrics(t *testing.T) { + metricViews := MetricViews() + viewNames := []string{ + "kafka_receiver_messages", + "kafka_receiver_current_offset", + "kafka_receiver_offset_lag", + "kafka_receiver_partition_start", + "kafka_receiver_partition_close", + } + for i, viewName := range viewNames { + assert.Equal(t, viewName, metricViews[i].Name) + } +} diff --git a/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaller.go b/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaller.go new file mode 100644 index 00000000000..b73978606b2 --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaller.go @@ -0,0 +1,38 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkareceiver + +import ( + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" +) + +type otlpProtoUnmarshaller struct { +} + +var _ Unmarshaller = (*otlpProtoUnmarshaller)(nil) + +func (p *otlpProtoUnmarshaller) Unmarshal(bytes []byte) (pdata.Traces, error) { + request := &otlptrace.ExportTraceServiceRequest{} + err := request.Unmarshal(bytes) + if err != nil { + return pdata.NewTraces(), err + } + return pdata.TracesFromOtlp(request.GetResourceSpans()), nil +} + +func (*otlpProtoUnmarshaller) Encoding() string { + return defaultEncoding +} diff --git a/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaller_test.go b/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaller_test.go new file mode 100644 index 00000000000..f52d898e425 --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaller_test.go @@ -0,0 +1,49 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkareceiver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" +) + +func TestUnmarshallOTLP(t *testing.T) { + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + td.ResourceSpans().At(0).Resource().Attributes().InsertString("foo", "bar") + request := &otlptrace.ExportTraceServiceRequest{ + ResourceSpans: pdata.TracesToOtlp(td), + } + expected, err := request.Marshal() + require.NoError(t, err) + + p := otlpProtoUnmarshaller{} + got, err := p.Unmarshal(expected) + require.NoError(t, err) + assert.Equal(t, td, got) + assert.Equal(t, "otlp_proto", p.Encoding()) +} + +func TestUnmarshallOTLP_error(t *testing.T) { + p := otlpProtoUnmarshaller{} + got, err := p.Unmarshal([]byte("+$%")) + assert.Equal(t, pdata.NewTraces(), got) + assert.Error(t, err) +} diff --git a/internal/otel_collector/receiver/kafkareceiver/testdata/config.yaml b/internal/otel_collector/receiver/kafkareceiver/testdata/config.yaml new file mode 100644 index 00000000000..c170cf713a1 --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/testdata/config.yaml @@ -0,0 +1,30 @@ +receivers: + kafka: + topic: spans + brokers: + - "foo:123" + - "bar:456" + client_id: otel-collector + group_id: otel-collector + auth: + tls: + ca_file: ca.pem + cert_file: cert.pem + key_file: key.pem + metadata: + retry: + max: 10 + backoff: 5s + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [kafka] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/kafkareceiver/unmarshaller.go b/internal/otel_collector/receiver/kafkareceiver/unmarshaller.go new file mode 100644 index 00000000000..5a2972607c2 --- /dev/null +++ 
b/internal/otel_collector/receiver/kafkareceiver/unmarshaller.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkareceiver + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +// Unmarshaller deserializes the message body. +type Unmarshaller interface { + // Unmarshal deserializes the message body into traces. + Unmarshal([]byte) (pdata.Traces, error) + + // Encoding of the serialized messages. + Encoding() string +} + +// defaultUnmarshallers returns map of supported encodings with Unmarshaller. +func defaultUnmarshallers() map[string]Unmarshaller { + otlp := &otlpProtoUnmarshaller{} + jaegerProto := jaegerProtoSpanUnmarshaller{} + jaegerJSON := jaegerJSONSpanUnmarshaller{} + zipkinProto := zipkinProtoSpanUnmarshaller{} + zipkinJSON := zipkinJSONSpanUnmarshaller{} + zipkinThrift := zipkinThriftSpanUnmarshaller{} + return map[string]Unmarshaller{ + otlp.Encoding(): otlp, + jaegerProto.Encoding(): jaegerProto, + jaegerJSON.Encoding(): jaegerJSON, + zipkinProto.Encoding(): zipkinProto, + zipkinJSON.Encoding(): zipkinJSON, + zipkinThrift.Encoding(): zipkinThrift, + } +} diff --git a/internal/otel_collector/receiver/kafkareceiver/unmarshaller_test.go b/internal/otel_collector/receiver/kafkareceiver/unmarshaller_test.go new file mode 100644 index 00000000000..952eb9ced22 --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/unmarshaller_test.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package kafkareceiver
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDefaultUnmarshaller(t *testing.T) {
+	expectedEncodings := []string{
+		"otlp_proto",
+		"jaeger_proto",
+		"jaeger_json",
+		"zipkin_proto",
+		"zipkin_json",
+		"zipkin_thrift",
+	}
+	marshallers := defaultUnmarshallers()
+	assert.Equal(t, len(expectedEncodings), len(marshallers))
+	for _, e := range expectedEncodings {
+		t.Run(e, func(t *testing.T) {
+			m, ok := marshallers[e]
+			require.True(t, ok)
+			assert.NotNil(t, m)
+		})
+	}
+}
diff --git a/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaller.go b/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaller.go
new file mode 100644
index 00000000000..698cffa97f5
--- /dev/null
+++ b/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaller.go
@@ -0,0 +1,106 @@
+// Copyright 2020 The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kafkareceiver
+
+import (
+	"encoding/json"
+
+	"github.com/apache/thrift/lib/go/thrift"
+	"github.com/jaegertracing/jaeger/thrift-gen/zipkincore"
+	zipkinmodel "github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/proto/zipkin_proto3"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	zipkintranslator "go.opentelemetry.io/collector/translator/trace/zipkin"
+)
+
+type zipkinProtoSpanUnmarshaller struct {
+}
+
+var _ Unmarshaller = (*zipkinProtoSpanUnmarshaller)(nil)
+
+func (z zipkinProtoSpanUnmarshaller) Unmarshal(bytes []byte) (pdata.Traces, error) {
+	parseSpans, err := zipkin_proto3.ParseSpans(bytes, false)
+	if err != nil {
+		return pdata.NewTraces(), err
+	}
+	return zipkintranslator.V2SpansToInternalTraces(parseSpans, false)
+}
+
+func (z zipkinProtoSpanUnmarshaller) Encoding() string {
+	return "zipkin_proto"
+}
+
+type zipkinJSONSpanUnmarshaller struct {
+}
+
+var _ Unmarshaller = (*zipkinJSONSpanUnmarshaller)(nil)
+
+func (z zipkinJSONSpanUnmarshaller) Unmarshal(bytes []byte) (pdata.Traces, error) {
+	var spans []*zipkinmodel.SpanModel
+	if err := json.Unmarshal(bytes, &spans); err != nil {
+		return pdata.NewTraces(), err
+	}
+	return zipkintranslator.V2SpansToInternalTraces(spans, false)
+}
+
+func (z zipkinJSONSpanUnmarshaller) Encoding() string {
+	return "zipkin_json"
+}
+
+type zipkinThriftSpanUnmarshaller struct {
+}
+
+var _ Unmarshaller = (*zipkinThriftSpanUnmarshaller)(nil)
+
+func (z zipkinThriftSpanUnmarshaller) Unmarshal(bytes []byte) (pdata.Traces, error) {
+	spans, err := deserializeZipkinThrift(bytes)
+	if err != nil {
+		return pdata.NewTraces(), err
+	}
+	return zipkintranslator.V1ThriftBatchToInternalTraces(spans)
+}
+
+func (z zipkinThriftSpanUnmarshaller) Encoding() string {
+	return "zipkin_thrift"
+}
+
+// deserializeZipkinThrift decodes Thrift bytes to a list of spans.
+// This code comes from jaegertracing/jaeger; ideally we would have imported
+// it, but doing so created many conflicts, so the code was copied here.
+// https://github.com/jaegertracing/jaeger/blob/6bc0c122bfca8e737a747826ae60a22a306d7019/model/converter/thrift/zipkin/deserialize.go#L36 +func deserializeZipkinThrift(b []byte) ([]*zipkincore.Span, error) { + buffer := thrift.NewTMemoryBuffer() + buffer.Write(b) + + transport := thrift.NewTBinaryProtocolTransport(buffer) + _, size, err := transport.ReadListBegin() // Ignore the returned element type + if err != nil { + return nil, err + } + + // We don't depend on the size returned by ReadListBegin to preallocate the array because it + // sometimes returns a nil error on bad input and provides an unreasonably large int for size + var spans []*zipkincore.Span + for i := 0; i < size; i++ { + zs := &zipkincore.Span{} + if err = zs.Read(transport); err != nil { + return nil, err + } + spans = append(spans, zs) + } + return spans, nil +} diff --git a/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaller_test.go b/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaller_test.go new file mode 100644 index 00000000000..3a0251f8f4d --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaller_test.go @@ -0,0 +1,120 @@ +// Copyright 2020 The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkareceiver + +import ( + "testing" + + "github.com/apache/thrift/lib/go/thrift" + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" + zipkinreporter "github.com/openzipkin/zipkin-go/reporter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + zipkintranslator "go.opentelemetry.io/collector/translator/trace/zipkin" +) + +func TestUnmarshallZipkin(t *testing.T) { + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + td.ResourceSpans().At(0).Resource().Attributes().InitFromMap( + map[string]pdata.AttributeValue{conventions.AttributeServiceName: pdata.NewAttributeValueString("my_service")}) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().Resize(1) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetName("foo") + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetStartTime(pdata.TimestampUnixNano(1597759000)) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetEndTime(pdata.TimestampUnixNano(1597769000)) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).SetParentSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 0})) + spans, err := zipkintranslator.InternalTracesToZipkinSpans(td) + require.NoError(t, err) + + serializer := zipkinreporter.JSONSerializer{} + jsonBytes, err := serializer.Serialize(spans) + require.NoError(t, err) + + tSpan := &zipkincore.Span{Name: "foo"} + thriftTransport := thrift.NewTMemoryBuffer() + protocolTransport := thrift.NewTBinaryProtocolTransport(thriftTransport) + require.NoError(t, protocolTransport.WriteListBegin(thrift.STRUCT, 1)) + err = tSpan.Write(protocolTransport) + require.NoError(t, err) + require.NoError(t, protocolTransport.WriteListEnd()) + + tdThrift, err := zipkintranslator.V1ThriftBatchToInternalTraces([]*zipkincore.Span{tSpan}) + require.NoError(t, err) + + protoBytes, err := new(zipkin_proto3.SpanSerializer).Serialize(spans) + require.NoError(t, err) + + tests := []struct { + unmarshaller Unmarshaller + encoding string + bytes []byte + expected pdata.Traces + }{ + { + unmarshaller: zipkinProtoSpanUnmarshaller{}, + encoding: "zipkin_proto", + bytes: protoBytes, + expected: td, + }, + { + unmarshaller: zipkinJSONSpanUnmarshaller{}, + encoding: "zipkin_json", + bytes: jsonBytes, + expected: td, + }, + { + unmarshaller: zipkinThriftSpanUnmarshaller{}, + encoding: "zipkin_thrift", + bytes: thriftTransport.Buffer.Bytes(), + expected: tdThrift, + }, + } + for _, test := range tests { + t.Run(test.encoding, func(t *testing.T) { + traces, err := test.unmarshaller.Unmarshal(test.bytes) + require.NoError(t, err) + assert.Equal(t, test.expected, traces) + assert.Equal(t, test.encoding, test.unmarshaller.Encoding()) + }) + } +} + +func TestUnmarshallZipkinThrift_error(t *testing.T) { + p := zipkinThriftSpanUnmarshaller{} + got, err := p.Unmarshal([]byte("+$%")) + assert.Equal(t, pdata.NewTraces(), got) + assert.Error(t, err) +} + +func 
TestUnmarshallZipkinJSON_error(t *testing.T) {
+	p := zipkinJSONSpanUnmarshaller{}
+	got, err := p.Unmarshal([]byte("+$%"))
+	assert.Equal(t, pdata.NewTraces(), got)
+	assert.Error(t, err)
+}
+
+func TestUnmarshallZipkinProto_error(t *testing.T) {
+	p := zipkinProtoSpanUnmarshaller{}
+	got, err := p.Unmarshal([]byte("+$%"))
+	assert.Equal(t, pdata.NewTraces(), got)
+	assert.Error(t, err)
+}
diff --git a/internal/otel_collector/receiver/opencensusreceiver/README.md b/internal/otel_collector/receiver/opencensusreceiver/README.md
new file mode 100644
index 00000000000..d119506cbc6
--- /dev/null
+++ b/internal/otel_collector/receiver/opencensusreceiver/README.md
@@ -0,0 +1,53 @@
+# OpenCensus Receiver
+
+Receives data via gRPC or HTTP using the [OpenCensus](https://opencensus.io/)
+format.
+
+Supported pipeline types: traces, metrics
+
+## Getting Started
+
+All that is required to enable the OpenCensus receiver is to include it in the
+receiver definitions.
+
+```yaml
+receivers:
+  opencensus:
+```
+
+The following settings are configurable:
+
+- `endpoint` (default = 0.0.0.0:55678): the host:port on which the receiver
+  listens for data. The valid syntax is described at
+  https://github.com/grpc/grpc/blob/master/doc/naming.md.
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md) including CORS
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md)
+
+## Writing with HTTP/JSON
+
+The OpenCensus receiver can receive trace export calls via HTTP/JSON in
+addition to gRPC. The HTTP/JSON endpoint uses the same address as gRPC, as the
+protocol is recognized and processed accordingly.
+
+To write traces with HTTP/JSON, `POST` to `[address]/v1/trace`. The JSON message
+format parallels the gRPC protobuf format; see the
+[OpenAPI spec for it](https://github.com/census-instrumentation/opencensus-proto/blob/master/gen-openapi/opencensus/proto/agent/trace/v1/trace_service.swagger.json).
+
+The HTTP/JSON endpoint can also optionally enable
+[CORS](https://fetch.spec.whatwg.org/#cors-protocol) by specifying a list of
+allowed CORS origins in the `cors_allowed_origins` field:
+
+```yaml
+receivers:
+  opencensus:
+    cors_allowed_origins:
+    - http://test.com
+    # Origins can have wildcards with *, use * by itself to match any origin.
+    - https://*.example.com
+```
diff --git a/internal/otel_collector/receiver/opencensusreceiver/config.go b/internal/otel_collector/receiver/opencensusreceiver/config.go
new file mode 100644
index 00000000000..52be610bd15
--- /dev/null
+++ b/internal/otel_collector/receiver/opencensusreceiver/config.go
@@ -0,0 +1,51 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusreceiver + +import ( + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" +) + +// Config defines configuration for OpenCensus receiver. +type Config struct { + configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + // Configures the receiver server protocol. + configgrpc.GRPCServerSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + // CorsOrigins are the allowed CORS origins for HTTP/JSON requests to grpc-gateway adapter + // for the OpenCensus receiver. See github.com/rs/cors + // An empty list means that CORS is not enabled at all. A wildcard (*) can be + // used to match any origin or one or more characters of an origin. + CorsOrigins []string `mapstructure:"cors_allowed_origins"` +} + +func (rOpts *Config) buildOptions() ([]ocOption, error) { + var opts []ocOption + if len(rOpts.CorsOrigins) > 0 { + opts = append(opts, withCorsOrigins(rOpts.CorsOrigins)) + } + + grpcServerOptions, err := rOpts.GRPCServerSettings.ToServerOption() + if err != nil { + return nil, err + } + if len(grpcServerOptions) > 0 { + opts = append(opts, withGRPCServerOptions(grpcServerOptions...)) + } + + return opts, nil +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/config_test.go b/internal/otel_collector/receiver/opencensusreceiver/config_test.go new file mode 100644 index 00000000000..5f753b84baa --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/config_test.go @@ -0,0 +1,196 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package opencensusreceiver + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/config/configtls" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 7) + + r0 := cfg.Receivers["opencensus"] + assert.Equal(t, r0, factory.CreateDefaultConfig()) + + r1 := cfg.Receivers["opencensus/customname"].(*Config) + assert.Equal(t, r1, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "opencensus/customname", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:9090", + Transport: "tcp", + }, + ReadBufferSize: 512 * 1024, + }, + }) + + r2 := cfg.Receivers["opencensus/keepalive"].(*Config) + assert.Equal(t, r2, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "opencensus/keepalive", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:55678", + Transport: "tcp", + }, + ReadBufferSize: 512 * 1024, + Keepalive: &configgrpc.KeepaliveServerConfig{ + ServerParameters: &configgrpc.KeepaliveServerParameters{ + MaxConnectionIdle: 11 * time.Second, + MaxConnectionAge: 12 * time.Second, + MaxConnectionAgeGrace: 13 * time.Second, + Time: 30 * time.Second, + Timeout: 5 * time.Second, + }, + EnforcementPolicy: &configgrpc.KeepaliveEnforcementPolicy{ + MinTime: 10 * time.Second, + PermitWithoutStream: true, + }, + }, + }, + }) + + r3 := cfg.Receivers["opencensus/msg-size-conc-connect-max-idle"].(*Config) + assert.Equal(t, r3, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "opencensus/msg-size-conc-connect-max-idle", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:55678", + Transport: "tcp", + }, + MaxRecvMsgSizeMiB: 32, + MaxConcurrentStreams: 16, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + Keepalive: &configgrpc.KeepaliveServerConfig{ + ServerParameters: &configgrpc.KeepaliveServerParameters{ + MaxConnectionIdle: 10 * time.Second, + }, + }, + }, + }) + + // TODO(ccaraman): Once the config loader checks for the files existence, this test may fail and require + // use of fake cert/key for test purposes. 
+ r4 := cfg.Receivers["opencensus/tlscredentials"].(*Config) + assert.Equal(t, r4, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "opencensus/tlscredentials", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:55678", + Transport: "tcp", + }, + ReadBufferSize: 512 * 1024, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "test.crt", + KeyFile: "test.key", + }, + }, + }, + }) + + r5 := cfg.Receivers["opencensus/cors"].(*Config) + assert.Equal(t, r5, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "opencensus/cors", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:55678", + Transport: "tcp", + }, + ReadBufferSize: 512 * 1024, + }, + CorsOrigins: []string{"https://*.test.com", "https://test.com"}, + }) + + r6 := cfg.Receivers["opencensus/uds"].(*Config) + assert.Equal(t, r6, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "opencensus/uds", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "/tmp/opencensus.sock", + Transport: "unix", + }, + ReadBufferSize: 512 * 1024, + }, + }) +} + +func TestBuildOptions_TLSCredentials(t *testing.T) { + cfg := Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: "IncorrectTLS", + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "willfail", + }, + }, + }, + } + _, err := cfg.buildOptions() + assert.EqualError(t, err, `failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither`) + + cfg.TLSSetting = &configtls.TLSServerSetting{} + opt, err := cfg.buildOptions() + assert.NoError(t, err) + assert.NotNil(t, opt) +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/factory.go b/internal/otel_collector/receiver/opencensusreceiver/factory.go new file mode 100644 index 00000000000..484e37dcbc4 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/factory.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusreceiver + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +const ( + // The value of "type" key in configuration. 
+ typeStr = "opencensus" +) + +func NewFactory() component.ReceiverFactory { + return receiverhelper.NewFactory( + typeStr, + createDefaultConfig, + receiverhelper.WithTraces(createTraceReceiver), + receiverhelper.WithMetrics(createMetricsReceiver)) +} + +func createDefaultConfig() configmodels.Receiver { + return &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:55678", + Transport: "tcp", + }, + // We almost write 0 bytes, so no need to tune WriteBufferSize. + ReadBufferSize: 512 * 1024, + }, + } +} + +func createTraceReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.TracesConsumer, +) (component.TracesReceiver, error) { + r, err := createReceiver(cfg) + if err != nil { + return nil, err + } + + r.traceConsumer = nextConsumer + + return r, nil +} + +func createMetricsReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.MetricsConsumer, +) (component.MetricsReceiver, error) { + r, err := createReceiver(cfg) + if err != nil { + return nil, err + } + + r.metricsConsumer = nextConsumer + + return r, nil +} + +func createReceiver(cfg configmodels.Receiver) (*ocReceiver, error) { + rCfg := cfg.(*Config) + + // There must be one receiver for both metrics and traces. We maintain a map of + // receivers per config. + + // Check to see if there is already a receiver for this config. + receiver, ok := receivers[rCfg] + if !ok { + // Build the configuration options. + opts, err := rCfg.buildOptions() + if err != nil { + return nil, err + } + + // We don't have a receiver, so create one. + receiver, err = newOpenCensusReceiver( + rCfg.Name(), rCfg.NetAddr.Transport, rCfg.NetAddr.Endpoint, nil, nil, opts...) + if err != nil { + return nil, err + } + // Remember the receiver in the map + receivers[rCfg] = receiver + } + return receiver, nil +} + +// This is the map of already created OpenCensus receivers for particular configurations. +// We maintain this map because the Factory is asked trace and metric receivers separately +// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not +// create separate objects, they must use one ocReceiver object per configuration. +var receivers = map[*Config]*ocReceiver{} diff --git a/internal/otel_collector/receiver/opencensusreceiver/factory_test.go b/internal/otel_collector/receiver/opencensusreceiver/factory_test.go new file mode 100644 index 00000000000..fe70a32e447 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/factory_test.go @@ -0,0 +1,201 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
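+
+// Illustrative sketch (wiring assumed, mirroring the factory code above): the
+// receivers map keyed by *Config means that asking the factory for a traces
+// receiver and a metrics receiver with the same config yields one shared
+// ocReceiver:
+//
+//	factory := NewFactory()
+//	cfg := factory.CreateDefaultConfig()
+//	params := component.ReceiverCreateParams{Logger: zap.NewNop()}
+//	tr, _ := factory.CreateTracesReceiver(context.Background(), params, cfg, tracesConsumer)   // tracesConsumer assumed
+//	mr, _ := factory.CreateMetricsReceiver(context.Background(), params, cfg, metricsConsumer) // metricsConsumer assumed
+//	// tr and mr are backed by the same ocReceiver instance for this cfg.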
+ +package opencensusreceiver + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/testutil" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + cfg := createDefaultConfig() + + config := cfg.(*Config) + config.NetAddr.Endpoint = testutil.GetAvailableLocalAddress(t) + + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + tReceiver, err := createTraceReceiver(context.Background(), params, cfg, nil) + assert.NotNil(t, tReceiver) + assert.NoError(t, err) + + mReceiver, err := createMetricsReceiver(context.Background(), params, cfg, nil) + assert.NotNil(t, mReceiver) + assert.NoError(t, err) +} + +func TestCreateTraceReceiver(t *testing.T) { + defaultReceiverSettings := configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + defaultNetAddr := confignet.NetAddr{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: "tcp", + } + defaultGRPCSettings := configgrpc.GRPCServerSettings{ + NetAddr: defaultNetAddr, + } + tests := []struct { + name string + cfg *Config + wantErr bool + }{ + { + name: "default", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + GRPCServerSettings: defaultGRPCSettings, + }, + }, + { + name: "invalid_port", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:112233", + Transport: "tcp", + }, + }, + }, + wantErr: true, + }, + { + name: "max-msg-size-and-concurrent-connections", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: defaultNetAddr, + MaxRecvMsgSizeMiB: 32, + MaxConcurrentStreams: 16, + }, + }, + }, + } + ctx := context.Background() + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tr, err := createTraceReceiver(ctx, params, tt.cfg, consumertest.NewTracesNop()) + if (err != nil) != tt.wantErr { + t.Errorf("factory.CreateTracesReceiver() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tr != nil { + require.NoError(t, tr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, tr.Shutdown(context.Background())) + } + }) + } +} + +func TestCreateMetricReceiver(t *testing.T) { + defaultReceiverSettings := configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + defaultNetAddr := confignet.NetAddr{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: "tcp", + } + defaultGRPCSettings := configgrpc.GRPCServerSettings{ + NetAddr: defaultNetAddr, + } + + tests := []struct { + name string + cfg *Config + wantErr bool + }{ + { + name: "default", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + GRPCServerSettings: defaultGRPCSettings, + }, + }, + { + 
name: "invalid_address", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "327.0.0.1:1122", + Transport: "tcp", + }, + }, + }, + wantErr: true, + }, + { + name: "keepalive", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + GRPCServerSettings: configgrpc.GRPCServerSettings{ + NetAddr: defaultNetAddr, + Keepalive: &configgrpc.KeepaliveServerConfig{ + ServerParameters: &configgrpc.KeepaliveServerParameters{ + MaxConnectionAge: 60 * time.Second, + }, + EnforcementPolicy: &configgrpc.KeepaliveEnforcementPolicy{ + MinTime: 30 * time.Second, + PermitWithoutStream: true, + }, + }, + }, + }, + }, + } + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc, err := createMetricsReceiver(context.Background(), params, tt.cfg, consumertest.NewMetricsNop()) + if (err != nil) != tt.wantErr { + t.Errorf("factory.CreateMetricsReceiver() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tc != nil { + require.NoError(t, tc.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, tc.Shutdown(context.Background())) + } + }) + } +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/doc.go b/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/doc.go new file mode 100644 index 00000000000..6d1d12e3c97 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ocmetrics is the logic for receiving OpenCensus metrics proto from +// already instrumented applications and then passing them onto a metricsink instance. +package ocmetrics diff --git a/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/opencensus.go b/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/opencensus.go new file mode 100644 index 00000000000..d3023424451 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/opencensus.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
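+
+// Illustrative sketch (consumer and server lifecycle assumed): the Receiver
+// defined below is served by registering it as the OpenCensus MetricsService
+// on a gRPC server, the same shape the test helper ocReceiverOnGRPCServer
+// uses:
+//
+//	rcv, err := New("oc_metrics", nextConsumer) // nextConsumer assumed
+//	if err != nil {
+//	    // New only fails for a nil consumer
+//	}
+//	srv := grpc.NewServer()
+//	agentmetricspb.RegisterMetricsServiceServer(srv, rcv)
+//	ln, _ := net.Listen("tcp", "localhost:0")
+//	go func() { _ = srv.Serve(ln) }()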
+ +package ocmetrics + +import ( + "context" + "errors" + "io" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/translator/internaldata" +) + +// Receiver is the type used to handle metrics from OpenCensus exporters. +type Receiver struct { + agentmetricspb.UnimplementedMetricsServiceServer + instanceName string + nextConsumer consumer.MetricsConsumer +} + +// New creates a new ocmetrics.Receiver reference. +func New(instanceName string, nextConsumer consumer.MetricsConsumer) (*Receiver, error) { + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + ocr := &Receiver{ + instanceName: instanceName, + nextConsumer: nextConsumer, + } + return ocr, nil +} + +var _ agentmetricspb.MetricsServiceServer = (*Receiver)(nil) + +var errMetricsExportProtocolViolation = errors.New("protocol violation: Export's first message must have a Node") + +const ( + receiverTagValue = "oc_metrics" + receiverTransport = "grpc" // TODO: transport is being hard coded for now, investigate if info is available on context. + receiverDataFormat = "protobuf" +) + +// Export is the gRPC method that receives streamed metrics from +// OpenCensus-metricproto compatible libraries/applications. +func (ocr *Receiver) Export(mes agentmetricspb.MetricsService_ExportServer) error { + longLivedRPCCtx := obsreport.ReceiverContext(mes.Context(), ocr.instanceName, receiverTransport) + + // Retrieve the first message. It MUST have a non-nil Node. + recv, err := mes.Recv() + if err != nil { + return err + } + + // Check the condition that the first message has a non-nil Node. + if recv.Node == nil { + return errMetricsExportProtocolViolation + } + + var lastNonNilNode *commonpb.Node + var resource *resourcepb.Resource + // Now that we've got the first message with a Node, we can start to receive streamed up metrics. + for { + lastNonNilNode, resource, err = ocr.processReceivedMsg( + longLivedRPCCtx, + lastNonNilNode, + resource, + recv) + if err != nil { + return err + } + + recv, err = mes.Recv() + if err != nil { + if err == io.EOF { + // Do not return EOF as an error so that grpc-gateway calls get an empty + // response with HTTP status code 200 rather than a 500 error with EOF. + return nil + } + return err + } + } +} + +func (ocr *Receiver) processReceivedMsg( + longLivedRPCCtx context.Context, + lastNonNilNode *commonpb.Node, + resource *resourcepb.Resource, + recv *agentmetricspb.ExportMetricsServiceRequest, +) (*commonpb.Node, *resourcepb.Resource, error) { + // If a Node has been sent from downstream, save and use it. + if recv.Node != nil { + lastNonNilNode = recv.Node + } + + // TODO(songya): differentiate between unset and nil resource. See + // https://github.com/census-instrumentation/opencensus-proto/issues/146. 
+ if recv.Resource != nil { + resource = recv.Resource + } + + md := consumerdata.MetricsData{ + Node: lastNonNilNode, + Resource: resource, + Metrics: recv.Metrics, + } + + err := ocr.sendToNextConsumer(longLivedRPCCtx, md) + return lastNonNilNode, resource, err +} + +func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, md consumerdata.MetricsData) error { + ctx := obsreport.StartMetricsReceiveOp( + longLivedRPCCtx, + ocr.instanceName, + receiverTransport, + obsreport.WithLongLivedCtx()) + + numTimeSeries := 0 + numPoints := 0 + // Count number of time series and data points. + for _, metric := range md.Metrics { + numTimeSeries += len(metric.Timeseries) + for _, ts := range metric.GetTimeseries() { + numPoints += len(ts.GetPoints()) + } + } + + var consumerErr error + if len(md.Metrics) > 0 { + consumerErr = ocr.nextConsumer.ConsumeMetrics(ctx, internaldata.OCToMetrics(md)) + } + + obsreport.EndMetricsReceiveOp( + ctx, + receiverDataFormat, + numPoints, + consumerErr) + + return consumerErr +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/opencensus_test.go b/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/opencensus_test.go new file mode 100644 index 00000000000..11c7c7f0283 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/ocmetrics/opencensus_test.go @@ -0,0 +1,418 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
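+
+// Illustrative sketch (names assumed): every batch the receiver forwards is
+// bracketed by an obsreport receive operation, exactly as sendToNextConsumer
+// does above:
+//
+//	ctx := obsreport.StartMetricsReceiveOp(rpcCtx, "oc_metrics", "grpc", obsreport.WithLongLivedCtx())
+//	err := next.ConsumeMetrics(ctx, metrics) // next and metrics assumed
+//	obsreport.EndMetricsReceiveOp(ctx, "protobuf", numPoints, err)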
+
+package ocmetrics
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net"
+    "strings"
+    "sync"
+    "testing"
+    "time"
+
+    commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+    agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
+    metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.uber.org/zap"
+    "google.golang.org/grpc"
+    "google.golang.org/protobuf/proto"
+    "google.golang.org/protobuf/types/known/timestamppb"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/consumer"
+    "go.opentelemetry.io/collector/consumer/consumertest"
+    "go.opentelemetry.io/collector/exporter/opencensusexporter"
+    "go.opentelemetry.io/collector/internal/testdata"
+    "go.opentelemetry.io/collector/obsreport"
+    "go.opentelemetry.io/collector/testutil"
+    "go.opentelemetry.io/collector/translator/internaldata"
+)
+
+func TestReceiver_endToEnd(t *testing.T) {
+    metricSink := new(consumertest.MetricsSink)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, metricSink)
+    defer doneFn()
+
+    address := fmt.Sprintf("localhost:%d", port)
+    expFactory := opencensusexporter.NewFactory()
+    expCfg := expFactory.CreateDefaultConfig().(*opencensusexporter.Config)
+    expCfg.GRPCClientSettings.TLSSetting.Insecure = true
+    expCfg.Endpoint = address
+    expCfg.WaitForReady = true
+    oce, err := expFactory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, expCfg)
+    require.NoError(t, err)
+    err = oce.Start(context.Background(), componenttest.NewNopHost())
+    require.NoError(t, err)
+
+    defer func() {
+        require.NoError(t, oce.Shutdown(context.Background()))
+    }()
+
+    md := testdata.GenerateMetricsOneMetric()
+    assert.NoError(t, oce.ConsumeMetrics(context.Background(), md))
+
+    testutil.WaitFor(t, func() bool {
+        return len(metricSink.AllMetrics()) != 0
+    })
+    gotMetrics := metricSink.AllMetrics()
+    require.Len(t, gotMetrics, 1)
+    assert.Equal(t, md, gotMetrics[0])
+}
+
+// Issue #43. Export should support node multiplexing.
+// The goal is to ensure that Receiver can always support
+// a passthrough mode where it initiates Export normally by first
+// receiving the initiator node. However, it should still be able to
+// accept nodes from downstream sources; if a node isn't specified in
+// an exportMetrics request, assume it is from the last received non-nil node.
+func TestExportMultiplexing(t *testing.T) {
+    metricSink := new(consumertest.MetricsSink)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, metricSink)
+    defer doneFn()
+
+    metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port)
+    require.NoError(t, err, "Failed to create the gRPC MetricsService_ExportClient: %v", err)
+    defer metricsClientDoneFn()
+
+    // Step 1) The initiation.
+    initiatingNode := &commonpb.Node{
+        Identifier: &commonpb.ProcessIdentifier{
+            Pid:      1,
+            HostName: "multiplexer",
+        },
+        LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_JAVA},
+    }
+
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: initiatingNode})
+    require.NoError(t, err, "Failed to send the initiating message: %v", err)
+
+    // Step 1a) Send some metrics without a node; they should be registered as coming from the initiating node.
+    mLi := []*metricspb.Metric{makeMetric(1)}
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLi})
+    require.NoError(t, err, "Failed to send the proxied message from app1: %v", err)
+
+    // Step 2) Send a "proxied" metrics message from app1 with "node1"
+    node1 := &commonpb.Node{
+        Identifier:  &commonpb.ProcessIdentifier{Pid: 9489, HostName: "nodejs-host"},
+        LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_NODE_JS},
+    }
+    mL1 := []*metricspb.Metric{makeMetric(2)}
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: node1, Metrics: mL1})
+    require.NoError(t, err, "Failed to send the proxied message from app1: %v", err)
+
+    // Step 3) Send a metrics message without a node but with metrics: this
+    // should be registered as belonging to the last used node, i.e. "node1".
+    mLn1 := []*metricspb.Metric{makeMetric(3)}
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLn1})
+    require.NoError(t, err, "Failed to send the proxied message without a node: %v", err)
+
+    // Step 4) Send a metrics message from a differently proxied node "node2" from app2
+    node2 := &commonpb.Node{
+        Identifier:  &commonpb.ProcessIdentifier{Pid: 7752, HostName: "golang-host"},
+        LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_GO_LANG},
+    }
+    mL2 := []*metricspb.Metric{makeMetric(4)}
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: node2, Metrics: mL2})
+    require.NoError(t, err, "Failed to send the proxied message from app2: %v", err)
+
+    // Step 5a) Send a metrics message without a node but with metrics: this
+    // should be registered as belonging to the last used node, i.e. "node2".
+    mLn2a := []*metricspb.Metric{makeMetric(5)}
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLn2a})
+    require.NoError(t, err, "Failed to send the proxied message without a node: %v", err)
+
+    // Step 5b)
+    mLn2b := []*metricspb.Metric{makeMetric(6)}
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil, Metrics: mLn2b})
+    require.NoError(t, err, "Failed to send the proxied message without a node: %v", err)
+    // Give the process some time to send data over the wire and perform batching.
+    <-time.After(150 * time.Millisecond)
+
+    // Examination time!
+    resultsMapping := make(map[string][]*metricspb.Metric)
+    for _, md := range metricSink.AllMetrics() {
+        ocmds := internaldata.MetricsToOC(md)
+        for _, ocmd := range ocmds {
+            resultsMapping[nodeToKey(ocmd.Node)] = append(resultsMapping[nodeToKey(ocmd.Node)], ocmd.Metrics...)
+        }
+    }
+
+    // First things first, we expect exactly 3 unique keys:
+    // 1. Initiating Node
+    // 2. Node 1
+    // 3. Node 2
+    if g, w := len(resultsMapping), 3; g != w {
+        t.Errorf("Got %d keys in the results map; Wanted exactly %d\n\nResultsMapping: %+v\n", g, w, resultsMapping)
+    }
+
+    // Want metric counts
+    wantMetricCounts := map[string]int{
+        nodeToKey(initiatingNode): 1,
+        nodeToKey(node1):          2,
+        nodeToKey(node2):          3,
+    }
+    for key, wantMetricCounts := range wantMetricCounts {
+        gotMetricCounts := len(resultsMapping[key])
+        if gotMetricCounts != wantMetricCounts {
+            t.Errorf("Key=%q gotMetricCounts %d wantMetricCounts %d", key, gotMetricCounts, wantMetricCounts)
+        }
+    }
+
+    // Now ensure that the exported metrics match up exactly with
+    // the nodes and the last seen node expectation/behavior.
+    // (or at least their serialized equivalents match up)
+    wantContents := map[string][]*metricspb.Metric{
+        nodeToKey(initiatingNode): mLi,
+        nodeToKey(node1):          append(mL1, mLn1...),
+        nodeToKey(node2):          append(mL2, append(mLn2a, mLn2b...)...),
+    }
+
+    gotBlob, _ := json.Marshal(resultsMapping)
+    wantBlob, _ := json.Marshal(wantContents)
+    if !bytes.Equal(gotBlob, wantBlob) {
+        t.Errorf("Unequal serialization results\nGot:\n\t%s\nWant:\n\t%s\n", gotBlob, wantBlob)
+    }
+}
+
+// The first message without a Node MUST be rejected and the connection torn down.
+// See https://github.com/census-instrumentation/opencensus-service/issues/53
+func TestExportProtocolViolations_nodelessFirstMessage(t *testing.T) {
+    metricSink := new(consumertest.MetricsSink)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, metricSink)
+    defer doneFn()
+
+    metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port)
+    require.NoError(t, err, "Failed to create the gRPC MetricsService_ExportClient: %v", err)
+    defer metricsClientDoneFn()
+
+    // Send a Nodeless first message
+    err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: nil})
+    require.NoError(t, err, "Unexpectedly failed to send the first message: %v", err)
+
+    longDuration := 2 * time.Second
+    testDone := make(chan bool, 1)
+    var wg sync.WaitGroup
+    wg.Add(1)
+
+    go func() {
+        // Our insurance policy to ensure that this test doesn't hang
+        // forever and should quickly report if/when we regress.
+        select {
+        case <-testDone:
+            t.Log("Test ended early enough")
+        case <-time.After(longDuration):
+            metricsClientDoneFn()
+            t.Errorf("Test took too long (%s) and is likely still hanging so this is a regression", longDuration)
+        }
+        wg.Done()
+    }()
+
+    // From this point on, every Recv should return an error: the stream has
+    // been torn down, no matter how many times we retry below, and any
+    // attempt to send proper/corrective data should likewise be rejected.
+    for i := 0; i < 10; i++ {
+        recv, err := metricsClient.Recv()
+        if recv != nil {
+            t.Errorf("Iteration #%d: Unexpectedly got back a response: %#v", i, recv)
+        }
+        if err == nil {
+            t.Errorf("Iteration #%d: Unexpectedly got back a nil error", i)
+            continue
+        }
+
+        wantSubStr := "protocol violation: Export's first message must have a Node"
+        if g := err.Error(); !strings.Contains(g, wantSubStr) {
+            t.Errorf("Iteration #%d: Got error:\n\t%s\nWant substring:\n\t%s\n", i, g, wantSubStr)
+        }
+
+        // The connection should be invalid at this point and
+        // no attempt to send corrections should succeed.
+        n1 := &commonpb.Node{
+            Identifier:  &commonpb.ProcessIdentifier{Pid: 9489, HostName: "nodejs-host"},
+            LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_NODE_JS},
+        }
+        if err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: n1}); err == nil {
+            t.Errorf("Iteration #%d: Unexpectedly succeeded in sending a message upstream. Connection must be in terminal state", i)
+        } else if g, w := err, io.EOF; g != w {
+            t.Errorf("Iteration #%d:\nGot error %q\nWant error %q", i, g, w)
+        }
+    }
+
+    close(testDone)
+    wg.Wait()
+}
+
+// If the first message is valid (has a non-nil Node) and has metrics, those
+// metrics should be received and NEVER discarded.
+// See https://github.com/census-instrumentation/opencensus-service/issues/51
+func TestExportProtocolConformation_metricsInFirstMessage(t *testing.T) {
+    // This test used to be flaky on Windows.
Skip if errors pop up again + + metricSink := new(consumertest.MetricsSink) + + port, doneFn := ocReceiverOnGRPCServer(t, metricSink) + defer doneFn() + + metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) + require.NoError(t, err, "Failed to create the gRPC MetricsService_ExportClient: %v", err) + defer metricsClientDoneFn() + + mLi := []*metricspb.Metric{makeMetric(10), makeMetric(11)} + ni := &commonpb.Node{ + Identifier: &commonpb.ProcessIdentifier{Pid: 1}, + LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_JAVA}, + } + err = metricsClient.Send(&agentmetricspb.ExportMetricsServiceRequest{Node: ni, Metrics: mLi}) + require.NoError(t, err, "Failed to send the first message: %v", err) + + // Give it time to be sent over the wire, then exported. + <-time.After(100 * time.Millisecond) + + // Examination time! + resultsMapping := make(map[string][]*metricspb.Metric) + for _, md := range metricSink.AllMetrics() { + ocmds := internaldata.MetricsToOC(md) + for _, ocmd := range ocmds { + resultsMapping[nodeToKey(ocmd.Node)] = append(resultsMapping[nodeToKey(ocmd.Node)], ocmd.Metrics...) + } + } + + if g, w := len(resultsMapping), 1; g != w { + t.Errorf("Results mapping: Got len(keys) %d Want %d", g, w) + } + + // Check for the keys + wantLengths := map[string]int{ + nodeToKey(ni): 2, + } + for key, wantLength := range wantLengths { + gotLength := len(resultsMapping[key]) + if gotLength != wantLength { + t.Errorf("Exported metrics:: Key: %s\nGot length %d\nWant length %d", key, gotLength, wantLength) + } + } + + // And finally ensure that the protos' serializations are equivalent to the expected + wantContents := map[string][]*metricspb.Metric{ + nodeToKey(ni): mLi, + } + + gotBlob, _ := json.Marshal(resultsMapping) + wantBlob, _ := json.Marshal(wantContents) + if !bytes.Equal(gotBlob, wantBlob) { + t.Errorf("Unequal serialization results\nGot:\n\t%s\nWant:\n\t%s\n", gotBlob, wantBlob) + } +} + +// Helper functions from here on below +func makeMetricsServiceClient(port int) (agentmetricspb.MetricsService_ExportClient, func(), error) { + addr := fmt.Sprintf(":%d", port) + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, nil, err + } + + svc := agentmetricspb.NewMetricsServiceClient(cc) + metricsClient, err := svc.Export(context.Background()) + if err != nil { + _ = cc.Close() + return nil, nil, err + } + + doneFn := func() { _ = cc.Close() } + return metricsClient, doneFn, nil +} + +func nodeToKey(n *commonpb.Node) string { + blob, _ := proto.Marshal(n) + return string(blob) +} + +func ocReceiverOnGRPCServer(t *testing.T, sr consumer.MetricsConsumer) (int, func()) { + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + doneFnList := []func(){func() { ln.Close() }} + done := func() { + for _, doneFn := range doneFnList { + doneFn() + } + } + + _, port, err := testutil.HostPortFromAddr(ln.Addr()) + if err != nil { + done() + t.Fatalf("Failed to parse host:port from listener address: %s error: %v", ln.Addr(), err) + } + + oci, err := New(receiverTagValue, sr) + require.NoError(t, err, "Failed to create the Receiver: %v", err) + + // Now run it as a gRPC server + srv := obsreport.GRPCServerWithObservabilityEnabled() + agentmetricspb.RegisterMetricsServiceServer(srv, oci) + go func() { + _ = srv.Serve(ln) + }() + + return port, done +} + +func makeMetric(val int) *metricspb.Metric { + key := &metricspb.LabelKey{ + Key: 
fmt.Sprintf("%s%d", "key", val), + } + value := &metricspb.LabelValue{ + Value: fmt.Sprintf("%s%d", "value", val), + HasValue: true, + } + + descriptor := &metricspb.MetricDescriptor{ + Name: fmt.Sprintf("%s%d", "metric_descriptort_", val), + Description: "metric descriptor", + Unit: "1", + Type: metricspb.MetricDescriptor_GAUGE_INT64, + LabelKeys: []*metricspb.LabelKey{key}, + } + + now := time.Now().UTC() + point := &metricspb.Point{ + Timestamp: timestamppb.New(now.Add(20 * time.Second)), + Value: &metricspb.Point_Int64Value{ + Int64Value: int64(val), + }, + } + + ts := &metricspb.TimeSeries{ + StartTimestamp: timestamppb.New(now.Add(-10 * time.Second)), + LabelValues: []*metricspb.LabelValue{value}, + Points: []*metricspb.Point{point}, + } + + return &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: []*metricspb.TimeSeries{ts}, + } +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/octrace/doc.go b/internal/otel_collector/receiver/opencensusreceiver/octrace/doc.go new file mode 100644 index 00000000000..4bf176ae24e --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/octrace/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package octrace is the logic for receiving OpenCensus trace protobuf defined spans from +// already instrumented applications and then passing them onto a TraceReceiverSink instance. +package octrace diff --git a/internal/otel_collector/receiver/opencensusreceiver/octrace/observability_test.go b/internal/otel_collector/receiver/opencensusreceiver/octrace/observability_test.go new file mode 100644 index 00000000000..15071f89315 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/octrace/observability_test.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
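+
+// Illustrative sketch (counts assumed): the observability tests below rely on
+// obsreporttest to give each test an isolated metrics registry, so receiver
+// view counts can be asserted directly:
+//
+//	doneFn, err := obsreporttest.SetupRecordedMetricsTest()
+//	require.NoError(t, err)
+//	defer doneFn()
+//	// ... drive spans through the receiver ...
+//	obsreporttest.CheckReceiverTracesViews(t, "oc_trace", "grpc", wantAccepted, wantDropped) // counts assumed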
+ +package octrace + +import ( + "bytes" + "encoding/json" + "reflect" + "sync" + "testing" + "time" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/stretchr/testify/require" + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +// Ensure that if we add a metrics exporter that our target metrics +// will be recorded but also with the proper tag keys and values. +// See Issue https://github.com/census-instrumentation/opencensus-service/issues/63 +// +// Note: we are intentionally skipping the ocgrpc.ServerDefaultViews as this +// test is to ensure exactness, but with the mentioned views registered, the +// output will be quite noisy. +func TestEnsureRecordedMetrics(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + port, doneReceiverFn := ocReceiverOnGRPCServer(t, consumertest.NewTracesNop()) + defer doneReceiverFn() + + n := 20 + // Now for the traceExporter that sends 0 length spans + traceSvcClient, traceSvcDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the trace service client: %v", err) + spans := []*tracepb.Span{{TraceId: []byte("abcdefghijklmnop"), SpanId: []byte("12345678")}} + for i := 0; i < n; i++ { + err = traceSvcClient.Send(&agenttracepb.ExportTraceServiceRequest{Spans: spans, Node: &commonpb.Node{}}) + require.NoError(t, err, "Failed to send requests to the service: %v", err) + } + flush(traceSvcDoneFn) + + obsreporttest.CheckReceiverTracesViews(t, "oc_trace", "grpc", int64(n), 0) +} + +func TestEnsureRecordedMetrics_zeroLengthSpansSender(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + port, doneFn := ocReceiverOnGRPCServer(t, consumertest.NewTracesNop()) + defer doneFn() + + n := 20 + // Now for the traceExporter that sends 0 length spans + traceSvcClient, traceSvcDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the trace service client: %v", err) + for i := 0; i <= n; i++ { + err = traceSvcClient.Send(&agenttracepb.ExportTraceServiceRequest{Spans: nil, Node: &commonpb.Node{}}) + require.NoError(t, err, "Failed to send requests to the service: %v", err) + } + flush(traceSvcDoneFn) + + obsreporttest.CheckReceiverTracesViews(t, "oc_trace", "grpc", 0, 0) +} + +func TestExportSpanLinkingMaintainsParentLink(t *testing.T) { + // Always sample for the purpose of examining all the spans in this test. + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + // TODO: File an issue with OpenCensus-Go to ask for a method to retrieve + // the default sampler because the current method of blindly changing the + // global sampler makes testing hard. 
+
+    // Denoise this test by setting the sampler to never sample.
+    defer trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
+
+    ocSpansSaver := new(testOCTraceExporter)
+    trace.RegisterExporter(ocSpansSaver)
+    defer trace.UnregisterExporter(ocSpansSaver)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, consumertest.NewTracesNop())
+    defer doneFn()
+
+    traceSvcClient, traceSvcDoneFn, err := makeTraceServiceClient(port)
+    require.NoError(t, err, "Failed to create the trace service client: %v", err)
+
+    n := 5
+    for i := 0; i < n; i++ {
+        sl := []*tracepb.Span{{TraceId: []byte("abcdefghijklmnop"), SpanId: []byte{byte(i + 1), 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}}}
+        err = traceSvcClient.Send(&agenttracepb.ExportTraceServiceRequest{Spans: sl, Node: &commonpb.Node{}})
+        require.NoError(t, err, "Failed to send requests to the service: %v", err)
+    }
+
+    flush(traceSvcDoneFn)
+
+    // Inspection time!
+    ocSpansSaver.mu.Lock()
+    defer ocSpansSaver.mu.Unlock()
+
+    require.NotEqual(
+        t,
+        len(ocSpansSaver.spanData),
+        0,
+        "Unfortunately did not receive any exported span data. Please check this library's implementation or go.opencensus.io/trace",
+    )
+
+    gotSpanData := ocSpansSaver.spanData
+    if g, w := len(gotSpanData), n+1; g != w {
+        blob, _ := json.MarshalIndent(gotSpanData, " ", " ")
+        t.Fatalf("Spandata count: Got %d Want %d\n\nData: %s", g, w, blob)
+    }
+
+    receiverSpanData := gotSpanData[0]
+    if g, w := len(receiverSpanData.Links), 1; g != w {
+        t.Fatalf("Links count: Got %d Want %d\nGotSpanData: %#v", g, w, receiverSpanData)
+    }
+
+    // The rpc span is always last in the list
+    rpcSpanData := gotSpanData[len(gotSpanData)-1]
+
+    // Ensure that the link matches up exactly!
+    wantLink := trace.Link{
+        SpanID:  rpcSpanData.SpanID,
+        TraceID: rpcSpanData.TraceID,
+        Type:    trace.LinkTypeParent,
+    }
+    if g, w := receiverSpanData.Links[0], wantLink; !reflect.DeepEqual(g, w) {
+        t.Errorf("Link:\nGot: %#v\nWant: %#v\n", g, w)
+    }
+    if g, w := receiverSpanData.Name, "receiver/oc_trace/TraceDataReceived"; g != w {
+        t.Errorf("ReceiverExport span's SpanData.Name:\nGot: %q\nWant: %q\n", g, w)
+    }
+
+    // And then for the receiverSpanData itself, it SHOULD NOT
+    // have a ParentID, so let's enforce all the conditions below:
+    // 1. That it doesn't have the RPC spanID as its ParentSpanID
+    // 2. That it actually has no ParentSpanID, i.e. has a blank SpanID
+    if g, w := receiverSpanData.ParentSpanID[:], rpcSpanData.SpanID[:]; bytes.Equal(g, w) {
+        t.Errorf("ReceiverSpanData.ParentSpanID unfortunately was linked to the RPC span\nGot: %x\nWant: %x", g, w)
+    }
+
+    var blankSpanID trace.SpanID
+    if g, w := receiverSpanData.ParentSpanID[:], blankSpanID[:]; !bytes.Equal(g, w) {
+        t.Errorf("ReceiverSpanData unfortunately has a parent and isn't NULL\nGot: %x\nWant: %x", g, w)
+    }
+}
+
+type testOCTraceExporter struct {
+    mu       sync.Mutex
+    spanData []*trace.SpanData
+}
+
+func (tote *testOCTraceExporter) ExportSpan(sd *trace.SpanData) {
+    tote.mu.Lock()
+    defer tote.mu.Unlock()
+
+    tote.spanData = append(tote.spanData, sd)
+}
+
+// TODO: Determine how to do this deterministically.
+func flush(traceSvcDoneFn func()) {
+    // Give it enough time to process the streamed spans.
+    <-time.After(40 * time.Millisecond)
+
+    // End the gRPC service to complete the RPC trace so that we
+    // can examine the RPC trace as well.
+    traceSvcDoneFn()
+
+    // Give it some more time to complete the RPC trace and export.
+ <-time.After(40 * time.Millisecond) +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/octrace/opencensus.go b/internal/otel_collector/receiver/opencensusreceiver/octrace/opencensus.go new file mode 100644 index 00000000000..cf490841535 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/octrace/opencensus.go @@ -0,0 +1,166 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package octrace + +import ( + "context" + "errors" + "io" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/translator/internaldata" +) + +const ( + receiverTagValue = "oc_trace" + receiverTransport = "grpc" // TODO: transport is being hard coded for now, investigate if info is available on context. + receiverDataFormat = "protobuf" +) + +// Receiver is the type used to handle spans from OpenCensus exporters. +type Receiver struct { + agenttracepb.UnimplementedTraceServiceServer + nextConsumer consumer.TracesConsumer + instanceName string +} + +// New creates a new opencensus.Receiver reference. +func New(instanceName string, nextConsumer consumer.TracesConsumer, opts ...Option) (*Receiver, error) { + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + ocr := &Receiver{ + nextConsumer: nextConsumer, + instanceName: instanceName, + } + for _, opt := range opts { + opt(ocr) + } + + return ocr, nil +} + +var _ agenttracepb.TraceServiceServer = (*Receiver)(nil) + +var errUnimplemented = errors.New("unimplemented") + +// Config handles configuration messages. +func (ocr *Receiver) Config(agenttracepb.TraceService_ConfigServer) error { + // TODO: Implement when we define the config receiver/sender. + return errUnimplemented +} + +var errTraceExportProtocolViolation = errors.New("protocol violation: Export's first message must have a Node") + +// Export is the gRPC method that receives streamed traces from +// OpenCensus-traceproto compatible libraries/applications. +func (ocr *Receiver) Export(tes agenttracepb.TraceService_ExportServer) error { + ctx := tes.Context() + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + longLivedRPCCtx := obsreport.ReceiverContext(ctx, ocr.instanceName, receiverTransport) + + // The first message MUST have a non-nil Node. + recv, err := tes.Recv() + if err != nil { + return err + } + + // Check the condition that the first message has a non-nil Node. 
+ if recv.Node == nil { + return errTraceExportProtocolViolation + } + + var lastNonNilNode *commonpb.Node + var resource *resourcepb.Resource + // Now that we've got the first message with a Node, we can start to receive streamed up spans. + for { + lastNonNilNode, resource, err = ocr.processReceivedMsg( + longLivedRPCCtx, + lastNonNilNode, + resource, + recv) + if err != nil { + return err + } + + recv, err = tes.Recv() + if err != nil { + if err == io.EOF { + // Do not return EOF as an error so that grpc-gateway calls get an empty + // response with HTTP status code 200 rather than a 500 error with EOF. + return nil + } + return err + } + } +} + +func (ocr *Receiver) processReceivedMsg( + longLivedRPCCtx context.Context, + lastNonNilNode *commonpb.Node, + resource *resourcepb.Resource, + recv *agenttracepb.ExportTraceServiceRequest, +) (*commonpb.Node, *resourcepb.Resource, error) { + // If a Node has been sent from downstream, save and use it. + if recv.Node != nil { + lastNonNilNode = recv.Node + } + + // TODO(songya): differentiate between unset and nil resource. See + // https://github.com/census-instrumentation/opencensus-proto/issues/146. + if recv.Resource != nil { + resource = recv.Resource + } + + td := consumerdata.TraceData{ + Node: lastNonNilNode, + Resource: resource, + Spans: recv.Spans, + SourceFormat: "oc_trace", + } + + err := ocr.sendToNextConsumer(longLivedRPCCtx, td) + return lastNonNilNode, resource, err +} + +func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, tracedata consumerdata.TraceData) error { + ctx := obsreport.StartTraceDataReceiveOp( + longLivedRPCCtx, + ocr.instanceName, + receiverTransport, + obsreport.WithLongLivedCtx()) + + var err error + numSpans := len(tracedata.Spans) + if numSpans != 0 { + err = ocr.nextConsumer.ConsumeTraces(ctx, internaldata.OCToTraceData(tracedata)) + } + + obsreport.EndTraceDataReceiveOp(ctx, receiverDataFormat, numSpans, err) + + return err +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/octrace/opencensus_test.go b/internal/otel_collector/receiver/opencensusreceiver/octrace/opencensus_test.go new file mode 100644 index 00000000000..5d7bfd20f4a --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/octrace/opencensus_test.go @@ -0,0 +1,395 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
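+
+// Illustrative sketch (connection assumed): the Export stream contract
+// implemented in opencensus.go, seen from the client side; the first request
+// must carry a Node, and later nodeless requests inherit the most recent one:
+//
+//	stream, _ := agenttracepb.NewTraceServiceClient(cc).Export(context.Background()) // cc is an assumed *grpc.ClientConn
+//	_ = stream.Send(&agenttracepb.ExportTraceServiceRequest{Node: node, Spans: firstSpans})
+//	_ = stream.Send(&agenttracepb.ExportTraceServiceRequest{Spans: moreSpans}) // attributed to node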
+
+package octrace
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net"
+    "strings"
+    "testing"
+    "time"
+
+    commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+    agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
+    tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.uber.org/zap"
+    "google.golang.org/grpc"
+    "google.golang.org/protobuf/proto"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/component/componenttest"
+    "go.opentelemetry.io/collector/consumer"
+    "go.opentelemetry.io/collector/consumer/consumertest"
+    "go.opentelemetry.io/collector/exporter/opencensusexporter"
+    "go.opentelemetry.io/collector/internal/testdata"
+    "go.opentelemetry.io/collector/obsreport"
+    "go.opentelemetry.io/collector/testutil"
+    "go.opentelemetry.io/collector/translator/internaldata"
+)
+
+func TestReceiver_endToEnd(t *testing.T) {
+    spanSink := new(consumertest.TracesSink)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, spanSink)
+    defer doneFn()
+
+    address := fmt.Sprintf("localhost:%d", port)
+    expFactory := opencensusexporter.NewFactory()
+    expCfg := expFactory.CreateDefaultConfig().(*opencensusexporter.Config)
+    expCfg.GRPCClientSettings.TLSSetting.Insecure = true
+    expCfg.Endpoint = address
+    expCfg.WaitForReady = true
+    oce, err := expFactory.CreateTracesExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, expCfg)
+    require.NoError(t, err)
+    err = oce.Start(context.Background(), componenttest.NewNopHost())
+    require.NoError(t, err)
+
+    defer func() {
+        require.NoError(t, oce.Shutdown(context.Background()))
+    }()
+
+    td := testdata.GenerateTraceDataOneSpan()
+    assert.NoError(t, oce.ConsumeTraces(context.Background(), td))
+
+    testutil.WaitFor(t, func() bool {
+        return len(spanSink.AllTraces()) != 0
+    })
+    gotTraces := spanSink.AllTraces()
+    require.Len(t, gotTraces, 1)
+    assert.Equal(t, td, gotTraces[0])
+}
+
+// Issue #43. Export should support node multiplexing.
+// The goal is to ensure that Receiver can always support
+// a passthrough mode where it initiates Export normally by first
+// receiving the initiator node. However, it should still be able to
+// accept nodes from downstream sources; if a node isn't specified in
+// an exportTrace request, assume it is from the last received non-nil node.
+func TestExportMultiplexing(t *testing.T) {
+    spanSink := new(consumertest.TracesSink)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, spanSink)
+    defer doneFn()
+
+    traceClient, traceClientDoneFn, err := makeTraceServiceClient(port)
+    require.NoError(t, err, "Failed to create the gRPC TraceService_ExportClient: %v", err)
+    defer traceClientDoneFn()
+
+    // Step 1) The initiation.
+    initiatingNode := &commonpb.Node{
+        Identifier: &commonpb.ProcessIdentifier{
+            Pid:      1,
+            HostName: "multiplexer",
+        },
+        LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_JAVA},
+    }
+
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: initiatingNode})
+    require.NoError(t, err, "Failed to send the initiating message: %v", err)
+
+    // Step 1a) Send some spans without a node; they should be registered as coming from the initiating node.
+    sLi := []*tracepb.Span{{TraceId: []byte("1234567890abcdef"), Status: &tracepb.Status{}}}
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: nil, Spans: sLi})
+    require.NoError(t, err, "Failed to send the proxied message from app1: %v", err)
+
+    // Step 2) Send a "proxied" trace message from app1 with "node1"
+    node1 := &commonpb.Node{
+        Identifier:  &commonpb.ProcessIdentifier{Pid: 9489, HostName: "nodejs-host"},
+        LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_NODE_JS},
+    }
+    sL1 := []*tracepb.Span{{TraceId: []byte("abcdefghijklmnop"), Name: &tracepb.TruncatableString{Value: "test"}, Status: &tracepb.Status{}}}
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: node1, Spans: sL1})
+    require.NoError(t, err, "Failed to send the proxied message from app1: %v", err)
+
+    // Step 3) Send a trace message without a node but with spans: this
+    // should be registered as belonging to the last used node, i.e. "node1".
+    sLn1 := []*tracepb.Span{{TraceId: []byte("ABCDEFGHIJKLMNOP"), Status: &tracepb.Status{}}, {TraceId: []byte("1234567890abcdef"), Status: &tracepb.Status{}}}
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: nil, Spans: sLn1})
+    require.NoError(t, err, "Failed to send the proxied message without a node: %v", err)
+
+    // Step 4) Send a trace message from a differently proxied node "node2" from app2
+    node2 := &commonpb.Node{
+        Identifier:  &commonpb.ProcessIdentifier{Pid: 7752, HostName: "golang-host"},
+        LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_GO_LANG},
+    }
+    sL2 := []*tracepb.Span{{TraceId: []byte("_B_D_F_H_J_L_N_O"), Status: &tracepb.Status{}}}
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: node2, Spans: sL2})
+    require.NoError(t, err, "Failed to send the proxied message from app2: %v", err)
+
+    // Step 5a) Send a trace message without a node but with spans: this
+    // should be registered as belonging to the last used node, i.e. "node2".
+    sLn2a := []*tracepb.Span{{TraceId: []byte("_BCDEFGHIJKLMNO_"), Status: &tracepb.Status{}}, {TraceId: []byte("_234567890abcde_"), Status: &tracepb.Status{}}}
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: nil, Spans: sLn2a})
+    require.NoError(t, err, "Failed to send the proxied message without a node: %v", err)
+
+    // Step 5b)
+    sLn2b := []*tracepb.Span{{TraceId: []byte("_xxxxxxxxxxxxxx_"), Status: &tracepb.Status{}}, {TraceId: []byte("B234567890abcdAB"), Status: &tracepb.Status{}}}
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: nil, Spans: sLn2b})
+    require.NoError(t, err, "Failed to send the proxied message without a node: %v", err)
+    // Give the process some time to send data over the wire and perform batching.
+    <-time.After(150 * time.Millisecond)
+
+    // Examination time!
+    resultsMapping := make(map[string][]*tracepb.Span)
+    for _, td := range spanSink.AllTraces() {
+        octds := internaldata.TraceDataToOC(td)
+        for _, octd := range octds {
+            resultsMapping[nodeToKey(octd.Node)] = append(resultsMapping[nodeToKey(octd.Node)], octd.Spans...)
+        }
+    }
+
+    // First things first, we expect exactly 3 unique keys:
+    // 1. Initiating Node
+    // 2. Node 1
+    // 3. Node 2
+    if g, w := len(resultsMapping), 3; g != w {
+        t.Errorf("Got %d keys in the results map; Wanted exactly %d\n\nResultsMapping: %+v\n", g, w, resultsMapping)
+    }
+
+    // Want span counts
+    wantSpanCounts := map[string]int{
+        nodeToKey(initiatingNode): 1,
+        nodeToKey(node1):          3,
+        nodeToKey(node2):          5,
+    }
+    for key, wantSpanCounts := range wantSpanCounts {
+        gotSpanCounts := len(resultsMapping[key])
+        if gotSpanCounts != wantSpanCounts {
+            t.Errorf("Key=%q gotSpanCounts %d wantSpanCounts %d", key, gotSpanCounts, wantSpanCounts)
+        }
+    }
+
+    // Now ensure that the exported spans match up exactly with
+    // the nodes and the last seen node expectation/behavior.
+    // (or at least their serialized equivalents match up)
+    wantContents := map[string][]*tracepb.Span{
+        nodeToKey(initiatingNode): sLi,
+        nodeToKey(node1):          append(sL1, sLn1...),
+        nodeToKey(node2):          append(sL2, append(sLn2a, sLn2b...)...),
+    }
+
+    for nodeKey, wantSpans := range wantContents {
+        gotSpans, ok := resultsMapping[nodeKey]
+        if !ok {
+            t.Errorf("Wanted to find a node that was not found for key: %s", nodeKey)
+        }
+        if len(gotSpans) != len(wantSpans) {
+            t.Errorf("Unequal number of spans for nodeKey: %s", nodeKey)
+        }
+        for _, wantSpan := range wantSpans {
+            found := false
+            for _, gotSpan := range gotSpans {
+                wantStr, _ := json.Marshal(wantSpan)
+                gotStr, _ := json.Marshal(gotSpan)
+                if bytes.Equal(wantStr, gotStr) {
+                    found = true
+                }
+            }
+            if !found {
+                t.Errorf("Unequal span serialization\nGot:\n\t%s\nWant:\n\t%s\n", gotSpans, wantSpans)
+            }
+        }
+    }
+}
+
+// The first message without a Node MUST be rejected and the connection torn down.
+// See https://github.com/census-instrumentation/opencensus-service/issues/53
+func TestExportProtocolViolations_nodelessFirstMessage(t *testing.T) {
+    spanSink := new(consumertest.TracesSink)
+
+    port, doneFn := ocReceiverOnGRPCServer(t, spanSink)
+    defer doneFn()
+
+    traceClient, traceClientDoneFn, err := makeTraceServiceClient(port)
+    require.NoError(t, err, "Failed to create the gRPC TraceService_ExportClient: %v", err)
+    defer traceClientDoneFn()
+
+    // Send a Nodeless first message
+    err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: nil})
+    require.NoError(t, err, "Unexpectedly failed to send the first message: %v", err)
+
+    longDuration := 2 * time.Second
+    testDone := make(chan struct{})
+    goroutineDone := make(chan struct{})
+    go func() {
+        // Our insurance policy to ensure that this test doesn't hang
+        // forever and should quickly report if/when we regress.
+        select {
+        case <-testDone:
+            t.Log("Test ended early enough")
+        case <-time.After(longDuration):
+            traceClientDoneFn()
+            t.Errorf("Test took too long (%s) and is likely still hanging so this is a regression", longDuration)
+        }
+        close(goroutineDone)
+    }()
+
+    // From this point on, every Recv should return an error: the stream has
+    // been torn down, no matter how many times we retry below, and any
+    // attempt to send proper/corrective data should likewise be rejected.
+ for i := 0; i < 10; i++ { + recv, err := traceClient.Recv() + if recv != nil { + t.Errorf("Iteration #%d: Unexpectedly got back a response: %#v", i, recv) + } + if err == nil { + t.Errorf("Iteration #%d: Unexpectedly got back a nil error", i) + continue + } + + wantSubStr := "protocol violation: Export's first message must have a Node" + if g := err.Error(); !strings.Contains(g, wantSubStr) { + t.Errorf("Iteration #%d: Got error:\n\t%s\nWant substring:\n\t%s\n", i, g, wantSubStr) + } + + // The connection should be invalid at this point and + // no attempt to send corrections should succeed. + n1 := &commonpb.Node{ + Identifier: &commonpb.ProcessIdentifier{Pid: 9489, HostName: "nodejs-host"}, + LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_NODE_JS}, + } + if err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: n1}); err == nil { + t.Errorf("Iteration #%d: Unexpectedly succeeded in sending a message upstream. Connection must be in terminal state", i) + } else if g, w := err, io.EOF; g != w { + t.Errorf("Iteration #%d:\nGot error %q\nWant error %q", i, g, w) + } + } + + close(testDone) + <-goroutineDone +} + +// If the first message is valid (has a non-nil Node) and has spans, those +// spans should be received and NEVER discarded. +// See https://github.com/census-instrumentation/opencensus-service/issues/51 +func TestExportProtocolConformation_spansInFirstMessage(t *testing.T) { + spanSink := new(consumertest.TracesSink) + + port, doneFn := ocReceiverOnGRPCServer(t, spanSink) + defer doneFn() + + traceClient, traceClientDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the gRPC TraceService_ExportClient: %v", err) + defer traceClientDoneFn() + + sLi := []*tracepb.Span{ + {TraceId: []byte("1234567890abcdef"), Status: &tracepb.Status{}}, + {TraceId: []byte("XXXXXXXXXXabcdef"), Status: &tracepb.Status{}}, + } + ni := &commonpb.Node{ + Identifier: &commonpb.ProcessIdentifier{Pid: 1}, + LibraryInfo: &commonpb.LibraryInfo{Language: commonpb.LibraryInfo_JAVA}, + } + err = traceClient.Send(&agenttracepb.ExportTraceServiceRequest{Node: ni, Spans: sLi}) + require.NoError(t, err, "Failed to send the first message: %v", err) + + // Give it time to be sent over the wire, then exported. + <-time.After(100 * time.Millisecond) + + // Examination time! + resultsMapping := make(map[string][]*tracepb.Span) + for _, td := range spanSink.AllTraces() { + octds := internaldata.TraceDataToOC(td) + for _, octd := range octds { + resultsMapping[nodeToKey(octd.Node)] = append(resultsMapping[nodeToKey(octd.Node)], octd.Spans...) 
+		}
+	}
+
+	if g, w := len(resultsMapping), 1; g != w {
+		t.Errorf("Results mapping: Got len(keys) %d Want %d", g, w)
+	}
+
+	// Check for the keys
+	wantLengths := map[string]int{
+		nodeToKey(ni): 2,
+	}
+	for key, wantLength := range wantLengths {
+		gotLength := len(resultsMapping[key])
+		if gotLength != wantLength {
+			t.Errorf("Exported spans:: Key: %s\nGot length %d\nWant length %d", key, gotLength, wantLength)
+		}
+	}
+
+	// And finally ensure that the protos' serializations are equivalent to the expected
+	wantContents := map[string][]*tracepb.Span{
+		nodeToKey(ni): sLi,
+	}
+
+	gotBlob, _ := json.Marshal(resultsMapping)
+	wantBlob, _ := json.Marshal(wantContents)
+	if !bytes.Equal(gotBlob, wantBlob) {
+		t.Errorf("Unequal serialization results\nGot:\n\t%s\nWant:\n\t%s\n", gotBlob, wantBlob)
+	}
+}
+
+// Helper functions below
+func makeTraceServiceClient(port int) (agenttracepb.TraceService_ExportClient, func(), error) {
+	addr := fmt.Sprintf(":%d", port)
+	cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := agenttracepb.NewTraceServiceClient(cc)
+	traceClient, err := svc.Export(context.Background())
+	if err != nil {
+		_ = cc.Close()
+		return nil, nil, err
+	}
+
+	doneFn := func() { _ = cc.Close() }
+	return traceClient, doneFn, nil
+}
+
+func nodeToKey(n *commonpb.Node) string {
+	blob, _ := proto.Marshal(n)
+	return string(blob)
+}
+
+func ocReceiverOnGRPCServer(t *testing.T, sr consumer.TracesConsumer) (int, func()) {
+	ln, err := net.Listen("tcp", "localhost:0")
+	require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err)
+
+	doneFnList := []func(){func() { require.NoError(t, ln.Close()) }}
+	done := func() {
+		for _, doneFn := range doneFnList {
+			doneFn()
+		}
+	}
+
+	_, port, err := testutil.HostPortFromAddr(ln.Addr())
+	if err != nil {
+		done()
+		t.Fatalf("Failed to parse host:port from listener address: %s error: %v", ln.Addr(), err)
+	}
+
+	oci, err := New(receiverTagValue, sr)
+	require.NoError(t, err, "Failed to create the Receiver: %v", err)
+
+	// Now run it as a gRPC server
+	srv := obsreport.GRPCServerWithObservabilityEnabled()
+	agenttracepb.RegisterTraceServiceServer(srv, oci)
+	go func() {
+		_ = srv.Serve(ln)
+	}()
+
+	return port, done
+}
diff --git a/internal/otel_collector/receiver/opencensusreceiver/octrace/options.go b/internal/otel_collector/receiver/opencensusreceiver/octrace/options.go
new file mode 100644
index 00000000000..a3477619ba9
--- /dev/null
+++ b/internal/otel_collector/receiver/opencensusreceiver/octrace/options.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package octrace
+
+// Option defines a configuration setting that can be applied to a Receiver.
+//
+// Each Option is a function that mutates the Receiver it is given.
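+//
+// A hypothetical option (for illustration only; this package does not
+// currently ship any) would look like:
+//
+//	func withFoo(v int) Option {
+//		return func(r *Receiver) { /* record v on r */ }
+//	}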
+type Option func(*Receiver) diff --git a/internal/otel_collector/receiver/opencensusreceiver/opencensus.go b/internal/otel_collector/receiver/opencensusreceiver/opencensus.go new file mode 100644 index 00000000000..6535d91e5d7 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/opencensus.go @@ -0,0 +1,275 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusreceiver + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "sync" + + agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" + agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" + gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/rs/cors" + "github.com/soheilhy/cmux" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/receiver/opencensusreceiver/ocmetrics" + "go.opentelemetry.io/collector/receiver/opencensusreceiver/octrace" +) + +// ocReceiver is the type that exposes Trace and Metrics reception. +type ocReceiver struct { + mu sync.Mutex + ln net.Listener + serverGRPC *grpc.Server + serverHTTP *http.Server + gatewayMux *gatewayruntime.ServeMux + corsOrigins []string + grpcServerOptions []grpc.ServerOption + + traceReceiverOpts []octrace.Option + + traceReceiver *octrace.Receiver + metricsReceiver *ocmetrics.Receiver + + traceConsumer consumer.TracesConsumer + metricsConsumer consumer.MetricsConsumer + + stopOnce sync.Once + startServerOnce sync.Once + startTraceReceiverOnce sync.Once + startMetricsReceiverOnce sync.Once + + instanceName string +} + +// newOpenCensusReceiver just creates the OpenCensus receiver services. It is the caller's +// responsibility to invoke the respective Start*Reception methods as well +// as the various Stop*Reception methods to end it. +func newOpenCensusReceiver( + instanceName string, + transport string, + addr string, + tc consumer.TracesConsumer, + mc consumer.MetricsConsumer, + opts ...ocOption, +) (*ocReceiver, error) { + // TODO: (@odeke-em) use options to enable address binding changes. + ln, err := net.Listen(transport, addr) + if err != nil { + return nil, fmt.Errorf("failed to bind to address %q: %v", addr, err) + } + + ocr := &ocReceiver{ + ln: ln, + corsOrigins: []string{}, // Disable CORS by default. + gatewayMux: gatewayruntime.NewServeMux(), + } + + for _, opt := range opts { + opt.withReceiver(ocr) + } + + ocr.instanceName = instanceName + ocr.traceConsumer = tc + ocr.metricsConsumer = mc + + return ocr, nil +} + +// Start runs the trace receiver on the gRPC server. Currently +// it also enables the metrics receiver too. 
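+// Start can safely be invoked more than once: registration and server
+// startup are guarded by the sync.Once fields on ocReceiver, and a repeat
+// call is treated as "already started" rather than an error.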
+func (ocr *ocReceiver) Start(_ context.Context, host component.Host) error { + return ocr.start(host) +} + +func (ocr *ocReceiver) registerTraceConsumer() error { + var err = componenterror.ErrAlreadyStarted + + ocr.startTraceReceiverOnce.Do(func() { + ocr.traceReceiver, err = octrace.New( + ocr.instanceName, ocr.traceConsumer, ocr.traceReceiverOpts...) + if err == nil { + srv := ocr.grpcServer() + agenttracepb.RegisterTraceServiceServer(srv, ocr.traceReceiver) + } + }) + + return err +} + +func (ocr *ocReceiver) registerMetricsConsumer() error { + var err = componenterror.ErrAlreadyStarted + + ocr.startMetricsReceiverOnce.Do(func() { + ocr.metricsReceiver, err = ocmetrics.New( + ocr.instanceName, ocr.metricsConsumer) + if err == nil { + srv := ocr.grpcServer() + agentmetricspb.RegisterMetricsServiceServer(srv, ocr.metricsReceiver) + } + }) + return err +} + +func (ocr *ocReceiver) grpcServer() *grpc.Server { + ocr.mu.Lock() + defer ocr.mu.Unlock() + + if ocr.serverGRPC == nil { + ocr.serverGRPC = obsreport.GRPCServerWithObservabilityEnabled(ocr.grpcServerOptions...) + } + + return ocr.serverGRPC +} + +// Shutdown is a method to turn off receiving. +func (ocr *ocReceiver) Shutdown(context.Context) error { + if err := ocr.stop(); err != componenterror.ErrAlreadyStopped { + return err + } + return nil +} + +// start runs all the receivers/services namely, Trace and Metrics services. +func (ocr *ocReceiver) start(host component.Host) error { + hasConsumer := false + if ocr.traceConsumer != nil { + hasConsumer = true + if err := ocr.registerTraceConsumer(); err != nil && err != componenterror.ErrAlreadyStarted { + return err + } + } + + if ocr.metricsConsumer != nil { + hasConsumer = true + if err := ocr.registerMetricsConsumer(); err != nil && err != componenterror.ErrAlreadyStarted { + return err + } + } + + if !hasConsumer { + return errors.New("cannot start receiver: no consumers were specified") + } + + if err := ocr.startServer(host); err != nil && err != componenterror.ErrAlreadyStarted { + return err + } + + // At this point we've successfully started all the services/receivers. + // Add other start routines here. + return nil +} + +// stop stops the underlying gRPC server and all the services running on it. +func (ocr *ocReceiver) stop() error { + ocr.mu.Lock() + defer ocr.mu.Unlock() + + err := componenterror.ErrAlreadyStopped + ocr.stopOnce.Do(func() { + err = nil + + if ocr.serverHTTP != nil { + _ = ocr.serverHTTP.Close() + } + + if ocr.ln != nil { + _ = ocr.ln.Close() + } + + // TODO: @(odeke-em) investigate what utility invoking (*grpc.Server).Stop() + // gives us yet we invoke (net.Listener).Close(). + // Sure (*grpc.Server).Stop() enables proper shutdown but imposes + // a painful and artificial wait time that goes into 20+seconds yet most of our + // tests and code should be reactive in less than even 1second. 
+ // ocr.serverGRPC.Stop() + }) + return err +} + +func (ocr *ocReceiver) httpServer() *http.Server { + ocr.mu.Lock() + defer ocr.mu.Unlock() + + if ocr.serverHTTP == nil { + var mux http.Handler = ocr.gatewayMux + if len(ocr.corsOrigins) > 0 { + co := cors.Options{AllowedOrigins: ocr.corsOrigins} + mux = cors.New(co).Handler(mux) + } + ocr.serverHTTP = &http.Server{Handler: mux} + } + + return ocr.serverHTTP +} + +func (ocr *ocReceiver) startServer(host component.Host) error { + err := componenterror.ErrAlreadyStarted + ocr.startServerOnce.Do(func() { + err = nil + // Register the grpc-gateway on the HTTP server mux + c := context.Background() + opts := []grpc.DialOption{grpc.WithInsecure()} + endpoint := ocr.ln.Addr().String() + + _, ok := ocr.ln.(*net.UnixListener) + if ok { + endpoint = "unix:" + endpoint + } + + err = agenttracepb.RegisterTraceServiceHandlerFromEndpoint(c, ocr.gatewayMux, endpoint, opts) + if err != nil { + return + } + + err = agentmetricspb.RegisterMetricsServiceHandlerFromEndpoint(c, ocr.gatewayMux, endpoint, opts) + if err != nil { + return + } + + // Start the gRPC and HTTP/JSON (grpc-gateway) servers on the same port. + m := cmux.New(ocr.ln) + grpcL := m.MatchWithWriters( + cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"), + cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc+proto")) + + httpL := m.Match(cmux.Any()) + go func() { + if errGrpc := ocr.serverGRPC.Serve(grpcL); errGrpc != nil { + host.ReportFatalError(errGrpc) + } + }() + go func() { + if errHTTP := ocr.httpServer().Serve(httpL); errHTTP != nil { + host.ReportFatalError(errHTTP) + } + }() + go func() { + if errServe := m.Serve(); errServe != nil { + host.ReportFatalError(errServe) + } + }() + }) + return err +} diff --git a/internal/otel_collector/receiver/opencensusreceiver/opencensus_test.go b/internal/otel_collector/receiver/opencensusreceiver/opencensus_test.go new file mode 100644 index 00000000000..d23eae44312 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/opencensus_test.go @@ -0,0 +1,620 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//lint:file-ignore U1000 t.Skip() flaky test causes unused function warning. 
+
+package opencensusreceiver
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+	agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
+	agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
+	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+	resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/consumer/consumerdata"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/obsreport/obsreporttest"
+	"go.opentelemetry.io/collector/testutil"
+	"go.opentelemetry.io/collector/translator/internaldata"
+)
+
+const ocReceiverName = "oc_receiver_test"
+
+func TestGrpcGateway_endToEnd(t *testing.T) {
+	addr := testutil.GetAvailableLocalAddress(t)
+
+	// Set the buffer count to 1 to make it flush the test span immediately.
+	sink := new(consumertest.TracesSink)
+	ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, sink, nil)
+	require.NoError(t, err, "Failed to create trace receiver: %v", err)
+
+	err = ocr.Start(context.Background(), componenttest.NewNopHost())
+	require.NoError(t, err, "Failed to start trace receiver: %v", err)
+	defer ocr.Shutdown(context.Background())
+
+	// TODO(songy23): make starting server deterministic
+	// Wait for the servers to start
+	<-time.After(10 * time.Millisecond)
+
+	url := fmt.Sprintf("http://%s/v1/trace", addr)
+
+	// Verify that CORS is not enabled by default, but that it gives a 405
+	// method not allowed error.
+ verifyCorsResp(t, url, "origin.com", 405, false) + + traceJSON := []byte(` + { + "node":{"identifier":{"hostName":"testHost"}}, + "spans":[ + { + "traceId":"W47/95gDgQPSabYzgT/GDA==", + "spanId":"7uGbfsPBsXM=", + "name":{"value":"testSpan"}, + "startTime":"2018-12-13T14:51:00Z", + "endTime":"2018-12-13T14:51:01Z", + "attributes": { + "attributeMap": { + "attr1": {"intValue": "55"} + } + } + } + ] + }`) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(traceJSON)) + require.NoError(t, err, "Error creating trace POST request: %v", err) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "Error posting trace to grpc-gateway server: %v", err) + + respBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading response from trace grpc-gateway, %v", err) + } + respStr := string(respBytes) + + err = resp.Body.Close() + if err != nil { + t.Errorf("Error closing response body, %v", err) + } + + if resp.StatusCode != 200 { + t.Errorf("Unexpected status from trace grpc-gateway: %v", resp.StatusCode) + } + + if respStr != "" { + t.Errorf("Got unexpected response from trace grpc-gateway: %v", respStr) + } + + got := sink.AllTraces() + require.Len(t, got, 1) + gotOc := internaldata.TraceDataToOC(got[0]) + require.Len(t, gotOc, 1) + + want := consumerdata.TraceData{ + Node: &commonpb.Node{ + Identifier: &commonpb.ProcessIdentifier{HostName: "testHost"}, + }, + Resource: &resourcepb.Resource{}, + Spans: []*tracepb.Span{ + { + TraceId: []byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC}, + SpanId: []byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73}, + Name: &tracepb.TruncatableString{Value: "testSpan"}, + StartTime: timestamppb.New(time.Unix(1544712660, 0).UTC()), + EndTime: timestamppb.New(time.Unix(1544712661, 0).UTC()), + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "attr1": { + Value: &tracepb.AttributeValue_IntValue{IntValue: 55}, + }, + }, + }, + Status: &tracepb.Status{}, + }, + }, + SourceFormat: "oc_trace", + } + assert.True(t, proto.Equal(want.Node, gotOc[0].Node)) + assert.True(t, proto.Equal(want.Resource, gotOc[0].Resource)) + require.Len(t, want.Spans, 1) + require.Len(t, gotOc[0].Spans, 1) + assert.EqualValues(t, want.Spans[0], gotOc[0].Spans[0]) +} + +func TestTraceGrpcGatewayCors_endToEnd(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + corsOrigins := []string{"allowed-*.com"} + + ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, consumertest.NewTracesNop(), nil, withCorsOrigins(corsOrigins)) + require.NoError(t, err, "Failed to create trace receiver: %v", err) + defer ocr.Shutdown(context.Background()) + + err = ocr.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to start trace receiver: %v", err) + + // TODO(songy23): make starting server deterministic + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + url := fmt.Sprintf("http://%s/v1/trace", addr) + + // Verify allowed domain gets responses that allow CORS. + verifyCorsResp(t, url, "allowed-origin.com", 200, true) + + // Verify disallowed domain gets responses that disallow CORS. 
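+	// Note: the server still answers 200 for the disallowed origin; CORS is
+	// enforced by the browser, so "disallowed" only means the response omits
+	// the Access-Control-Allow-* headers.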
+ verifyCorsResp(t, url, "disallowed-origin.com", 200, false) +} + +func TestMetricsGrpcGatewayCors_endToEnd(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + corsOrigins := []string{"allowed-*.com"} + + ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, consumertest.NewMetricsNop(), withCorsOrigins(corsOrigins)) + require.NoError(t, err, "Failed to create metrics receiver: %v", err) + defer ocr.Shutdown(context.Background()) + + err = ocr.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err, "Failed to start metrics receiver: %v", err) + + // TODO(songy23): make starting server deterministic + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + url := fmt.Sprintf("http://%s/v1/metrics", addr) + + // Verify allowed domain gets responses that allow CORS. + verifyCorsResp(t, url, "allowed-origin.com", 200, true) + + // Verify disallowed domain gets responses that disallow CORS. + verifyCorsResp(t, url, "disallowed-origin.com", 200, false) +} + +func verifyCorsResp(t *testing.T, url string, origin string, wantStatus int, wantAllowed bool) { + req, err := http.NewRequest("OPTIONS", url, nil) + require.NoError(t, err, "Error creating trace OPTIONS request: %v", err) + req.Header.Set("Origin", origin) + req.Header.Set("Access-Control-Request-Method", "POST") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "Error sending OPTIONS to grpc-gateway server: %v", err) + + err = resp.Body.Close() + if err != nil { + t.Errorf("Error closing OPTIONS response body, %v", err) + } + + if resp.StatusCode != wantStatus { + t.Errorf("Unexpected status from OPTIONS: %v", resp.StatusCode) + } + + gotAllowOrigin := resp.Header.Get("Access-Control-Allow-Origin") + gotAllowMethods := resp.Header.Get("Access-Control-Allow-Methods") + + wantAllowOrigin := "" + wantAllowMethods := "" + if wantAllowed { + wantAllowOrigin = origin + wantAllowMethods = "POST" + } + + if gotAllowOrigin != wantAllowOrigin { + t.Errorf("Unexpected Access-Control-Allow-Origin: %v", gotAllowOrigin) + } + if gotAllowMethods != wantAllowMethods { + t.Errorf("Unexpected Access-Control-Allow-Methods: %v", gotAllowMethods) + } +} + +func TestStopWithoutStartNeverCrashes(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, nil) + require.NoError(t, err, "Failed to create an OpenCensus receiver: %v", err) + // Stop it before ever invoking Start*. 
+ ocr.stop() +} + +func TestNewPortAlreadyUsed(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + ln, err := net.Listen("tcp", addr) + require.NoError(t, err, "failed to listen on %q: %v", addr, err) + defer ln.Close() + + r, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, nil) + require.Error(t, err) + require.Nil(t, r) +} + +func TestMultipleStopReceptionShouldNotError(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + r, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, consumertest.NewTracesNop(), consumertest.NewMetricsNop()) + require.NoError(t, err) + require.NotNil(t, r) + + require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, r.Shutdown(context.Background())) +} + +func TestStartWithoutConsumersShouldFail(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + r, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, nil) + require.NoError(t, err) + require.NotNil(t, r) + + require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) +} + +func tempSocketName(t *testing.T) string { + tmpfile, err := ioutil.TempFile("", "sock") + require.NoError(t, err) + require.NoError(t, tmpfile.Close()) + socket := tmpfile.Name() + require.NoError(t, os.Remove(socket)) + return socket +} + +func TestReceiveOnUnixDomainSocket_endToEnd(t *testing.T) { + socketName := tempSocketName(t) + cbts := consumertest.NewTracesNop() + r, err := newOpenCensusReceiver(ocReceiverName, "unix", socketName, cbts, nil) + require.NoError(t, err) + require.NotNil(t, r) + require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) + defer r.Shutdown(context.Background()) + + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + span := ` +{ + "node": { + }, + "spans": [ + { + "trace_id": "YpsR8/le4OgjwSSxhjlrEg==", + "span_id": "2CogcbJh7Ko=", + "socket": { + "value": "/abc", + "truncated_byte_count": 0 + }, + "kind": "SPAN_KIND_UNSPECIFIED", + "start_time": "2020-01-09T11:13:53.187Z", + "end_time": "2020-01-09T11:13:53.187Z" + } + ] +} +` + c := http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (conn net.Conn, err error) { + return net.Dial("unix", socketName) + }, + }, + } + + response, err := c.Post("http://unix/v1/trace", "application/json", strings.NewReader(span)) + require.NoError(t, err) + defer response.Body.Close() + + require.Equal(t, 200, response.StatusCode) +} + +// TestOCReceiverTrace_HandleNextConsumerResponse checks if the trace receiver +// is returning the proper response (return and metrics) when the next consumer +// in the pipeline reports error. The test changes the responses returned by the +// next trace consumer, checks if data was passed down the pipeline and if +// proper metrics were recorded. It also uses all endpoints supported by the +// trace receiver. 
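+// Each entry in ingestionStates toggles the sink's consume error, so the
+// test expects codes.OK when ingestion succeeds and codes.Unknown when the
+// next consumer rejects the batch.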
+func TestOCReceiverTrace_HandleNextConsumerResponse(t *testing.T) { + type ingestionStateTest struct { + okToIngest bool + expectedCode codes.Code + } + tests := []struct { + name string + expectedReceivedBatches int + expectedIngestionBlockedRPCs int + ingestionStates []ingestionStateTest + }{ + { + name: "IngestTest", + expectedReceivedBatches: 2, + expectedIngestionBlockedRPCs: 1, + ingestionStates: []ingestionStateTest{ + { + okToIngest: true, + expectedCode: codes.OK, + }, + { + okToIngest: false, + expectedCode: codes.Unknown, + }, + { + okToIngest: true, + expectedCode: codes.OK, + }, + }, + }, + } + + addr := testutil.GetAvailableLocalAddress(t) + msg := &agenttracepb.ExportTraceServiceRequest{ + Node: &commonpb.Node{ + ServiceInfo: &commonpb.ServiceInfo{Name: "test-svc"}, + }, + Spans: []*tracepb.Span{ + { + TraceId: []byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + }, + }, + }, + } + + exportBidiFn := func( + t *testing.T, + cc *grpc.ClientConn, + msg *agenttracepb.ExportTraceServiceRequest) error { + + acc := agenttracepb.NewTraceServiceClient(cc) + stream, err := acc.Export(context.Background()) + require.NoError(t, err) + require.NotNil(t, stream) + + err = stream.Send(msg) + stream.CloseSend() + if err == nil { + for { + if _, err = stream.Recv(); err != nil { + if err == io.EOF { + err = nil + } + break + } + } + } + + return err + } + + exporters := []struct { + receiverTag string + exportFn func( + t *testing.T, + cc *grpc.ClientConn, + msg *agenttracepb.ExportTraceServiceRequest) error + }{ + { + receiverTag: "oc_trace", + exportFn: exportBidiFn, + }, + } + for _, exporter := range exporters { + for _, tt := range tests { + t.Run(tt.name+"/"+exporter.receiverTag, func(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + sink := new(consumertest.TracesSink) + + var opts []ocOption + ocr, err := newOpenCensusReceiver(exporter.receiverTag, "tcp", addr, nil, nil, opts...) + require.Nil(t, err) + require.NotNil(t, ocr) + + ocr.traceConsumer = sink + require.NoError(t, ocr.Start(context.Background(), componenttest.NewNopHost())) + defer ocr.Shutdown(context.Background()) + + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + t.Errorf("grpc.Dial: %v", err) + } + defer cc.Close() + + for _, ingestionState := range tt.ingestionStates { + if ingestionState.okToIngest { + sink.SetConsumeError(nil) + } else { + sink.SetConsumeError(fmt.Errorf("%q: consumer error", tt.name)) + } + + err = exporter.exportFn(t, cc, msg) + + status, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, ingestionState.expectedCode, status.Code()) + } + + require.Equal(t, tt.expectedReceivedBatches, len(sink.AllTraces())) + obsreporttest.CheckReceiverTracesViews(t, exporter.receiverTag, "grpc", int64(tt.expectedReceivedBatches), int64(tt.expectedIngestionBlockedRPCs)) + }) + } + } +} + +// TestOCReceiverMetrics_HandleNextConsumerResponse checks if the metrics receiver +// is returning the proper response (return and metrics) when the next consumer +// in the pipeline reports error. The test changes the responses returned by the +// next trace consumer, checks if data was passed down the pipeline and if +// proper metrics were recorded. It also uses all endpoints supported by the +// metrics receiver. 
+func TestOCReceiverMetrics_HandleNextConsumerResponse(t *testing.T) { + type ingestionStateTest struct { + okToIngest bool + expectedCode codes.Code + } + tests := []struct { + name string + expectedReceivedBatches int + expectedIngestionBlockedRPCs int + ingestionStates []ingestionStateTest + }{ + { + name: "IngestTest", + expectedReceivedBatches: 2, + expectedIngestionBlockedRPCs: 1, + ingestionStates: []ingestionStateTest{ + { + okToIngest: true, + expectedCode: codes.OK, + }, + { + okToIngest: false, + expectedCode: codes.Unknown, + }, + { + okToIngest: true, + expectedCode: codes.OK, + }, + }, + }, + } + + descriptor := &metricspb.MetricDescriptor{ + Name: "testMetric", + Description: "metric descriptor", + Unit: "1", + Type: metricspb.MetricDescriptor_GAUGE_INT64, + } + point := &metricspb.Point{ + Timestamp: timestamppb.New(time.Now().UTC()), + Value: &metricspb.Point_Int64Value{ + Int64Value: int64(1), + }, + } + ts := &metricspb.TimeSeries{ + Points: []*metricspb.Point{point}, + } + metric := &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: []*metricspb.TimeSeries{ts}, + } + + addr := testutil.GetAvailableLocalAddress(t) + msg := &agentmetricspb.ExportMetricsServiceRequest{ + Node: &commonpb.Node{ + ServiceInfo: &commonpb.ServiceInfo{Name: "test-svc"}, + }, + Metrics: []*metricspb.Metric{metric}, + } + + exportBidiFn := func( + t *testing.T, + cc *grpc.ClientConn, + msg *agentmetricspb.ExportMetricsServiceRequest) error { + + acc := agentmetricspb.NewMetricsServiceClient(cc) + stream, err := acc.Export(context.Background()) + require.NoError(t, err) + require.NotNil(t, stream) + + err = stream.Send(msg) + stream.CloseSend() + if err == nil { + for { + if _, err = stream.Recv(); err != nil { + if err == io.EOF { + err = nil + } + break + } + } + } + + return err + } + + exporters := []struct { + receiverTag string + exportFn func( + t *testing.T, + cc *grpc.ClientConn, + msg *agentmetricspb.ExportMetricsServiceRequest) error + }{ + { + receiverTag: "oc_metrics", + exportFn: exportBidiFn, + }, + } + for _, exporter := range exporters { + for _, tt := range tests { + t.Run(tt.name+"/"+exporter.receiverTag, func(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + sink := new(consumertest.MetricsSink) + + var opts []ocOption + ocr, err := newOpenCensusReceiver(exporter.receiverTag, "tcp", addr, nil, nil, opts...) 
+			require.Nil(t, err)
+			require.NotNil(t, ocr)
+
+			ocr.metricsConsumer = sink
+			require.Nil(t, ocr.Start(context.Background(), componenttest.NewNopHost()))
+			defer ocr.Shutdown(context.Background())
+
+			cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock())
+			if err != nil {
+				t.Errorf("grpc.Dial: %v", err)
+			}
+			defer cc.Close()
+
+			for _, ingestionState := range tt.ingestionStates {
+				if ingestionState.okToIngest {
+					sink.SetConsumeError(nil)
+				} else {
+					sink.SetConsumeError(fmt.Errorf("%q: consumer error", tt.name))
+				}
+
+				err = exporter.exportFn(t, cc, msg)
+
+				status, ok := status.FromError(err)
+				require.True(t, ok)
+				assert.Equal(t, ingestionState.expectedCode, status.Code())
+			}
+
+			require.Equal(t, tt.expectedReceivedBatches, len(sink.AllMetrics()))
+			obsreporttest.CheckReceiverMetricsViews(t, exporter.receiverTag, "grpc", int64(tt.expectedReceivedBatches), int64(tt.expectedIngestionBlockedRPCs))
+			})
+		}
+	}
+}
diff --git a/internal/otel_collector/receiver/opencensusreceiver/options.go b/internal/otel_collector/receiver/opencensusreceiver/options.go
new file mode 100644
index 00000000000..84fc48da142
--- /dev/null
+++ b/internal/otel_collector/receiver/opencensusreceiver/options.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensusreceiver
+
+import (
+	"google.golang.org/grpc"
+)
+
+// ocOption defines a configuration setting that can be applied to the receiver.
+//
+// withReceiver applies the configuration to the given receiver.
+type ocOption interface {
+	withReceiver(*ocReceiver)
+}
+
+type corsOrigins struct {
+	origins []string
+}
+
+var _ ocOption = (*corsOrigins)(nil)
+
+func (co *corsOrigins) withReceiver(ocr *ocReceiver) {
+	ocr.corsOrigins = co.origins
+}
+
+// withCorsOrigins is an option to specify the allowed origins to enable writing
+// HTTP/JSON requests to the grpc-gateway adapter using CORS.
+func withCorsOrigins(origins []string) ocOption {
+	return &corsOrigins{origins: origins}
+}
+
+var _ ocOption = (grpcServerOptions)(nil)
+
+type grpcServerOptions []grpc.ServerOption
+
+func (gsvo grpcServerOptions) withReceiver(ocr *ocReceiver) {
+	ocr.grpcServerOptions = gsvo
+}
+
+// withGRPCServerOptions allows one to specify the options for starting a gRPC server.
+func withGRPCServerOptions(gsOpts ...grpc.ServerOption) ocOption {
+	gsvOpts := grpcServerOptions(gsOpts)
+	return gsvOpts
+}
diff --git a/internal/otel_collector/receiver/opencensusreceiver/testdata/config.yaml b/internal/otel_collector/receiver/opencensusreceiver/testdata/config.yaml
new file mode 100644
index 00000000000..1de5dde787e
--- /dev/null
+++ b/internal/otel_collector/receiver/opencensusreceiver/testdata/config.yaml
@@ -0,0 +1,69 @@
+receivers:
+  # The following entry initializes the default OpenCensus receiver.
+  # The default values are specified here:
+  # https://github.com/open-telemetry/opentelemetry-collector/blob/71589202609d7e787244076b631b45e219101867/receiver/opencensusreceiver/factory.go#L47-L56
+  # The full name of this receiver is `opencensus` and can be referenced in pipelines by 'opencensus'.
+  opencensus:
+  # The following entry demonstrates configuring the common receiver settings:
+  # - endpoint
+  # For more information on the struct, refer to
+  # https://github.com/open-telemetry/opentelemetry-service/blob/71589202609d7e787244076b631b45e219101867/config/configmodels/configmodels.go#L142-L150
+  # This configuration is of type 'opencensus' and has the name 'customname', giving it the full name 'opencensus/customname'
+  # ('<type>/<name>'). To reference this configuration in a pipeline, use the full name `opencensus/customname`.
+  opencensus/customname:
+    # The receiver will listen on endpoint: "0.0.0.0:9090".
+    endpoint: 0.0.0.0:9090
+  # The following entry configures all of the keepalive settings. These settings are used to configure the receiver.
+  opencensus/keepalive:
+    keepalive:
+      server_parameters:
+        max_connection_idle: 11s
+        max_connection_age: 12s
+        max_connection_age_grace: 13s
+        time: 30s
+        timeout: 5s
+      enforcement_policy:
+        min_time: 10s
+        permit_without_stream: true
+  # The following demonstrates how to set maximum limits on stream, message size and connection idle time.
+  # Note: This test yaml demonstrates the settings grouped by their structure; however, all of the settings can
+  # be mixed and matched, like adding the maximum connection idle setting in this example.
+  opencensus/msg-size-conc-connect-max-idle:
+    max_recv_msg_size_mib: 32
+    max_concurrent_streams: 16
+    read_buffer_size: 1024
+    write_buffer_size: 1024
+    keepalive:
+      server_parameters:
+        max_connection_idle: 10s
+  # The following entry demonstrates how to specify TLS credentials for the server.
+  # Note: These files do not exist. If the receiver is started with this configuration, it will fail.
+  opencensus/tlscredentials:
+    tls_settings:
+      cert_file: test.crt
+      key_file: test.key
+  # The following entry demonstrates how to specify a Unix Domain Socket for the server.
+  opencensus/uds:
+    transport: unix
+    endpoint: /tmp/opencensus.sock
+  # The following entry demonstrates how to configure the OpenCensus receiver to allow Cross-Origin Resource Sharing (CORS).
+  # Both fully qualified domain names and the use of wildcards are supported.
+  opencensus/cors:
+    cors_allowed_origins:
+    - https://*.test.com # Wildcard subdomain. Allows domains like https://www.test.com and https://foo.test.com but not https://wwwtest.com.
+    - https://test.com # Fully qualified domain name. Allows https://test.com only.
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [opencensus/customname]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
+    metrics:
+      receivers: [opencensus]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/otlpreceiver/README.md b/internal/otel_collector/receiver/otlpreceiver/README.md
new file mode 100644
index 00000000000..259590ac00c
--- /dev/null
+++ b/internal/otel_collector/receiver/otlpreceiver/README.md
@@ -0,0 +1,67 @@
+# OTLP Receiver
+
+Receives data via gRPC or HTTP using [OTLP](
+https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md)
+format.
+
+Supported pipeline types: traces, metrics, logs
+
+:warning: OTLP metrics format is currently marked as "Alpha" and may change in
+incompatible ways at any time.
+
+## Getting Started
+
+All that is required to enable the OTLP receiver is to include it in the
+receiver definitions. A protocol can be disabled by simply not specifying it in
+the list of protocols.
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+      http:
+```
+
+The following settings are configurable:
+
+- `endpoint` (default = 0.0.0.0:4317 for the grpc protocol, 0.0.0.0:55681 for the http protocol):
+  host:port to which the receiver is going to receive data. The valid syntax is
+  described at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md) including CORS
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md)
+
+## Writing with HTTP/JSON
+
+The OTLP receiver can receive trace export calls via HTTP/JSON in addition to
+gRPC. The HTTP/JSON address is the same as gRPC, as the protocol is recognized
+and processed accordingly. Note the format needs to be [protobuf JSON
+serialization](https://developers.google.com/protocol-buffers/docs/proto3#json).
+
+IMPORTANT: bytes fields are encoded as base64 strings.
+
+To write with HTTP/JSON, `POST` to `[address]/v1/traces` for traces,
+to `[address]/v1/metrics` for metrics, or to `[address]/v1/logs` for logs. The
+default port is `55681`.
+
+The HTTP/JSON endpoint can also optionally configure
+[CORS](https://fetch.spec.whatwg.org/#cors-protocol), which is enabled by
+specifying a list of allowed CORS origins in the `cors_allowed_origins` field:
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      http:
+        endpoint: "localhost:55681"
+        cors_allowed_origins:
+        - http://test.com
+        # Origins can have wildcards with *, use * by itself to match any origin.
+        - https://*.example.com
+```
diff --git a/internal/otel_collector/receiver/otlpreceiver/config.go b/internal/otel_collector/receiver/otlpreceiver/config.go
new file mode 100644
index 00000000000..85d4ca3f48c
--- /dev/null
+++ b/internal/otel_collector/receiver/otlpreceiver/config.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpreceiver
+
+import (
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Protocols holds the per-protocol server settings.
+type Protocols struct {
+	GRPC *configgrpc.GRPCServerSettings `mapstructure:"grpc"`
+	HTTP *confighttp.HTTPServerSettings `mapstructure:"http"`
+}
+
+// Config defines configuration for the OTLP receiver.
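+//
+// For illustration, a YAML fragment such as the following (a sketch built
+// from the default endpoints used elsewhere in this receiver) decodes into
+// this struct:
+//
+//	protocols:
+//	  grpc:
+//	    endpoint: 0.0.0.0:4317
+//	  http:
+//	    endpoint: 0.0.0.0:55681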
+type Config struct { + configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + // Protocols is the configuration for the supported protocols, currently gRPC and HTTP (Proto and JSON). + Protocols `mapstructure:"protocols"` +} diff --git a/internal/otel_collector/receiver/otlpreceiver/config_test.go b/internal/otel_collector/receiver/otlpreceiver/config_test.go new file mode 100644 index 00000000000..dd177adabb8 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/config_test.go @@ -0,0 +1,218 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "path" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/config/configtls" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 9) + + assert.Equal(t, cfg.Receivers["otlp"], factory.CreateDefaultConfig()) + + defaultOnlyGRPC := factory.CreateDefaultConfig().(*Config) + defaultOnlyGRPC.SetName("otlp/only_grpc") + defaultOnlyGRPC.HTTP = nil + assert.Equal(t, cfg.Receivers["otlp/only_grpc"], defaultOnlyGRPC) + + defaultOnlyHTTP := factory.CreateDefaultConfig().(*Config) + defaultOnlyHTTP.SetName("otlp/only_http") + defaultOnlyHTTP.GRPC = nil + assert.Equal(t, cfg.Receivers["otlp/only_http"], defaultOnlyHTTP) + + assert.Equal(t, cfg.Receivers["otlp/customname"], + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "otlp/customname", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:9090", + Transport: "tcp", + }, + ReadBufferSize: 512 * 1024, + }, + }, + }) + + assert.Equal(t, cfg.Receivers["otlp/keepalive"], + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "otlp/keepalive", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:4317", + Transport: "tcp", + }, + ReadBufferSize: 512 * 1024, + Keepalive: &configgrpc.KeepaliveServerConfig{ + ServerParameters: &configgrpc.KeepaliveServerParameters{ + MaxConnectionIdle: 11 * time.Second, + MaxConnectionAge: 12 * time.Second, + MaxConnectionAgeGrace: 13 * time.Second, + Time: 30 * time.Second, + Timeout: 5 * 
time.Second, + }, + EnforcementPolicy: &configgrpc.KeepaliveEnforcementPolicy{ + MinTime: 10 * time.Second, + PermitWithoutStream: true, + }, + }, + }, + }, + }) + + assert.Equal(t, cfg.Receivers["otlp/msg-size-conc-connect-max-idle"], + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "otlp/msg-size-conc-connect-max-idle", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:4317", + Transport: "tcp", + }, + MaxRecvMsgSizeMiB: 32, + MaxConcurrentStreams: 16, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + Keepalive: &configgrpc.KeepaliveServerConfig{ + ServerParameters: &configgrpc.KeepaliveServerParameters{ + MaxConnectionIdle: 10 * time.Second, + }, + }, + }, + }, + }) + + // NOTE: Once the config loader checks for the files existence, this test may fail and require + // use of fake cert/key for test purposes. + assert.Equal(t, cfg.Receivers["otlp/tlscredentials"], + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "otlp/tlscredentials", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "0.0.0.0:4317", + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "test.crt", + KeyFile: "test.key", + }, + }, + ReadBufferSize: 512 * 1024, + }, + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: "0.0.0.0:55681", + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "test.crt", + KeyFile: "test.key", + }, + }, + }, + }, + }) + + assert.Equal(t, cfg.Receivers["otlp/cors"], + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "otlp/cors", + }, + Protocols: Protocols{ + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: "0.0.0.0:55681", + CorsOrigins: []string{"https://*.test.com", "https://test.com"}, + }, + }, + }) + + assert.Equal(t, cfg.Receivers["otlp/uds"], + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "otlp/uds", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "/tmp/grpc_otlp.sock", + Transport: "unix", + }, + ReadBufferSize: 512 * 1024, + }, + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: "/tmp/http_otlp.sock", + // Transport: "unix", + }, + }, + }) +} + +func TestFailedLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "typo_default_proto_config.yaml"), factories) + assert.EqualError(t, err, `error reading receivers configuration for otlp: unknown protocols in the OTLP receiver`) + + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_proto_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for otlp: 1 error(s) decoding:\n\n* 'protocols' has invalid keys: thrift") + + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_no_proto_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for otlp: must specify at least one protocol when using the OTLP receiver") + + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "bad_empty_config.yaml"), factories) + assert.EqualError(t, err, "error reading receivers configuration for 
otlp: empty config for OTLP receiver`)
+}
diff --git a/internal/otel_collector/receiver/otlpreceiver/factory.go b/internal/otel_collector/receiver/otlpreceiver/factory.go
new file mode 100644
index 00000000000..75553059d16
--- /dev/null
+++ b/internal/otel_collector/receiver/otlpreceiver/factory.go
@@ -0,0 +1,198 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpreceiver
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/spf13/viper"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/confignet"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/receiver/receiverhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr = "otlp"
+
+	// Protocol values.
+	protoGRPC          = "grpc"
+	protoHTTP          = "http"
+	protocolsFieldName = "protocols"
+
+	defaultGRPCEndpoint = "0.0.0.0:4317"
+	defaultHTTPEndpoint = "0.0.0.0:55681"
+	legacyGRPCEndpoint  = "0.0.0.0:55680"
+)
+
+func NewFactory() component.ReceiverFactory {
+	return receiverhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		receiverhelper.WithTraces(createTraceReceiver),
+		receiverhelper.WithMetrics(createMetricsReceiver),
+		receiverhelper.WithLogs(createLogReceiver),
+		receiverhelper.WithCustomUnmarshaler(customUnmarshaler))
+}
+
+// createDefaultConfig creates the default configuration for the receiver.
+func createDefaultConfig() configmodels.Receiver {
+	return &Config{
+		ReceiverSettings: configmodels.ReceiverSettings{
+			TypeVal: typeStr,
+			NameVal: typeStr,
+		},
+		Protocols: Protocols{
+			GRPC: &configgrpc.GRPCServerSettings{
+				NetAddr: confignet.NetAddr{
+					Endpoint:  defaultGRPCEndpoint,
+					Transport: "tcp",
+				},
+				// We almost write 0 bytes, so no need to tune WriteBufferSize.
+				ReadBufferSize: 512 * 1024,
+			},
+			HTTP: &confighttp.HTTPServerSettings{
+				Endpoint: defaultHTTPEndpoint,
+			},
+		},
+	}
+}
+
+// customUnmarshaler is used to add defaults for named but empty protocols.
+func customUnmarshaler(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	if componentViperSection == nil || len(componentViperSection.AllKeys()) == 0 {
+		return fmt.Errorf("empty config for OTLP receiver")
+	}
+	// First load the config normally.
+	err := componentViperSection.UnmarshalExact(intoCfg)
+	if err != nil {
+		return err
+	}
+
+	receiverCfg := intoCfg.(*Config)
+	// Next, manually search for protocols in viper; if a protocol is not present it means it is disabled.
+	protocols := componentViperSection.GetStringMap(protocolsFieldName)
+
+	// UnmarshalExact ignores empty entries such as a protocol with no values, so if a protocol
+	// name is misspelled, the protocol that was intended to be enabled will silently stay
+	// disabled. Guard against that by checking that only known protocols are present.
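+	// Count the protocol keys we recognize; any key left unaccounted for
+	// below means the configuration named an unknown protocol.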
+ knownProtocols := 0 + if _, ok := protocols[protoGRPC]; !ok { + receiverCfg.GRPC = nil + } else { + knownProtocols++ + } + + if _, ok := protocols[protoHTTP]; !ok { + receiverCfg.HTTP = nil + } else { + knownProtocols++ + } + + if len(protocols) != knownProtocols { + return fmt.Errorf("unknown protocols in the OTLP receiver") + } + + if receiverCfg.GRPC == nil && receiverCfg.HTTP == nil { + return fmt.Errorf("must specify at least one protocol when using the OTLP receiver") + } + + return nil +} + +// CreateTracesReceiver creates a trace receiver based on provided config. +func createTraceReceiver( + ctx context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.TracesConsumer, +) (component.TracesReceiver, error) { + r, err := createReceiver(cfg, params.Logger) + if err != nil { + return nil, err + } + if err = r.registerTraceConsumer(ctx, nextConsumer); err != nil { + return nil, err + } + return r, nil +} + +// CreateMetricsReceiver creates a metrics receiver based on provided config. +func createMetricsReceiver( + ctx context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + consumer consumer.MetricsConsumer, +) (component.MetricsReceiver, error) { + r, err := createReceiver(cfg, params.Logger) + if err != nil { + return nil, err + } + if err = r.registerMetricsConsumer(ctx, consumer); err != nil { + return nil, err + } + return r, nil +} + +// CreateLogReceiver creates a log receiver based on provided config. +func createLogReceiver( + ctx context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + consumer consumer.LogsConsumer, +) (component.LogsReceiver, error) { + r, err := createReceiver(cfg, params.Logger) + if err != nil { + return nil, err + } + if err = r.registerLogsConsumer(ctx, consumer); err != nil { + return nil, err + } + return r, nil +} + +func createReceiver(cfg configmodels.Receiver, logger *zap.Logger) (*otlpReceiver, error) { + rCfg := cfg.(*Config) + + // There must be one receiver for both metrics and traces. We maintain a map of + // receivers per config. + + // Check to see if there is already a receiver for this config. + receiver, ok := receivers[rCfg] + if !ok { + var err error + // We don't have a receiver, so create one. + receiver, err = newOtlpReceiver(rCfg, logger) + if err != nil { + return nil, err + } + // Remember the receiver in the map + receivers[rCfg] = receiver + } + return receiver, nil +} + +// This is the map of already created OTLP receivers for particular configurations. +// We maintain this map because the Factory is asked trace and metric receivers separately +// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not +// create separate objects, they must use one otlpReceiver object per configuration. +var receivers = map[*Config]*otlpReceiver{} diff --git a/internal/otel_collector/receiver/otlpreceiver/factory_test.go b/internal/otel_collector/receiver/otlpreceiver/factory_test.go new file mode 100644 index 00000000000..60ea0b9094b --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/factory_test.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/testutil" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + config := cfg.(*Config) + config.GRPC.NetAddr.Endpoint = testutil.GetAvailableLocalAddress(t) + config.HTTP.Endpoint = testutil.GetAvailableLocalAddress(t) + + creationParams := component.ReceiverCreateParams{Logger: zap.NewNop()} + tReceiver, err := factory.CreateTracesReceiver(context.Background(), creationParams, cfg, new(consumertest.TracesSink)) + assert.NotNil(t, tReceiver) + assert.NoError(t, err) + + mReceiver, err := factory.CreateMetricsReceiver(context.Background(), creationParams, cfg, new(consumertest.MetricsSink)) + assert.NotNil(t, mReceiver) + assert.NoError(t, err) +} + +func TestCreateTraceReceiver(t *testing.T) { + factory := NewFactory() + defaultReceiverSettings := configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + defaultGRPCSettings := &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: "tcp", + }, + } + defaultHTTPSettings := &confighttp.HTTPServerSettings{ + Endpoint: testutil.GetAvailableLocalAddress(t), + } + + tests := []struct { + name string + cfg *Config + wantErr bool + }{ + { + name: "default", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + Protocols: Protocols{ + GRPC: defaultGRPCSettings, + HTTP: defaultHTTPSettings, + }, + }, + }, + { + name: "invalid_grpc_port", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "localhost:112233", + Transport: "tcp", + }, + }, + HTTP: defaultHTTPSettings, + }, + }, + wantErr: true, + }, + { + name: "invalid_http_port", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Protocols: Protocols{ + GRPC: defaultGRPCSettings, + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: "localhost:112233", + }, + }, + }, + wantErr: true, + }, + } + ctx := context.Background() + creationParams := component.ReceiverCreateParams{Logger: zap.NewNop()} + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + sink := new(consumertest.TracesSink) + tr, err := factory.CreateTracesReceiver(ctx, creationParams, tt.cfg, sink) + assert.NoError(t, err) + require.NotNil(t, tr) + if tt.wantErr { + assert.Error(t, tr.Start(context.Background(), componenttest.NewNopHost())) + } else { + assert.NoError(t, tr.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, tr.Shutdown(context.Background())) + } + }) + } +} + +func TestCreateMetricReceiver(t *testing.T) { + factory := NewFactory() + defaultReceiverSettings := configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + defaultGRPCSettings := &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: "tcp", + }, + } + defaultHTTPSettings := &confighttp.HTTPServerSettings{ + Endpoint: testutil.GetAvailableLocalAddress(t), + } + + tests := []struct { + name string + cfg *Config + wantErr bool + }{ + { + name: "default", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + Protocols: Protocols{ + GRPC: defaultGRPCSettings, + HTTP: defaultHTTPSettings, + }, + }, + }, + { + name: "invalid_grpc_address", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "327.0.0.1:1122", + Transport: "tcp", + }, + }, + HTTP: defaultHTTPSettings, + }, + }, + wantErr: true, + }, + { + name: "invalid_http_address", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Protocols: Protocols{ + GRPC: defaultGRPCSettings, + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: "327.0.0.1:1122", + }, + }, + }, + wantErr: true, + }, + } + ctx := context.Background() + creationParams := component.ReceiverCreateParams{Logger: zap.NewNop()} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sink := new(consumertest.MetricsSink) + mr, err := factory.CreateMetricsReceiver(ctx, creationParams, tt.cfg, sink) + assert.NoError(t, err) + require.NotNil(t, mr) + if tt.wantErr { + assert.Error(t, mr.Start(context.Background(), componenttest.NewNopHost())) + } else { + require.NoError(t, mr.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, mr.Shutdown(context.Background())) + } + }) + } +} + +func TestCreateLogReceiver(t *testing.T) { + factory := NewFactory() + defaultReceiverSettings := configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + } + defaultGRPCSettings := &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: "tcp", + }, + } + defaultHTTPSettings := &confighttp.HTTPServerSettings{ + Endpoint: testutil.GetAvailableLocalAddress(t), + } + + tests := []struct { + name string + cfg *Config + wantStartErr bool + wantErr bool + sink consumer.LogsConsumer + }{ + { + name: "default", + cfg: &Config{ + ReceiverSettings: defaultReceiverSettings, + Protocols: Protocols{ + GRPC: defaultGRPCSettings, + HTTP: defaultHTTPSettings, + }, + }, + sink: new(consumertest.LogsSink), + }, + { + name: "invalid_grpc_address", + cfg: &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: "327.0.0.1:1122", + Transport: "tcp", + }, + }, + HTTP: 
defaultHTTPSettings,
+				},
+			},
+			wantStartErr: true,
+			sink:         new(consumertest.LogsSink),
+		},
+		{
+			name: "invalid_http_address",
+			cfg: &Config{
+				ReceiverSettings: configmodels.ReceiverSettings{
+					TypeVal: typeStr,
+					NameVal: typeStr,
+				},
+				Protocols: Protocols{
+					GRPC: defaultGRPCSettings,
+					HTTP: &confighttp.HTTPServerSettings{
+						Endpoint: "327.0.0.1:1122",
+					},
+				},
+			},
+			wantStartErr: true,
+			sink:         new(consumertest.LogsSink),
+		},
+		{
+			name: "no_next_consumer",
+			cfg: &Config{
+				ReceiverSettings: configmodels.ReceiverSettings{
+					TypeVal: typeStr,
+					NameVal: typeStr,
+				},
+				Protocols: Protocols{
+					GRPC: defaultGRPCSettings,
+					HTTP: &confighttp.HTTPServerSettings{
+						Endpoint: "327.0.0.1:1122",
+					},
+				},
+			},
+			wantErr: true,
+			sink:    nil,
+		},
+		{
+			name: "no_http_or_grpc_config",
+			cfg: &Config{
+				ReceiverSettings: configmodels.ReceiverSettings{
+					TypeVal: typeStr,
+					NameVal: typeStr,
+				},
+				Protocols: Protocols{},
+			},
+			wantErr: false,
+			sink:    new(consumertest.LogsSink),
+		},
+	}
+	ctx := context.Background()
+	creationParams := component.ReceiverCreateParams{Logger: zap.NewNop()}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			mr, err := factory.CreateLogsReceiver(ctx, creationParams, tt.cfg, tt.sink)
+			if tt.wantErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			require.NotNil(t, mr)
+
+			if tt.wantStartErr {
+				assert.Error(t, mr.Start(context.Background(), componenttest.NewNopHost()))
+			} else {
+				require.NoError(t, mr.Start(context.Background(), componenttest.NewNopHost()))
+				assert.NoError(t, mr.Shutdown(context.Background()))
+			}
+			receivers = map[*Config]*otlpReceiver{}
+		})
+	}
+}
diff --git a/internal/otel_collector/receiver/otlpreceiver/logs/otlp.go b/internal/otel_collector/receiver/otlpreceiver/logs/otlp.go
new file mode 100644
index 00000000000..149a1c110bd
--- /dev/null
+++ b/internal/otel_collector/receiver/otlpreceiver/logs/otlp.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logs
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/client"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal"
+	collectorlog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1"
+	"go.opentelemetry.io/collector/obsreport"
+)
+
+const (
+	dataFormatProtobuf = "protobuf"
+)
+
+// Receiver is the type used to handle logs from OpenTelemetry exporters.
+type Receiver struct {
+	instanceName string
+	nextConsumer consumer.LogsConsumer
+}
+
+// New creates a new Receiver reference.
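+//
+// A minimal sketch of typical wiring (assuming a consumertest.LogsSink as the
+// next consumer; gRPC server construction is elided):
+//
+//	r := logs.New("otlp", new(consumertest.LogsSink))
+//	collectorlog.RegisterLogsServiceServer(grpcServer, r)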
+func New(instanceName string, nextConsumer consumer.LogsConsumer) *Receiver { + r := &Receiver{ + instanceName: instanceName, + nextConsumer: nextConsumer, + } + + return r +} + +const ( + receiverTagValue = "otlp_log" + receiverTransport = "grpc" +) + +func (r *Receiver) Export(ctx context.Context, req *collectorlog.ExportLogsServiceRequest) (*collectorlog.ExportLogsServiceResponse, error) { + // We need to ensure that it propagates the receiver name as a tag + ctxWithReceiverName := obsreport.ReceiverContext(ctx, r.instanceName, receiverTransport) + + ld := pdata.LogsFromInternalRep(internal.LogsFromOtlp(req.ResourceLogs)) + err := r.sendToNextConsumer(ctxWithReceiverName, ld) + if err != nil { + return nil, err + } + + return &collectorlog.ExportLogsServiceResponse{}, nil +} + +func (r *Receiver) sendToNextConsumer(ctx context.Context, ld pdata.Logs) error { + numSpans := ld.LogRecordCount() + if numSpans == 0 { + return nil + } + + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = obsreport.StartLogsReceiveOp(ctx, r.instanceName, receiverTransport) + err := r.nextConsumer.ConsumeLogs(ctx, ld) + obsreport.EndLogsReceiveOp(ctx, dataFormatProtobuf, numSpans, err) + + return err +} diff --git a/internal/otel_collector/receiver/otlpreceiver/logs/otlp_test.go b/internal/otel_collector/receiver/otlpreceiver/logs/otlp_test.go new file mode 100644 index 00000000000..8faf2f3d7f1 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/logs/otlp_test.go @@ -0,0 +1,181 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logs + +import ( + "context" + "fmt" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal" + "go.opentelemetry.io/collector/internal/data" + collectorlog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1" + otlplog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/logs/v1" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/testutil" +) + +var _ collectorlog.LogsServiceServer = (*Receiver)(nil) + +func TestExport(t *testing.T) { + // given + + logSink := new(consumertest.LogsSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, logSink) + defer doneFn() + + traceClient, traceClientDoneFn, err := makeLogsServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer traceClientDoneFn() + + // when + + unixnanos := uint64(12578940000000012345) + traceID := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1} + spanID := [8]byte{8, 7, 6, 5, 4, 3, 2, 1} + resourceLogs := []*otlplog.ResourceLogs{ + { + InstrumentationLibraryLogs: []*otlplog.InstrumentationLibraryLogs{ + { + Logs: []*otlplog.LogRecord{ + { + TraceId: data.NewTraceID(traceID), + SpanId: data.NewSpanID(spanID), + Name: "operationB", + TimeUnixNano: unixnanos, + }, + }, + }, + }, + }, + } + + // Keep log data to compare the test result against it + // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream + traceData := pdata.LogsFromInternalRep(internal.LogsFromOtlp(resourceLogs)).Clone() + + req := &collectorlog.ExportLogsServiceRequest{ + ResourceLogs: resourceLogs, + } + + resp, err := traceClient.Export(context.Background(), req) + require.NoError(t, err, "Failed to export trace: %v", err) + require.NotNil(t, resp, "The response is missing") + + // assert + + require.Equal(t, 1, len(logSink.AllLogs()), "unexpected length: %v", len(logSink.AllLogs())) + + assert.EqualValues(t, traceData, logSink.AllLogs()[0]) +} + +func TestExport_EmptyRequest(t *testing.T) { + logSink := new(consumertest.LogsSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, logSink) + defer doneFn() + + logClient, logClientDoneFn, err := makeLogsServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer logClientDoneFn() + + resp, err := logClient.Export(context.Background(), &collectorlog.ExportLogsServiceRequest{}) + assert.NoError(t, err, "Failed to export trace: %v", err) + assert.NotNil(t, resp, "The response is missing") +} + +func TestExport_ErrorConsumer(t *testing.T) { + logSink := new(consumertest.LogsSink) + logSink.SetConsumeError(fmt.Errorf("error")) + + port, doneFn := otlpReceiverOnGRPCServer(t, logSink) + defer doneFn() + + logClient, logClientDoneFn, err := makeLogsServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer logClientDoneFn() + + req := &collectorlog.ExportLogsServiceRequest{ + ResourceLogs: []*otlplog.ResourceLogs{ + { + InstrumentationLibraryLogs: []*otlplog.InstrumentationLibraryLogs{ + { + Logs: []*otlplog.LogRecord{ + { + Name: "operationB", + }, + }, + }, + }, + }, + }, + } + + resp, err := logClient.Export(context.Background(), req) + assert.EqualError(t, err, "rpc error: code = Unknown desc = error") + 
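+	// The consumer error is propagated to the client as a gRPC status with
+	// code Unknown, and no response message is returned.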
assert.Nil(t, resp) +} + +func makeLogsServiceClient(port int) (collectorlog.LogsServiceClient, func(), error) { + addr := fmt.Sprintf(":%d", port) + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, nil, err + } + + logClient := collectorlog.NewLogsServiceClient(cc) + + doneFn := func() { _ = cc.Close() } + return logClient, doneFn, nil +} + +func otlpReceiverOnGRPCServer(t *testing.T, tc consumer.LogsConsumer) (int, func()) { + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + doneFnList := []func(){func() { ln.Close() }} + done := func() { + for _, doneFn := range doneFnList { + doneFn() + } + } + + _, port, err := testutil.HostPortFromAddr(ln.Addr()) + if err != nil { + done() + t.Fatalf("Failed to parse host:port from listener address: %s error: %v", ln.Addr(), err) + } + + r := New(receiverTagValue, tc) + require.NoError(t, err) + + // Now run it as a gRPC server + srv := obsreport.GRPCServerWithObservabilityEnabled() + collectorlog.RegisterLogsServiceServer(srv, r) + go func() { + _ = srv.Serve(ln) + }() + + return port, done +} diff --git a/internal/otel_collector/receiver/otlpreceiver/marshal_jsonpb.go b/internal/otel_collector/receiver/otlpreceiver/marshal_jsonpb.go new file mode 100644 index 00000000000..22fb7814fc2 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/marshal_jsonpb.go @@ -0,0 +1,301 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" +) + +// JSONPb is a copy of https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/marshal_jsonpb.go +// with one difference: github.com/golang/protobuf imports are replaced by github.com/gogo/protobuf +// to make it work with Gogoproto messages that we use. There are no other changes to +// JSONPb done. It should be safe to update (copy again) it to latest version of +// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/marshal_jsonpb.go +// when the github.com/grpc-ecosystem/grpc-gateway dependency is updated. + +//lint:file-ignore S1034 Ignore lint errors, this is a copied file and we don't want to modify it. + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "github.com/golang/protobuf/jsonpb". +// It supports fully functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb jsonpb.Marshaler + +// ContentType always returns "application/json". +func (*JSONPb) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON. 
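+//
+// For example (a sketch; msg stands for any gogoproto-generated message):
+//
+//	jpb := &JSONPb{EmitDefaults: true, OrigName: true}
+//	data, err := jpb.Marshal(msg)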
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + if _, ok := v.(proto.Message); !ok { + return j.marshalNonProtoField(v) + } + + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + _, err = w.Write(buf) + return err + } + return (*jsonpb.Marshaler)(j).Marshal(w, p) +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshals arbitrary data structure into JSON, +// but it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. +func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) runtime.Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
+func (j *JSONPb) NewEncoder(w io.Writer) runtime.Encoder { + return runtime.EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +// allowUnknownFields helps not to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +var allowUnknownFields = true + +// DisallowUnknownFields enables option in decoder (unmarshaller) to +// return an error when it finds an unknown field. This function must be +// called before using the JSON marshaller. 
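+// Note that it flips a package-level variable, so it must not be called
+// concurrently with unmarshaling.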
+func DisallowUnknownFields() { + allowUnknownFields = false +} + +// convFromType is an exact copy from https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/query.go +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(runtime.String), + reflect.Bool: reflect.ValueOf(runtime.Bool), + reflect.Float64: reflect.ValueOf(runtime.Float64), + reflect.Float32: reflect.ValueOf(runtime.Float32), + reflect.Int64: reflect.ValueOf(runtime.Int64), + reflect.Int32: reflect.ValueOf(runtime.Int32), + reflect.Uint64: reflect.ValueOf(runtime.Uint64), + reflect.Uint32: reflect.ValueOf(runtime.Uint32), + reflect.Slice: reflect.ValueOf(runtime.Bytes), + } +) diff --git a/internal/otel_collector/receiver/otlpreceiver/marshal_jsonpb_test.go b/internal/otel_collector/receiver/otlpreceiver/marshal_jsonpb_test.go new file mode 100644 index 00000000000..87e86b52160 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/marshal_jsonpb_test.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + v1 "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/testdata" +) + +const expectedJSON = `{ + "resource": { + "attributes": [ + { + "key": "resource-attr", + "value": { + "stringValue": "resource-attr-val-1" + } + } + ] + }, + "instrumentationLibrarySpans": [ + { + "instrumentationLibrary": {}, + "spans": [ + { + "traceId": "", + "spanId": "", + "parentSpanId": "", + "name": "operationA", + "startTimeUnixNano": "1581452772000000321", + "endTimeUnixNano": "1581452773000000789", + "droppedAttributesCount": 1, + "events": [ + { + "timeUnixNano": "1581452773000000123", + "name": "event-with-attr", + "attributes": [ + { + "key": "span-event-attr", + "value": { + "stringValue": "span-event-attr-val" + } + } + ], + "droppedAttributesCount": 2 + }, + { + "timeUnixNano": "1581452773000000123", + "name": "event", + "droppedAttributesCount": 2 + } + ], + "droppedEventsCount": 1, + "status": { + "deprecatedCode": "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", + "message": "status-cancelled", + "code": "STATUS_CODE_ERROR" + } + } + ] + } + ] +}` + +func TestJSONPbMarshal(t *testing.T) { + jpb := JSONPb{ + Indent: " ", + } + td := testdata.GenerateTraceDataOneSpan() + otlp := pdata.TracesToOtlp(td) + bytes, err := jpb.Marshal(otlp[0]) + assert.NoError(t, err) + assert.JSONEq(t, expectedJSON, string(bytes)) +} + +func TestJSONPbUnmarshal(t *testing.T) { + jpb := JSONPb{ + Indent: " ", + } + var proto v1.ResourceSpans + err := jpb.Unmarshal([]byte(expectedJSON), &proto) + assert.NoError(t, err) + td := testdata.GenerateTraceDataOneSpan() + otlp := pdata.TracesToOtlp(td) + assert.EqualValues(t, &proto, otlp[0]) +} diff --git a/internal/otel_collector/receiver/otlpreceiver/metrics/otlp.go 
b/internal/otel_collector/receiver/otlpreceiver/metrics/otlp.go new file mode 100644 index 00000000000..c5a16f3ebf9 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/metrics/otlp.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "context" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" + collectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + dataFormatProtobuf = "protobuf" +) + +// Receiver is the type used to handle metrics from OpenTelemetry exporters. +type Receiver struct { + instanceName string + nextConsumer consumer.MetricsConsumer +} + +// New creates a new Receiver reference. +func New(instanceName string, nextConsumer consumer.MetricsConsumer) *Receiver { + r := &Receiver{ + instanceName: instanceName, + nextConsumer: nextConsumer, + } + return r +} + +const ( + receiverTagValue = "otlp_metrics" + receiverTransport = "grpc" +) + +func (r *Receiver) Export(ctx context.Context, req *collectormetrics.ExportMetricsServiceRequest) (*collectormetrics.ExportMetricsServiceResponse, error) { + receiverCtx := obsreport.ReceiverContext(ctx, r.instanceName, receiverTransport) + + md := pdata.MetricsFromOtlp(req.ResourceMetrics) + + err := r.sendToNextConsumer(receiverCtx, md) + if err != nil { + return nil, err + } + + return &collectormetrics.ExportMetricsServiceResponse{}, nil +} + +func (r *Receiver) sendToNextConsumer(ctx context.Context, md pdata.Metrics) error { + metricCount, dataPointCount := md.MetricAndDataPointCount() + if metricCount == 0 { + return nil + } + + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = obsreport.StartMetricsReceiveOp(ctx, r.instanceName, receiverTransport) + err := r.nextConsumer.ConsumeMetrics(ctx, md) + obsreport.EndMetricsReceiveOp(ctx, dataFormatProtobuf, dataPointCount, err) + + return err +} diff --git a/internal/otel_collector/receiver/otlpreceiver/metrics/otlp_test.go b/internal/otel_collector/receiver/otlpreceiver/metrics/otlp_test.go new file mode 100644 index 00000000000..4de14f2b472 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/metrics/otlp_test.go @@ -0,0 +1,222 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metrics + +import ( + "context" + "fmt" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + collectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/testutil" +) + +var _ collectormetrics.MetricsServiceServer = (*Receiver)(nil) + +func TestExport(t *testing.T) { + // given + + metricSink := new(consumertest.MetricsSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, metricSink) + defer doneFn() + + metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) + require.NoError(t, err, "Failed to create the MetricsServiceClient: %v", err) + defer metricsClientDoneFn() + + // when + + unixnanos1 := uint64(12578940000000012345) + unixnanos2 := uint64(12578940000000054321) + + resourceMetrics := []*otlpmetrics.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + { + Name: "mymetric", + Description: "My metric", + Unit: "ms", + Data: &otlpmetrics.Metric_IntSum{ + IntSum: &otlpmetrics.IntSum{ + IsMonotonic: true, + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key1", + Value: "value1", + }, + }, + StartTimeUnixNano: unixnanos1, + TimeUnixNano: unixnanos2, + Value: 123, + }, + { + Labels: []otlpcommon.StringKeyValue{ + { + Key: "key2", + Value: "value2", + }, + }, + StartTimeUnixNano: unixnanos1, + TimeUnixNano: unixnanos2, + Value: 456, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + // Keep metric data to compare the test result against it + // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream + metricData := pdata.MetricsFromOtlp(resourceMetrics).Clone() + + req := &collectormetrics.ExportMetricsServiceRequest{ + ResourceMetrics: resourceMetrics, + } + + resp, err := metricsClient.Export(context.Background(), req) + require.NoError(t, err, "Failed to export metrics: %v", err) + require.NotNil(t, resp, "The response is missing") + + // assert + + require.Equal(t, 1, len(metricSink.AllMetrics()), + "unexpected length: %v", len(metricSink.AllMetrics())) + + assert.EqualValues(t, metricData, metricSink.AllMetrics()[0]) +} + +func TestExport_EmptyRequest(t *testing.T) { + // given + + metricSink := new(consumertest.MetricsSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, metricSink) + defer doneFn() + + metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) + require.NoError(t, err, "Failed to create the MetricsServiceClient: %v", err) + defer metricsClientDoneFn() + + resp, err := metricsClient.Export(context.Background(), &collectormetrics.ExportMetricsServiceRequest{}) + require.NoError(t, err) + require.NotNil(t, resp) +} + +func TestExport_ErrorConsumer(t *testing.T) { + // given + + metricSink := new(consumertest.MetricsSink) + metricSink.SetConsumeError(fmt.Errorf("error")) + + port, doneFn := otlpReceiverOnGRPCServer(t, metricSink) + defer doneFn() 
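+
+	// The sink is set to fail every ConsumeMetrics call, so the Export RPC
+	// below is expected to return the consumer error to the client.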
+ + metricsClient, metricsClientDoneFn, err := makeMetricsServiceClient(port) + require.NoError(t, err, "Failed to create the MetricsServiceClient: %v", err) + defer metricsClientDoneFn() + + req := &collectormetrics.ExportMetricsServiceRequest{ResourceMetrics: []*otlpmetrics.ResourceMetrics{ + { + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + { + Name: "mymetric", + Description: "My metric", + Unit: "ms", + Data: &otlpmetrics.Metric_IntSum{ + IntSum: &otlpmetrics.IntSum{ + IsMonotonic: true, + AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Value: 123, + }, + { + Value: 456, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }} + resp, err := metricsClient.Export(context.Background(), req) + assert.EqualError(t, err, "rpc error: code = Unknown desc = error") + assert.Nil(t, resp) +} + +func makeMetricsServiceClient(port int) (collectormetrics.MetricsServiceClient, func(), error) { + addr := fmt.Sprintf(":%d", port) + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, nil, err + } + + metricsClient := collectormetrics.NewMetricsServiceClient(cc) + + doneFn := func() { _ = cc.Close() } + return metricsClient, doneFn, nil +} + +func otlpReceiverOnGRPCServer(t *testing.T, mc consumer.MetricsConsumer) (int, func()) { + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + doneFnList := []func(){func() { ln.Close() }} + done := func() { + for _, doneFn := range doneFnList { + doneFn() + } + } + + _, port, err := testutil.HostPortFromAddr(ln.Addr()) + require.NoError(t, err) + + r := New(receiverTagValue, mc) + // Now run it as a gRPC server + srv := obsreport.GRPCServerWithObservabilityEnabled() + collectormetrics.RegisterMetricsServiceServer(srv, r) + go func() { + _ = srv.Serve(ln) + }() + + return port, done +} diff --git a/internal/otel_collector/receiver/otlpreceiver/mixin.go b/internal/otel_collector/receiver/otlpreceiver/mixin.go new file mode 100644 index 00000000000..06be24b7e24 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/mixin.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlpreceiver + +import ( + "context" + + gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + "google.golang.org/grpc" + + collectorlog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1" + collectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + "go.opentelemetry.io/collector/receiver/otlpreceiver/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/trace" +) + +// RegisterTraceReceiver registers the trace receiver with a gRPC server and/or grpc-gateway mux, if non-nil. +func RegisterTraceReceiver(ctx context.Context, receiver *trace.Receiver, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { + if serverGRPC != nil { + collectortrace.RegisterTraceServiceServer(serverGRPC, receiver) + } + if gatewayMux != nil { + err := collectortrace.RegisterTraceServiceHandlerServer(ctx, gatewayMux, receiver) + if err != nil { + return err + } + // Also register an alias handler. This fixes bug https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + return collectortrace.RegisterTraceServiceHandlerServerAlias(ctx, gatewayMux, receiver) + } + return nil +} + +// RegisterMetricsReceiver registers the metrics receiver with a gRPC server and/or grpc-gateway mux, if non-nil. +func RegisterMetricsReceiver(ctx context.Context, receiver *metrics.Receiver, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { + if serverGRPC != nil { + collectormetrics.RegisterMetricsServiceServer(serverGRPC, receiver) + } + if gatewayMux != nil { + return collectormetrics.RegisterMetricsServiceHandlerServer(ctx, gatewayMux, receiver) + } + return nil +} + +// RegisterLogsReceiver registers the logs receiver with a gRPC server and/or grpc-gateway mux, if non-nil. +func RegisterLogsReceiver(ctx context.Context, receiver *logs.Receiver, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { + if serverGRPC != nil { + collectorlog.RegisterLogsServiceServer(serverGRPC, receiver) + } + if gatewayMux != nil { + return collectorlog.RegisterLogsServiceHandlerServer(ctx, gatewayMux, receiver) + } + return nil +} diff --git a/internal/otel_collector/receiver/otlpreceiver/otlp.go b/internal/otel_collector/receiver/otlpreceiver/otlp.go new file mode 100644 index 00000000000..6601e0ee84a --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/otlp.go @@ -0,0 +1,232 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlpreceiver + +import ( + "context" + "errors" + "net" + "net/http" + "sync" + + gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + "go.uber.org/zap" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + collectorlog "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/logs/v1" + collectormetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/metrics/v1" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + "go.opentelemetry.io/collector/receiver/otlpreceiver/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/trace" +) + +// otlpReceiver is the type that exposes Trace and Metrics reception. +type otlpReceiver struct { + cfg *Config + serverGRPC *grpc.Server + gatewayMux *gatewayruntime.ServeMux + serverHTTP *http.Server + + traceReceiver *trace.Receiver + metricsReceiver *metrics.Receiver + logReceiver *logs.Receiver + + stopOnce sync.Once + startServerOnce sync.Once + + logger *zap.Logger +} + +// newOtlpReceiver just creates the OpenTelemetry receiver services. It is the caller's +// responsibility to invoke the respective Start*Reception methods as well +// as the various Stop*Reception methods to end it. +func newOtlpReceiver(cfg *Config, logger *zap.Logger) (*otlpReceiver, error) { + r := &otlpReceiver{ + cfg: cfg, + logger: logger, + } + if cfg.GRPC != nil { + opts, err := cfg.GRPC.ToServerOption() + if err != nil { + return nil, err + } + r.serverGRPC = grpc.NewServer(opts...) + } + if cfg.HTTP != nil { + // Use our custom JSON marshaler instead of default Protobuf JSON marshaler. + // This is needed because OTLP spec defines encoding for trace and span id + // and it is only possible to do using Gogoproto-compatible JSONPb marshaler. 
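+		// (OTLP/JSON represents trace and span ids as hex strings — see the
+		// "trace_id" values in otlp_test.go — which requires the gogoproto-aware
+		// marshaler constructed below.)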
+		jsonpb := &JSONPb{
+			EmitDefaults: true,
+			Indent:       "  ",
+			OrigName:     true,
+		}
+		r.gatewayMux = gatewayruntime.NewServeMux(
+			gatewayruntime.WithProtoErrorHandler(gatewayruntime.DefaultHTTPProtoErrorHandler),
+			gatewayruntime.WithMarshalerOption("application/x-protobuf", &xProtobufMarshaler{}),
+			gatewayruntime.WithMarshalerOption(gatewayruntime.MIMEWildcard, jsonpb),
+		)
+	}
+
+	return r, nil
+}
+
+func (r *otlpReceiver) startGRPCServer(cfg *configgrpc.GRPCServerSettings, host component.Host) error {
+	r.logger.Info("Starting GRPC server on endpoint " + cfg.NetAddr.Endpoint)
+	var gln net.Listener
+	gln, err := cfg.ToListener()
+	if err != nil {
+		return err
+	}
+	go func() {
+		if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil {
+			host.ReportFatalError(errGrpc)
+		}
+	}()
+	return nil
+}
+
+func (r *otlpReceiver) startHTTPServer(cfg *confighttp.HTTPServerSettings, host component.Host) error {
+	r.logger.Info("Starting HTTP server on endpoint " + cfg.Endpoint)
+	var hln net.Listener
+	hln, err := r.cfg.HTTP.ToListener()
+	if err != nil {
+		return err
+	}
+	go func() {
+		if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil {
+			host.ReportFatalError(errHTTP)
+		}
+	}()
+	return nil
+}
+
+func (r *otlpReceiver) startProtocolServers(host component.Host) error {
+	var err error
+	if r.cfg.GRPC != nil {
+		err = r.startGRPCServer(r.cfg.GRPC, host)
+		if err != nil {
+			return err
+		}
+		if r.cfg.GRPC.NetAddr.Endpoint == defaultGRPCEndpoint {
+			r.logger.Info("Setting up a second GRPC listener on legacy endpoint " + legacyGRPCEndpoint)
+
+			// Copy the config by value so that overriding the endpoint below
+			// does not mutate the original settings (r.cfg.GRPC is a pointer).
+			cfgLegacyGRPC := *r.cfg.GRPC
+			// And use the legacy endpoint.
+			cfgLegacyGRPC.NetAddr.Endpoint = legacyGRPCEndpoint
+			err = r.startGRPCServer(&cfgLegacyGRPC, host)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if r.cfg.HTTP != nil {
+		r.serverHTTP = r.cfg.HTTP.ToServer(
+			r.gatewayMux,
+			confighttp.WithErrorHandler(errorHandler),
+		)
+		err = r.startHTTPServer(r.cfg.HTTP, host)
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// Start starts the configured protocol servers (gRPC and/or HTTP); the servers
+// handle all of the signals (traces, metrics, and logs) whose consumers have
+// been registered on this receiver.
+func (r *otlpReceiver) Start(_ context.Context, host component.Host) error {
+	if r.traceReceiver == nil && r.metricsReceiver == nil && r.logReceiver == nil {
+		return errors.New("cannot start receiver: no consumers were specified")
+	}
+
+	var err error
+	r.startServerOnce.Do(func() {
+		err = r.startProtocolServers(host)
+	})
+	return err
+}
+
+// Shutdown stops the receiver's gRPC and HTTP servers.
+func (r *otlpReceiver) Shutdown(context.Context) error {
+	var err error
+	r.stopOnce.Do(func() {
+		err = nil
+
+		if r.serverHTTP != nil {
+			err = r.serverHTTP.Close()
+		}
+
+		if r.serverGRPC != nil {
+			r.serverGRPC.Stop()
+		}
+	})
+	return err
+}
+
+func (r *otlpReceiver) registerTraceConsumer(ctx context.Context, tc consumer.TracesConsumer) error {
+	if tc == nil {
+		return componenterror.ErrNilNextConsumer
+	}
+	r.traceReceiver = trace.New(r.cfg.Name(), tc)
+	if r.serverGRPC != nil {
+		collectortrace.RegisterTraceServiceServer(r.serverGRPC, r.traceReceiver)
+	}
+	if r.gatewayMux != nil {
+		err := collectortrace.RegisterTraceServiceHandlerServer(ctx, r.gatewayMux, r.traceReceiver)
+		if err != nil {
+			return err
+		}
+		// Also register an alias handler.
This fixes bug https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + return collectortrace.RegisterTraceServiceHandlerServerAlias(ctx, r.gatewayMux, r.traceReceiver) + } + return nil +} + +func (r *otlpReceiver) registerMetricsConsumer(ctx context.Context, mc consumer.MetricsConsumer) error { + if mc == nil { + return componenterror.ErrNilNextConsumer + } + r.metricsReceiver = metrics.New(r.cfg.Name(), mc) + if r.serverGRPC != nil { + collectormetrics.RegisterMetricsServiceServer(r.serverGRPC, r.metricsReceiver) + } + if r.gatewayMux != nil { + return collectormetrics.RegisterMetricsServiceHandlerServer(ctx, r.gatewayMux, r.metricsReceiver) + } + return nil +} + +func (r *otlpReceiver) registerLogsConsumer(ctx context.Context, tc consumer.LogsConsumer) error { + if tc == nil { + return componenterror.ErrNilNextConsumer + } + r.logReceiver = logs.New(r.cfg.Name(), tc) + if r.serverGRPC != nil { + collectorlog.RegisterLogsServiceServer(r.serverGRPC, r.logReceiver) + } + if r.gatewayMux != nil { + return collectorlog.RegisterLogsServiceHandlerServer(ctx, r.gatewayMux, r.logReceiver) + } + return nil +} diff --git a/internal/otel_collector/receiver/otlpreceiver/otlp_test.go b/internal/otel_collector/receiver/otlpreceiver/otlp_test.go new file mode 100644 index 00000000000..a14a1c09da9 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/otlp_test.go @@ -0,0 +1,777 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlpreceiver + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "testing" + "time" + + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport/obsreporttest" + "go.opentelemetry.io/collector/testutil" + "go.opentelemetry.io/collector/translator/conventions" +) + +const otlpReceiverName = "otlp_receiver_test" + +var traceJSON = []byte(` + { + "resource_spans": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { "stringValue": "testHost" } + } + ] + }, + "instrumentation_library_spans": [ + { + "spans": [ + { + "trace_id": "5B8EFFF798038103D269B633813FC60C", + "span_id": "EEE19B7EC3C1B173", + "name": "testSpan", + "start_time_unix_nano": 1544712660000000000, + "end_time_unix_nano": 1544712661000000000, + "attributes": [ + { + "key": "attr1", + "value": { "intValue": 55 } + } + ] + } + ] + } + ] + } + ] + }`) + +var resourceSpansOtlp = otlptrace.ResourceSpans{ + + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: conventions.AttributeHostName, + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "testHost"}}, + }, + }, + }, + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + { + TraceId: data.NewTraceID([16]byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC}), + SpanId: data.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73}), + Name: "testSpan", + StartTimeUnixNano: 1544712660000000000, + EndTimeUnixNano: 1544712661000000000, + Attributes: []otlpcommon.KeyValue{ + { + Key: "attr1", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: 55}}, + }, + }, + }, + }, + }, + }, +} + +var traceOtlp = pdata.TracesFromOtlp([]*otlptrace.ResourceSpans{&resourceSpansOtlp}) + +func TestJsonHttp(t *testing.T) { + tests := []struct { + name string + encoding string + err error + }{ + { + name: "JSONUncompressed", + encoding: "", + }, + { + name: "JSONGzipCompressed", + encoding: "gzip", + }, + { + name: "NotGRPCError", + encoding: "", + err: errors.New("my error"), + }, + { + name: "GRPCError", + encoding: "", + err: 
status.New(codes.Internal, "").Err(), + }, + } + addr := testutil.GetAvailableLocalAddress(t) + + // Set the buffer count to 1 to make it flush the test span immediately. + sink := new(consumertest.TracesSink) + ocr := newHTTPReceiver(t, addr, sink, nil) + + require.NoError(t, ocr.Start(context.Background(), componenttest.NewNopHost()), "Failed to start trace receiver") + defer ocr.Shutdown(context.Background()) + + // TODO(nilebox): make starting server deterministic + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + // Previously we used /v1/trace as the path. The correct path according to OTLP spec + // is /v1/traces. We currently support both on the receiving side to give graceful + // period for senders to roll out a fix, so we test for both paths to make sure + // the receiver works correctly. + targetURLPaths := []string{"/v1/trace", "/v1/traces"} + + for _, test := range tests { + for _, targetURLPath := range targetURLPaths { + t.Run(test.name+targetURLPath, func(t *testing.T) { + url := fmt.Sprintf("http://%s%s", addr, targetURLPath) + sink.Reset() + testHTTPJSONRequest(t, url, sink, test.encoding, test.err) + }) + } + } +} + +func testHTTPJSONRequest(t *testing.T, url string, sink *consumertest.TracesSink, encoding string, expectedErr error) { + var buf *bytes.Buffer + var err error + switch encoding { + case "gzip": + buf, err = compressGzip(traceJSON) + require.NoError(t, err, "Error while gzip compressing trace: %v", err) + default: + buf = bytes.NewBuffer(traceJSON) + } + sink.SetConsumeError(expectedErr) + req, err := http.NewRequest("POST", url, buf) + require.NoError(t, err, "Error creating trace POST request: %v", err) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", encoding) + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "Error posting trace to grpc-gateway server: %v", err) + + respBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading response from trace grpc-gateway, %v", err) + } + respStr := string(respBytes) + err = resp.Body.Close() + if err != nil { + t.Errorf("Error closing response body, %v", err) + } + + allTraces := sink.AllTraces() + if expectedErr == nil { + assert.Equal(t, 200, resp.StatusCode) + var respJSON map[string]interface{} + assert.NoError(t, json.Unmarshal([]byte(respStr), &respJSON)) + assert.Len(t, respJSON, 0, "Got unexpected response from trace grpc-gateway") + + require.Len(t, allTraces, 1) + + got := allTraces[0] + assert.EqualValues(t, got, traceOtlp) + } else { + errStatus := &spb.Status{} + assert.NoError(t, json.Unmarshal([]byte(respStr), errStatus)) + if s, ok := status.FromError(expectedErr); ok { + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.True(t, proto.Equal(errStatus, s.Proto())) + } else { + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.True(t, proto.Equal(errStatus, &spb.Status{Code: int32(codes.Unknown), Message: "my error"})) + } + require.Len(t, allTraces, 0) + } + +} + +func TestJsonMarshaling(t *testing.T) { + m := jsonpb.Marshaler{} + json, err := m.MarshalToString(&resourceSpansOtlp) + assert.NoError(t, err) + + var resourceSpansOtlp2 otlptrace.ResourceSpans + err = jsonpb.UnmarshalString(json, &resourceSpansOtlp2) + assert.NoError(t, err) + + assert.EqualValues(t, resourceSpansOtlp, resourceSpansOtlp2) +} + +func TestJsonUnmarshaling(t *testing.T) { + var resourceSpansOtlp2 otlptrace.ResourceSpans + err := 
jsonpb.UnmarshalString(` + { + "instrumentation_library_spans": [ + { + "spans": [ + { + } + ] + } + ] + }`, &resourceSpansOtlp2) + assert.NoError(t, err) + assert.EqualValues(t, data.TraceID{}, resourceSpansOtlp2.InstrumentationLibrarySpans[0].Spans[0].TraceId) + + tests := []struct { + name string + json string + bytes [16]byte + }{ + { + name: "empty string trace id", + json: `""`, + bytes: [16]byte{}, + }, + { + name: "zero bytes trace id", + json: `"00000000000000000000000000000000"`, + bytes: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var resourceSpansOtlp2 otlptrace.ResourceSpans + jsonStr := fmt.Sprintf(` + { + "instrumentation_library_spans": [ + { + "spans": [ + { + "trace_id": %v + } + ] + } + ] + }`, test.json) + err := jsonpb.UnmarshalString(jsonStr, &resourceSpansOtlp2) + assert.NoError(t, err) + assert.EqualValues(t, data.NewTraceID(test.bytes), resourceSpansOtlp2.InstrumentationLibrarySpans[0].Spans[0].TraceId) + }) + } +} + +func TestProtoHttp(t *testing.T) { + tests := []struct { + name string + encoding string + err error + }{ + { + name: "ProtoUncompressed", + encoding: "", + }, + { + name: "ProtoGzipCompressed", + encoding: "gzip", + }, + { + name: "NotGRPCError", + encoding: "", + err: errors.New("my error"), + }, + { + name: "GRPCError", + encoding: "", + err: status.New(codes.Internal, "").Err(), + }, + } + addr := testutil.GetAvailableLocalAddress(t) + + // Set the buffer count to 1 to make it flush the test span immediately. + tSink := new(consumertest.TracesSink) + mSink := new(consumertest.MetricsSink) + ocr := newHTTPReceiver(t, addr, tSink, mSink) + + require.NoError(t, ocr.Start(context.Background(), componenttest.NewNopHost()), "Failed to start trace receiver") + defer ocr.Shutdown(context.Background()) + + // TODO(nilebox): make starting server deterministic + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + wantOtlp := pdata.TracesToOtlp(testdata.GenerateTraceDataOneSpan()) + traceProto := collectortrace.ExportTraceServiceRequest{ + ResourceSpans: wantOtlp, + } + traceBytes, err := traceProto.Marshal() + if err != nil { + t.Errorf("Error marshaling protobuf: %v", err) + } + + // Previously we used /v1/trace as the path. The correct path according to OTLP spec + // is /v1/traces. We currently support both on the receiving side to give graceful + // period for senders to roll out a fix, so we test for both paths to make sure + // the receiver works correctly. 
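+	// (An equivalent request can be sent by hand, e.g. with curl — a sketch,
+	// assuming req.pb holds a serialized ExportTraceServiceRequest:
+	//   curl -X POST -H 'Content-Type: application/x-protobuf' \
+	//        --data-binary @req.pb http://<addr>/v1/traces
+	// where <addr> is the receiver's HTTP endpoint.)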
+ targetURLPaths := []string{"/v1/trace", "/v1/traces"} + + for _, test := range tests { + for _, targetURLPath := range targetURLPaths { + t.Run(test.name+targetURLPath, func(t *testing.T) { + url := fmt.Sprintf("http://%s%s", addr, targetURLPath) + tSink.Reset() + testHTTPProtobufRequest(t, url, tSink, test.encoding, traceBytes, test.err, wantOtlp) + }) + } + } +} +func testHTTPProtobufRequest( + t *testing.T, + url string, + tSink *consumertest.TracesSink, + encoding string, + traceBytes []byte, + expectedErr error, + wantOtlp []*otlptrace.ResourceSpans, +) { + var buf *bytes.Buffer + var err error + switch encoding { + case "gzip": + buf, err = compressGzip(traceBytes) + require.NoError(t, err, "Error while gzip compressing trace: %v", err) + default: + buf = bytes.NewBuffer(traceBytes) + } + tSink.SetConsumeError(expectedErr) + req, err := http.NewRequest("POST", url, buf) + require.NoError(t, err, "Error creating trace POST request: %v", err) + req.Header.Set("Content-Type", "application/x-protobuf") + req.Header.Set("Content-Encoding", encoding) + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "Error posting trace to grpc-gateway server: %v", err) + + respBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err, "Error reading response from trace grpc-gateway") + require.NoError(t, resp.Body.Close(), "Error closing response body") + + allTraces := tSink.AllTraces() + + require.Equal(t, "application/x-protobuf", resp.Header.Get("Content-Type"), "Unexpected response Content-Type") + + if expectedErr == nil { + require.Equal(t, 200, resp.StatusCode, "Unexpected return status") + tmp := &collectortrace.ExportTraceServiceResponse{} + err = tmp.Unmarshal(respBytes) + require.NoError(t, err, "Unable to unmarshal response to ExportTraceServiceResponse proto") + + require.Len(t, allTraces, 1) + + gotOtlp := pdata.TracesToOtlp(allTraces[0]) + + if len(gotOtlp) != len(wantOtlp) { + t.Fatalf("len(traces):\nGot: %d\nWant: %d\n", len(gotOtlp), len(wantOtlp)) + } + + got := gotOtlp[0] + want := wantOtlp[0] + + if !assert.EqualValues(t, got, want) { + t.Errorf("Sending trace proto over http failed\nGot:\n%v\nWant:\n%v\n", + got.String(), + want.String()) + } + } else { + errStatus := &spb.Status{} + assert.NoError(t, proto.Unmarshal(respBytes, errStatus)) + if s, ok := status.FromError(expectedErr); ok { + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.True(t, proto.Equal(errStatus, s.Proto())) + } else { + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.True(t, proto.Equal(errStatus, &spb.Status{Code: int32(codes.Unknown), Message: "my error"})) + } + require.Len(t, allTraces, 0) + } +} + +func TestOTLPReceiverInvalidContentEncoding(t *testing.T) { + tests := []struct { + name string + content string + encoding string + reqBodyFunc func() (*bytes.Buffer, error) + resBodyFunc func() ([]byte, error) + status int + }{ + { + name: "JsonGzipUncompressed", + content: "application/json", + encoding: "gzip", + reqBodyFunc: func() (*bytes.Buffer, error) { + return bytes.NewBuffer([]byte(`{"key": "value"}`)), nil + }, + resBodyFunc: func() ([]byte, error) { + return json.Marshal(status.New(codes.InvalidArgument, "gzip: invalid header").Proto()) + }, + status: 400, + }, + { + name: "ProtoGzipUncompressed", + content: "application/x-protobuf", + encoding: "gzip", + reqBodyFunc: func() (*bytes.Buffer, error) { + return bytes.NewBuffer([]byte(`{"key": "value"}`)), nil + }, + resBodyFunc: func() ([]byte, error) { + 
return proto.Marshal(status.New(codes.InvalidArgument, "gzip: invalid header").Proto()) + }, + status: 400, + }, + } + addr := testutil.GetAvailableLocalAddress(t) + + // Set the buffer count to 1 to make it flush the test span immediately. + tSink := new(consumertest.TracesSink) + mSink := new(consumertest.MetricsSink) + ocr := newHTTPReceiver(t, addr, tSink, mSink) + + require.NoError(t, ocr.Start(context.Background(), componenttest.NewNopHost()), "Failed to start trace receiver") + defer ocr.Shutdown(context.Background()) + + url := fmt.Sprintf("http://%s/v1/traces", addr) + + // Wait for the servers to start + <-time.After(10 * time.Millisecond) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + body, err := test.reqBodyFunc() + require.NoError(t, err, "Error creating request body: %v", err) + + req, err := http.NewRequest("POST", url, body) + require.NoError(t, err, "Error creating trace POST request: %v", err) + req.Header.Set("Content-Type", test.content) + req.Header.Set("Content-Encoding", test.encoding) + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "Error posting trace to grpc-gateway server: %v", err) + + respBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err, "Error reading response from trace grpc-gateway") + exRespBytes, err := test.resBodyFunc() + require.NoError(t, err, "Error creating expecting response body") + require.NoError(t, resp.Body.Close(), "Error closing response body") + + require.Equal(t, test.status, resp.StatusCode, "Unexpected return status") + require.Equal(t, test.content, resp.Header.Get("Content-Type"), "Unexpected response Content-Type") + require.Equal(t, exRespBytes, respBytes, "Unexpected response content") + }) + } +} + +func TestGRPCNewPortAlreadyUsed(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + ln, err := net.Listen("tcp", addr) + require.NoError(t, err, "failed to listen on %q: %v", addr, err) + defer ln.Close() + + r := newGRPCReceiver(t, otlpReceiverName, addr, new(consumertest.TracesSink), new(consumertest.MetricsSink)) + require.NotNil(t, r) + + require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) +} + +func TestHTTPNewPortAlreadyUsed(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + ln, err := net.Listen("tcp", addr) + require.NoError(t, err, "failed to listen on %q: %v", addr, err) + defer ln.Close() + + r := newHTTPReceiver(t, addr, new(consumertest.TracesSink), new(consumertest.MetricsSink)) + require.NotNil(t, r) + + require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) +} + +func TestGRPCStartWithoutConsumers(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + r := newGRPCReceiver(t, otlpReceiverName, addr, nil, nil) + require.NotNil(t, r) + require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) +} + +func TestHTTPStartWithoutConsumers(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + r := newHTTPReceiver(t, addr, nil, nil) + require.NotNil(t, r) + require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) +} + +// TestOTLPReceiverTrace_HandleNextConsumerResponse checks if the trace receiver +// is returning the proper response (return and metrics) when the next consumer +// in the pipeline reports error. The test changes the responses returned by the +// next trace consumer, checks if data was passed down the pipeline and if +// proper metrics were recorded. 
It also uses all endpoints supported by the +// trace receiver. +func TestOTLPReceiverTrace_HandleNextConsumerResponse(t *testing.T) { + type ingestionStateTest struct { + okToIngest bool + expectedCode codes.Code + } + tests := []struct { + name string + expectedReceivedBatches int + expectedIngestionBlockedRPCs int + ingestionStates []ingestionStateTest + }{ + { + name: "IngestTest", + expectedReceivedBatches: 2, + expectedIngestionBlockedRPCs: 1, + ingestionStates: []ingestionStateTest{ + { + okToIngest: true, + expectedCode: codes.OK, + }, + { + okToIngest: false, + expectedCode: codes.Unknown, + }, + { + okToIngest: true, + expectedCode: codes.OK, + }, + }, + }, + } + + addr := testutil.GetAvailableLocalAddress(t) + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*otlptrace.ResourceSpans{ + { + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + { + TraceId: data.NewTraceID( + [16]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + }, + ), + }, + }, + }, + }, + }, + }, + } + + exportBidiFn := func( + t *testing.T, + cc *grpc.ClientConn, + msg *collectortrace.ExportTraceServiceRequest) error { + + acc := collectortrace.NewTraceServiceClient(cc) + _, err := acc.Export(context.Background(), req) + + return err + } + + exporters := []struct { + receiverTag string + exportFn func( + t *testing.T, + cc *grpc.ClientConn, + msg *collectortrace.ExportTraceServiceRequest) error + }{ + { + receiverTag: "otlp_trace", + exportFn: exportBidiFn, + }, + } + for _, exporter := range exporters { + for _, tt := range tests { + t.Run(tt.name+"/"+exporter.receiverTag, func(t *testing.T) { + doneFn, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer doneFn() + + sink := new(consumertest.TracesSink) + + ocr := newGRPCReceiver(t, exporter.receiverTag, addr, sink, nil) + require.NotNil(t, ocr) + require.NoError(t, ocr.Start(context.Background(), componenttest.NewNopHost())) + defer ocr.Shutdown(context.Background()) + + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) + defer cc.Close() + + for _, ingestionState := range tt.ingestionStates { + if ingestionState.okToIngest { + sink.SetConsumeError(nil) + } else { + sink.SetConsumeError(fmt.Errorf("%q: consumer error", tt.name)) + } + + err = exporter.exportFn(t, cc, req) + + status, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, ingestionState.expectedCode, status.Code()) + } + + require.Equal(t, tt.expectedReceivedBatches, len(sink.AllTraces())) + + obsreporttest.CheckReceiverTracesViews(t, exporter.receiverTag, "grpc", int64(tt.expectedReceivedBatches), int64(tt.expectedIngestionBlockedRPCs)) + }) + } + } +} + +func TestGRPCInvalidTLSCredentials(t *testing.T) { + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: "IncorrectTLS", + }, + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: "tcp", + }, + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "willfail", + }, + }, + }, + }, + } + + // TLS is resolved during Creation of the receiver for GRPC. 
+ _, err := createReceiver(cfg, zap.NewNop()) + assert.EqualError(t, err, + `failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither`) +} + +func TestHTTPInvalidTLSCredentials(t *testing.T) { + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: "IncorrectTLS", + }, + Protocols: Protocols{ + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: testutil.GetAvailableLocalAddress(t), + TLSSetting: &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "willfail", + }, + }, + }, + }, + } + + // TLS is resolved during Start for HTTP. + r := newReceiver(t, NewFactory(), cfg, new(consumertest.TracesSink), new(consumertest.MetricsSink)) + assert.EqualError(t, r.Start(context.Background(), componenttest.NewNopHost()), + `failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither`) +} + +func newGRPCReceiver(t *testing.T, name string, endpoint string, tc consumer.TracesConsumer, mc consumer.MetricsConsumer) *otlpReceiver { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.SetName(name) + cfg.GRPC.NetAddr.Endpoint = endpoint + cfg.HTTP = nil + return newReceiver(t, factory, cfg, tc, mc) +} + +func newHTTPReceiver(t *testing.T, endpoint string, tc consumer.TracesConsumer, mc consumer.MetricsConsumer) *otlpReceiver { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.SetName(otlpReceiverName) + cfg.HTTP.Endpoint = endpoint + cfg.GRPC = nil + return newReceiver(t, factory, cfg, tc, mc) +} + +func newReceiver(t *testing.T, factory component.ReceiverFactory, cfg *Config, tc consumer.TracesConsumer, mc consumer.MetricsConsumer) *otlpReceiver { + r, err := createReceiver(cfg, zap.NewNop()) + require.NoError(t, err) + if tc != nil { + params := component.ReceiverCreateParams{} + _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, tc) + require.NoError(t, err) + } + if mc != nil { + params := component.ReceiverCreateParams{} + _, err := factory.CreateMetricsReceiver(context.Background(), params, cfg, mc) + require.NoError(t, err) + } + return r +} + +func compressGzip(body []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + + gw := gzip.NewWriter(&buf) + defer gw.Close() + + _, err := gw.Write(body) + if err != nil { + return nil, err + } + + return &buf, nil +} diff --git a/internal/otel_collector/receiver/otlpreceiver/otlphttp.go b/internal/otel_collector/receiver/otlpreceiver/otlphttp.go new file mode 100644 index 00000000000..1f0e14d5171 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/otlphttp.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlpreceiver + +import ( + "bytes" + "net/http" + + "github.com/gogo/protobuf/jsonpb" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// xProtobufMarshaler is a Marshaler which wraps runtime.ProtoMarshaller +// and sets ContentType to application/x-protobuf +type xProtobufMarshaler struct { + *runtime.ProtoMarshaller +} + +// ContentType always returns "application/x-protobuf". +func (*xProtobufMarshaler) ContentType() string { + return "application/x-protobuf" +} + +var jsonMarshaller = &jsonpb.Marshaler{} + +// errorHandler encodes the HTTP error message inside a rpc.Status message as required +// by the OTLP protocol. +func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) { + var ( + msg []byte + s *status.Status + err error + ) + // Pre-computed status with code=Internal to be used in case of a marshaling error. + fallbackMsg := []byte(`{"code": 13, "message": "failed to marshal error message"}`) + fallbackContentType := "application/json" + + if statusCode == http.StatusBadRequest { + s = status.New(codes.InvalidArgument, errMsg) + } else { + s = status.New(codes.Internal, errMsg) + } + + contentType := r.Header.Get("Content-Type") + if contentType == "application/json" { + buf := new(bytes.Buffer) + err = jsonMarshaller.Marshal(buf, s.Proto()) + msg = buf.Bytes() + } else { + msg, err = proto.Marshal(s.Proto()) + } + if err != nil { + msg = fallbackMsg + contentType = fallbackContentType + statusCode = http.StatusInternalServerError + } + + w.Header().Set("Content-Type", contentType) + w.WriteHeader(statusCode) + w.Write(msg) +} diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/bad_empty_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_empty_config.yaml new file mode 100644 index 00000000000..db0b165a615 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_empty_config.yaml @@ -0,0 +1,15 @@ +receivers: + otlp: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/bad_no_proto_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_no_proto_config.yaml new file mode 100644 index 00000000000..09731f05939 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_no_proto_config.yaml @@ -0,0 +1,16 @@ +receivers: + otlp: + protocols: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/bad_proto_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_proto_config.yaml new file mode 100644 index 00000000000..3d79ae12a8c --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_proto_config.yaml @@ -0,0 +1,18 @@ +receivers: + otlp: + protocols: + thrift: + endpoint: "127.0.0.1:1234" + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/config.yaml 
b/internal/otel_collector/receiver/otlpreceiver/testdata/config.yaml
new file mode 100644
index 00000000000..29ab7bfa108
--- /dev/null
+++ b/internal/otel_collector/receiver/otlpreceiver/testdata/config.yaml
@@ -0,0 +1,95 @@
+receivers:
+  # The following entry initializes the default OTLP receiver.
+  # The full name of this receiver is `otlp` and can be referenced in pipelines by 'otlp'.
+  otlp:
+    protocols:
+      grpc:
+      http:
+  # The following entry initializes the default OTLP receiver with only gRPC support.
+  otlp/only_grpc:
+    protocols:
+      grpc:
+  # The following entry initializes the default OTLP receiver with only HTTP support.
+  otlp/only_http:
+    protocols:
+      http:
+  # The following entry demonstrates configuring the common receiver settings:
+  # - endpoint
+  # This configuration is of type 'otlp' and has the name 'customname', giving a full name
+  # of 'otlp/customname' (i.e. 'type/name'). To reference this configuration in a pipeline, use the full name `otlp/customname`.
+  otlp/customname:
+    protocols:
+      grpc:
+        # The receiver will listen on endpoint: "localhost:9090".
+        endpoint: localhost:9090
+  # The following entry configures all of the keepalive settings. These settings are used to configure the receiver.
+  otlp/keepalive:
+    protocols:
+      grpc:
+        keepalive:
+          server_parameters:
+            max_connection_idle: 11s
+            max_connection_age: 12s
+            max_connection_age_grace: 13s
+            time: 30s
+            timeout: 5s
+          enforcement_policy:
+            min_time: 10s
+            permit_without_stream: true
+  # The following demonstrates how to set maximum limits on streams, message size, and connection idle time.
+  # Note: This test YAML groups the settings by their structure; however, all of the settings can
+  # be mixed and matched, as with the maximum connection idle setting added in this example.
+  otlp/msg-size-conc-connect-max-idle:
+    protocols:
+      grpc:
+        max_recv_msg_size_mib: 32
+        max_concurrent_streams: 16
+        read_buffer_size: 1024
+        write_buffer_size: 1024
+        keepalive:
+          server_parameters:
+            max_connection_idle: 10s
+  # The following entry demonstrates how to specify TLS credentials for the server.
+  # Note: These files do not exist. If the receiver is started with this configuration, it will fail.
+  otlp/tlscredentials:
+    protocols:
+      grpc:
+        tls_settings:
+          cert_file: test.crt
+          key_file: test.key
+      http:
+        tls_settings:
+          cert_file: test.crt
+          key_file: test.key
+  # The following entry demonstrates how to specify a Unix Domain Socket for the server.
+  otlp/uds:
+    protocols:
+      grpc:
+        transport: unix
+        endpoint: /tmp/grpc_otlp.sock
+      http:
+        # transport: unix
+        endpoint: /tmp/http_otlp.sock
+  # The following entry demonstrates how to configure the OTLP receiver to allow Cross-Origin Resource Sharing (CORS).
+  # Both fully qualified domain names and the use of wildcards are supported.
+  otlp/cors:
+    protocols:
+      http:
+        cors_allowed_origins:
+          - https://*.test.com # Wildcard subdomain. Allows domains like https://www.test.com and https://foo.test.com but not https://wwwtest.com.
+          - https://test.com # Fully qualified domain name. Allows https://test.com only.
+processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [otlp/customname] + processors: [exampleprocessor] + exporters: [exampleexporter] + metrics: + receivers: [otlp] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/typo_default_proto_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/typo_default_proto_config.yaml new file mode 100644 index 00000000000..15cf1f28599 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/typo_default_proto_config.yaml @@ -0,0 +1,18 @@ +receivers: + otlp: + protocols: + grpc: + htttp: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/receiver/otlpreceiver/trace/otlp.go b/internal/otel_collector/receiver/otlpreceiver/trace/otlp.go new file mode 100644 index 00000000000..3cbb5442e93 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/trace/otlp.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + dataFormatProtobuf = "protobuf" +) + +// Receiver is the type used to handle spans from OpenTelemetry exporters. +type Receiver struct { + instanceName string + nextConsumer consumer.TracesConsumer +} + +// New creates a new Receiver reference. +func New(instanceName string, nextConsumer consumer.TracesConsumer) *Receiver { + r := &Receiver{ + instanceName: instanceName, + nextConsumer: nextConsumer, + } + + return r +} + +const ( + receiverTagValue = "otlp_trace" + receiverTransport = "grpc" +) + +func (r *Receiver) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { + // We need to ensure that it propagates the receiver name as a tag + ctxWithReceiverName := obsreport.ReceiverContext(ctx, r.instanceName, receiverTransport) + + // Perform backward compatibility conversion of Span Status code according to + // OTLP specification. + // See https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L231 + // + // If code==STATUS_CODE_UNSET then the value of `deprecated_code` is the + // carrier of the overall status according to these rules: + // + // if deprecated_code==DEPRECATED_STATUS_CODE_OK then the receiver MUST interpret + // the overall status to be STATUS_CODE_UNSET. 
+ // + // if deprecated_code!=DEPRECATED_STATUS_CODE_OK then the receiver MUST interpret + // the overall status to be STATUS_CODE_ERROR. + // + // If code!=STATUS_CODE_UNSET then the value of `deprecated_code` MUST be + // ignored, the `code` field is the sole carrier of the status. + for _, rss := range req.ResourceSpans { + for _, ils := range rss.InstrumentationLibrarySpans { + for _, span := range ils.Spans { + if span.Status.Code == otlptrace.Status_STATUS_CODE_UNSET && + span.Status.DeprecatedCode != otlptrace.Status_DEPRECATED_STATUS_CODE_OK { + span.Status.Code = otlptrace.Status_STATUS_CODE_ERROR + } + } + } + } + + td := pdata.TracesFromOtlp(req.ResourceSpans) + err := r.sendToNextConsumer(ctxWithReceiverName, td) + if err != nil { + return nil, err + } + + return &collectortrace.ExportTraceServiceResponse{}, nil +} + +func (r *Receiver) sendToNextConsumer(ctx context.Context, td pdata.Traces) error { + numSpans := td.SpanCount() + if numSpans == 0 { + return nil + } + + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = obsreport.StartTraceDataReceiveOp(ctx, r.instanceName, receiverTransport) + err := r.nextConsumer.ConsumeTraces(ctx, td) + obsreport.EndTraceDataReceiveOp(ctx, dataFormatProtobuf, numSpans, err) + + return err +} diff --git a/internal/otel_collector/receiver/otlpreceiver/trace/otlp_test.go b/internal/otel_collector/receiver/otlpreceiver/trace/otlp_test.go new file mode 100644 index 00000000000..aef37569e89 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/trace/otlp_test.go @@ -0,0 +1,288 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "context" + "fmt" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data" + collectortrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/collector/trace/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/testutil" +) + +var _ collectortrace.TraceServiceServer = (*Receiver)(nil) + +func TestExport(t *testing.T) { + // given + + traceSink := new(consumertest.TracesSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, traceSink) + defer doneFn() + + traceClient, traceClientDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer traceClientDoneFn() + + // when + + unixnanos := uint64(12578940000000012345) + traceID := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1} + spanID := [8]byte{8, 7, 6, 5, 4, 3, 2, 1} + resourceSpans := []*otlptrace.ResourceSpans{ + { + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + { + TraceId: data.NewTraceID(traceID), + SpanId: data.NewSpanID(spanID), + Name: "operationB", + Kind: otlptrace.Span_SPAN_KIND_SERVER, + StartTimeUnixNano: unixnanos, + EndTimeUnixNano: unixnanos, + Status: otlptrace.Status{Message: "status-cancelled", Code: otlptrace.Status_STATUS_CODE_ERROR}, + TraceState: "a=text,b=123", + }, + }, + }, + }, + }, + } + + // Keep trace data to compare the test result against it + // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream + traceData := pdata.TracesFromOtlp(resourceSpans).Clone() + + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: resourceSpans, + } + + resp, err := traceClient.Export(context.Background(), req) + require.NoError(t, err, "Failed to export trace: %v", err) + require.NotNil(t, resp, "The response is missing") + + // assert + + require.Equal(t, 1, len(traceSink.AllTraces()), "unexpected length: %v", len(traceSink.AllTraces())) + + assert.EqualValues(t, traceData, traceSink.AllTraces()[0]) +} + +func TestExport_EmptyRequest(t *testing.T) { + traceSink := new(consumertest.TracesSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, traceSink) + defer doneFn() + + traceClient, traceClientDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer traceClientDoneFn() + + resp, err := traceClient.Export(context.Background(), &collectortrace.ExportTraceServiceRequest{}) + assert.NoError(t, err, "Failed to export trace: %v", err) + assert.NotNil(t, resp, "The response is missing") +} + +func TestExport_ErrorConsumer(t *testing.T) { + traceSink := new(consumertest.TracesSink) + traceSink.SetConsumeError(fmt.Errorf("error")) + + port, doneFn := otlpReceiverOnGRPCServer(t, traceSink) + defer doneFn() + + traceClient, traceClientDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer traceClientDoneFn() + + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*otlptrace.ResourceSpans{ + { + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + 
Spans: []*otlptrace.Span{ + { + Name: "operationB", + }, + }, + }, + }, + }, + }, + } + + resp, err := traceClient.Export(context.Background(), req) + assert.EqualError(t, err, "rpc error: code = Unknown desc = error") + assert.Nil(t, resp) +} + +func makeTraceServiceClient(port int) (collectortrace.TraceServiceClient, func(), error) { + addr := fmt.Sprintf(":%d", port) + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, nil, err + } + + metricsClient := collectortrace.NewTraceServiceClient(cc) + + doneFn := func() { _ = cc.Close() } + return metricsClient, doneFn, nil +} + +func otlpReceiverOnGRPCServer(t *testing.T, tc consumer.TracesConsumer) (int, func()) { + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + doneFnList := []func(){func() { ln.Close() }} + done := func() { + for _, doneFn := range doneFnList { + doneFn() + } + } + + _, port, err := testutil.HostPortFromAddr(ln.Addr()) + if err != nil { + done() + t.Fatalf("Failed to parse host:port from listener address: %s error: %v", ln.Addr(), err) + } + + r := New(receiverTagValue, tc) + require.NoError(t, err) + + // Now run it as a gRPC server + srv := obsreport.GRPCServerWithObservabilityEnabled() + collectortrace.RegisterTraceServiceServer(srv, r) + go func() { + _ = srv.Serve(ln) + }() + + return port, done +} + +func TestDeprecatedStatusCode(t *testing.T) { + traceSink := new(consumertest.TracesSink) + + port, doneFn := otlpReceiverOnGRPCServer(t, traceSink) + defer doneFn() + + traceClient, traceClientDoneFn, err := makeTraceServiceClient(port) + require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) + defer traceClientDoneFn() + + // See specification for handling status code here: + // https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L231 + tests := []struct { + sendCode otlptrace.Status_StatusCode + sendDeprecatedCode otlptrace.Status_DeprecatedStatusCode + expectedRcvCode otlptrace.Status_StatusCode + }{ + { + // If code==STATUS_CODE_UNSET then the value of `deprecated_code` is the + // carrier of the overall status according to these rules: + // + // if deprecated_code==DEPRECATED_STATUS_CODE_OK then the receiver MUST interpret + // the overall status to be STATUS_CODE_UNSET. + sendCode: otlptrace.Status_STATUS_CODE_UNSET, + sendDeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_OK, + expectedRcvCode: otlptrace.Status_STATUS_CODE_UNSET, + }, + { + // if deprecated_code!=DEPRECATED_STATUS_CODE_OK then the receiver MUST interpret + // the overall status to be STATUS_CODE_ERROR. + sendCode: otlptrace.Status_STATUS_CODE_UNSET, + sendDeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR, + expectedRcvCode: otlptrace.Status_STATUS_CODE_ERROR, + }, + { + // If code!=STATUS_CODE_UNSET then the value of `deprecated_code` MUST be + // ignored, the `code` field is the sole carrier of the status. + sendCode: otlptrace.Status_STATUS_CODE_OK, + sendDeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_OK, + expectedRcvCode: otlptrace.Status_STATUS_CODE_OK, + }, + { + // If code!=STATUS_CODE_UNSET then the value of `deprecated_code` MUST be + // ignored, the `code` field is the sole carrier of the status. 
+            sendCode:           otlptrace.Status_STATUS_CODE_OK,
+            sendDeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR,
+            expectedRcvCode:    otlptrace.Status_STATUS_CODE_OK,
+        },
+        {
+            // If code!=STATUS_CODE_UNSET then the value of `deprecated_code` MUST be
+            // ignored, the `code` field is the sole carrier of the status.
+            sendCode:           otlptrace.Status_STATUS_CODE_ERROR,
+            sendDeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_OK,
+            expectedRcvCode:    otlptrace.Status_STATUS_CODE_ERROR,
+        },
+        {
+            // If code!=STATUS_CODE_UNSET then the value of `deprecated_code` MUST be
+            // ignored, the `code` field is the sole carrier of the status.
+            sendCode:           otlptrace.Status_STATUS_CODE_ERROR,
+            sendDeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR,
+            expectedRcvCode:    otlptrace.Status_STATUS_CODE_ERROR,
+        },
+    }
+
+    for _, test := range tests {
+        resourceSpans := []*otlptrace.ResourceSpans{
+            {
+                InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{
+                    {
+                        Spans: []*otlptrace.Span{
+                            {
+                                Status: otlptrace.Status{
+                                    Code:           test.sendCode,
+                                    DeprecatedCode: test.sendDeprecatedCode,
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        }
+
+        req := &collectortrace.ExportTraceServiceRequest{
+            ResourceSpans: resourceSpans,
+        }
+
+        traceSink.Reset()
+
+        resp, err := traceClient.Export(context.Background(), req)
+        require.NoError(t, err, "Failed to export trace: %v", err)
+        require.NotNil(t, resp, "The response is missing")
+
+        require.Equal(t, 1, len(traceSink.AllTraces()), "unexpected length: %v", len(traceSink.AllTraces()))
+
+        rcvdStatus := traceSink.AllTraces()[0].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).Status()
+
+        // Check that Code is as expected.
+        assert.EqualValues(t, rcvdStatus.Code(), test.expectedRcvCode)
+
+        // Check that DeprecatedCode is passed as is.
+        assert.EqualValues(t, rcvdStatus.DeprecatedCode(), test.sendDeprecatedCode)
+    }
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/DESIGN.md b/internal/otel_collector/receiver/prometheusreceiver/DESIGN.md
new file mode 100644
index 00000000000..b46fabda8a9
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/DESIGN.md
@@ -0,0 +1,584 @@
+## Design Goals
+
+### Provide a seamless onboarding experience for users who are already familiar with Prometheus scrape config
+
+Prometheus has a very powerful configuration system that lets users control how
+Prometheus scrapes metrics data from any application exposing a
+Prometheus-format metrics endpoint. It provides very useful features like
+filtering unwanted metrics, relabeling tags, etc. The original Prometheus
+receiver of OpenTelemetry took the approach of using Prometheus' own scraper
+source code as a library to achieve this goal. Overall the idea was great;
+however, the original implementation had a lot of glitches that could not be
+fixed with small patches. This new Prometheus receiver follows the same idea of
+leveraging the Prometheus source code, with a proper implementation.
+
+### Map Prometheus metrics to the corresponding OpenTelemetry metrics properly
+
+The Prometheus receiver shall be able to map Prometheus metrics to
+OpenTelemetry's proto-based metrics; it shall respect the original metric name,
+value, and timestamp, as well as tags. It doesn't need to provide a one-to-one
+mapping, since the supported metric types differ between the two systems.
+However, it shall not drop data.
+
+### Parity between Prometheus and OpenTelemetry Prometheus exporter
+
+Prometheus itself can also be used as an exporter: it can expose the metrics it
+scrapes from other systems on its own metrics endpoint, and so can the
+OpenTelemetry service. We shall be able to retain parity between the following
+two setups:
+
+1. app -> prometheus -> metric-endpoint
+2. app -> otelcol-with-prometheus-receiver -> otelcol-prometheus-exporter-metrics-endpoint
+
+
+## Prometheus Text Format Overview
+
+The Prometheus text format is a line-oriented format. Each non-empty line that
+does not begin with # is a metric data point, which includes a metric name and
+its value, which is of float64 type, as well as some optional data such as tags
+and a timestamp in milliseconds. Lines that begin with # are either comments,
+which need to be filtered out, or metadata, including type hints and units,
+which usually indicates the beginning of a new individual metric or a group of
+new metrics. More details of the Prometheus text format can be found in its
+[official
+document](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format).
+
+### Metric types
+Based on this document, Prometheus supports the following 5 types of metrics:
+* Counter
+* Gauge
+* Histogram
+* Summary
+* Untyped (untyped metrics are converted to `gauge` by default)
+
+However, this is not the whole story: the implementation of the Prometheus
+scraper, on which the receiver is based, supports a couple more undocumented
+metric types, including:
+
+* Gaugehistogram
+* Info
+* Stateset
+
+More details can be found in the
+[prometheus text parser source code]( https://github.com/prometheus/prometheus/blob/master/pkg/textparse/interface.go#L82)
+
+### Metric Grouping
+
+Other than the metric types, the type-hint comments and metric grouping are
+also important to know in order to parse Prometheus text metrics properly. On
+any Prometheus metrics endpoint, metrics are usually grouped together, starting
+with a comment section that includes some very important information like type
+hints about the metrics, and metric points of the same group will have the
+same metric name but a different set of tag values, for example:
+
+```
+# HELP container_cpu_load_average_10s Value of container cpu load average over the last 10 seconds.
+# TYPE container_cpu_load_average_10s gauge
+container_cpu_load_average_10s{id="/",image="",name=""} 0
+container_cpu_load_average_10s{id="/000-metadata",image="",name=""} 0
+container_cpu_load_average_10s{id="/001-sysfs",image="",name=""} 0
+```
+
+The above example was taken from a cAdvisor metric endpoint. The type hint
+tells that the name of this metric group is `container_cpu_load_average_10s`
+and that it is of `gauge` type. It is followed by some individual metric points
+that share the same metric name. The individual metrics within this group share
+the same set of tag keys, with unique value sets.
+
+## Prometheus Metric Scraper Anatomy
+
+The metrics scraper is the component used to scrape remote Prometheus metric
+endpoints; it is also the component on which the Prometheus receiver is based.
+It's important to understand how it works in order to implement the receiver
+properly.
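+
+To make the format description above concrete, here is a small, self-contained
+sketch that reads the simple, unescaped, timestamp-free form of such a page. It
+is illustrative only; the receiver itself relies on Prometheus's own
+`textparse` package (described below), and every name in this sketch is
+hypothetical:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// sample is one parsed data point: metric name, tags, and a float64 value.
+type sample struct {
+	name   string
+	labels map[string]string
+	value  float64
+}
+
+// parseLine handles the simple form `name{k="v",...} value`. Escaping,
+// timestamps, and other corner cases are deliberately ignored here.
+func parseLine(line string) (sample, error) {
+	s := sample{labels: map[string]string{}}
+	rest := line
+	if i := strings.Index(line, "{"); i >= 0 {
+		j := strings.LastIndex(line, "}")
+		if j < i {
+			return s, fmt.Errorf("unbalanced braces: %q", line)
+		}
+		s.name = line[:i]
+		for _, pair := range strings.Split(line[i+1:j], ",") {
+			if kv := strings.SplitN(pair, "=", 2); len(kv) == 2 {
+				s.labels[kv[0]] = strings.Trim(kv[1], `"`)
+			}
+		}
+		rest = strings.TrimSpace(line[j+1:])
+	} else if fields := strings.Fields(line); len(fields) == 2 {
+		s.name, rest = fields[0], fields[1]
+	} else {
+		return s, fmt.Errorf("malformed line: %q", line)
+	}
+	fields := strings.Fields(rest)
+	if len(fields) == 0 {
+		return s, fmt.Errorf("missing value: %q", line)
+	}
+	v, err := strconv.ParseFloat(fields[0], 64)
+	s.value = v
+	return s, err
+}
+
+func main() {
+	page := `# HELP container_cpu_load_average_10s Value of container cpu load average over the last 10 seconds.
+# TYPE container_cpu_load_average_10s gauge
+container_cpu_load_average_10s{id="/",image="",name=""} 0`
+	for _, line := range strings.Split(page, "\n") {
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue // comments and metadata are handled separately
+		}
+		if s, err := parseLine(line); err == nil {
+			fmt.Printf("%s %v = %g\n", s.name, s.labels, s.value)
+		}
+	}
+}
+```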
+
+### Major components of the Prometheus Scrape package
+
+- **[ScrapeManager](https://github.com/prometheus/prometheus/blob/v2.9.2/scrape/manager.go):**
+the component which loads the scrape_config and manages the scraping tasks
+
+- **[ScrapePool](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/scrape.go#L154-L439):**
+an object which manages the scrapes for a set of targets
+
+- **[Scraper](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/scrape.go#L506-L511):**
+an HTTP client to fetch data from remote metrics endpoints
+
+- **[Target](https://github.com/prometheus/prometheus/blob/v2.9.2/scrape/target.go):**
+the remote metric endpoint, as well as related relabeling settings and other metadata
+
+- **[TextParser](https://github.com/prometheus/prometheus/tree/v2.9.2/pkg/textparse):**
+a DFA-style streaming decoder/parser for the Prometheus text format
+
+- **[Appendable](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/manager.go#L37-L39):**
+used to acquire a storage appender instance at the beginning of each scrapeLoop run
+
+- **[storage.Appender](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/storage/interface.go#L86-L95):**
+an abstraction of the metric storage, which can be a filesystem, a database, a remote endpoint, etc. For the OpenTelemetry Prometheus receiver, this is
+also the interface we need to implement to provide a customized storage appender backed by a metrics sink.
+
+- **[ScrapeLoop](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/scrape.go#L586-L1024):**
+the actual scrape pipeline which performs the main scraping and ingestion logic
+
+### Prometheus ScrapeLoop workflow explained
+Each scraping cycle is triggered by a configured interval; its workflow is as
+shown in the flowchart below:
+
+![ScrapeLoop Flowchart](scrapeloop-flowchart.png)
+
+It basically does the following things in turn:
+
+  1. make an HTTP call to fetch data from the binding [target](#target)'s metrics endpoint with the [scraper](#scraper)
+  2. acquire a [storage appender](#storage-appender) instance via the [Appendable](#appendable) interface
+  3. feed the data to a text parser
+  4. parse and feed metric data points to the storage appender
+  5. commit on success, or roll back otherwise
+  6. report the task status
+
+## Implementing Prometheus storage.Appender with metrics sink
+
+### The storage.Appender interface
+As discussed in the previous section, the storage.Appender is the most
+important component for us to implement so as to bring the two worlds together.
+It has a very simple interface, which is defined below:
+```go
+type Appender interface {
+	Add(l labels.Labels, t int64, v float64) (uint64, error)
+
+	AddFast(l labels.Labels, ref uint64, t int64, v float64) error
+
+	// Commit submits the collected samples and purges the batch.
+	Commit() error
+
+	Rollback() error
+}
+```
+
+*Note: the above code belongs to the Prometheus project, its license can be found [here](https://github.com/prometheus/prometheus/blob/v2.9.2/LICENSE)*
+
+One can see that the interface is very simple, it only has 4 methods: `Add`,
+`AddFast`, `Commit` and `Rollback`. The last two methods are easy to
+understand: `Commit` is called when the processing of the scraped page is
+completed successfully, whereas `Rollback` is called if an error occurs during
+processing.
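+
+Before looking at the `Add`/`AddFast` semantics in detail, a minimal sketch of
+what such a sink-backed appender could look like may help. This is not the
+receiver's actual implementation; the type names, the `sink` field, and the
+buffering strategy are placeholders for illustration only:
+
+```go
+import "github.com/prometheus/prometheus/pkg/labels"
+
+// point is one buffered data point of the in-flight scrape.
+type point struct {
+	lset  labels.Labels
+	ts    int64
+	value float64
+}
+
+// sinkAppender is a hypothetical storage.Appender that buffers scraped
+// samples and hands them to a metrics sink on Commit.
+type sinkAppender struct {
+	points []point
+	sink   func([]point) error
+}
+
+// Add buffers a sample. Returning ref 0 opts out of the scrapeLoop's
+// reference-number caching discussed below, so AddFast never receives a
+// meaningful ref from us.
+func (a *sinkAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+	a.points = append(a.points, point{lset: l, ts: t, value: v})
+	return 0, nil
+}
+
+// AddFast would be called with a ref previously returned by Add.
+func (a *sinkAppender) AddFast(l labels.Labels, ref uint64, t int64, v float64) error {
+	_, err := a.Add(l, t, v)
+	return err
+}
+
+// Commit hands the points of a successful scrape to the sink and resets the buffer.
+func (a *sinkAppender) Commit() error {
+	pts := a.points
+	a.points = nil
+	return a.sink(pts)
+}
+
+// Rollback drops everything buffered for the failed scrape.
+func (a *sinkAppender) Rollback() error {
+	a.points = nil
+	return nil
+}
+```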
+
+However, for the two methods starting with `Add`, there is no documentation in
+the Prometheus project for how they should be used. Examining the scrapeLoop
+source code, as well as some storage.Appender implementations, indicates that
+the `Add` method is used when a unique metric (a unique combination of metric
+name and tags) is seen for the first time. The `Add` method can return a
+non-zero reference number, which the scrapeLoop caches together with the
+metric's unique signature. The next time the metric is seen, such as in the
+next scrape cycle of the same target, it is matched by its signature and the
+`AddFast` method is called with the cached reference number. This reference
+number might make sense to databases which use numbers as unique keys;
+however, in our use case it's not necessary, so we can always return a 0 ref
+number from the `Add` method to skip this caching mechanism.
+
+### Challenges and solutions
+Even though the definition of this interface is very simple, implementing it
+properly is a bit challenging: every time the Add/AddFast method is called, it
+only provides information about the current data point; the context of which
+metric group this data point belongs to is not provided, so we have to keep
+track of it internally within the appender. And this is not the whole story;
+there are a couple of other issues we need to address, including:
+
+1. Have a way to link the Target with the current appender instance
+
+The labels provided to the Add/AddFast methods do not include some
+target-specific information, such as the `job name`, which is important for
+constructing the [Node
+proto](https://github.com/census-instrumentation/opencensus-proto/blob/e2601ef16f8a085a69d94ace5133f97438f8945f/src/opencensus/proto/agent/common/v1/common.proto#L36-L51)
+object of OpenTelemetry. The target object is not accessible from the Appender
+interface; however, we can get it from the ScrapeManager, so when designing the
+appender we need a way to inject the binding target into the appender
+instance.
+
+2. Group metrics from the same family together
+
+In OpenTelemetry, metric points of the same name are usually grouped together
+as one timeseries with different data points. It's important for the appender
+to keep track of metric-family changes and group metrics of the same family
+together. Keep in mind that the Add/AddFast methods operate in a streaming
+manner: ScrapeLoop does not provide any direct hints on a metric name change,
+so the appender itself needs to keep track of it. It's also important to know
+that for some special types such as `histogram` and `summary`, not all the
+data points have the same name; there are some special metric points with
+suffixes like `_sum` and `_count`. We need to handle these properly and not
+consider them a metric family change.
+
+3. Group complex metrics such as histogram together in proper order
+
+In Prometheus, a single aggregated metric type such as `histogram` or
+`summary` is represented by multiple metric data points, such as buckets and
+quantiles, as well as the additional `_sum` and `_count` data. ScrapeLoop will
+feed them into the appender individually. The appender needs a way to bundle
+them together to transform them into a single Metric Datapoint Proto object.
+
+4. Tags need to be handled carefully
+
+ScrapeLoop strips out any tag with an empty value; however, in OpenTelemetry,
+the tag keys are stored separately, and we need to be able to get all the
+possible tag keys of the same metric family before committing the metric
+family to the sink.
+
+5. StartTimestamp and values of metrics of cumulative types
+
+In OpenTelemetry, every metric of a cumulative type is required to have a
+StartTimestamp, which records when the metric was first recorded; however,
+Prometheus does not provide such data. One solution to this problem is to
+cache the first observed value of these metrics as well as the timestamp, and
+then, for any subsequent data of the same metric, use the cached timestamp as
+the StartTimestamp and the delta from the first value as the value. However,
+metrics can come and go, and the remote server can restart at any given time,
+so the receiver also needs to handle cases such as a new value being smaller
+than the previously seen value, by treating it as a metric with a new
+StartTime.
+
+## Prometheus Metric to OpenTelemetry Metric Proto Mapping
+
+### Target as Node
+The Target of Prometheus is defined by the scrape_config. It has information
+such as the `hostname` of the remote service and a user-defined `job name`,
+which can be used as the service name. These two pieces of information make it
+a great fit to map onto the `Node` proto of the OpenTelemetry MetricsData
+type, as shown below:
+
+```go
+type MetricsData struct {
+	Node     *commonpb.Node
+	Resource *resourcepb.Resource
+	Metrics  []*metricspb.Metric
+}
+```
+
+The scrape page as a whole can also fit into the above `MetricsData` data
+structure, and all the metric data points can be stored in the `Metrics`
+array. We will explain the mappings of individual metric types in the
+following sections.
+
+### Metric Value Mapping
+In OpenTelemetry, metric value types can be either `int64` or `float64`,
+while in Prometheus the value can safely be assumed to always be `float64`,
+based on the [Prometheus Text Format
+Document](https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details)
+as quoted below:
+
+> value is a float represented as required by Go's ParseFloat() function.
+> In addition to standard numerical values, NaN, +Inf, and -Inf are valid
+> values representing not a number, positive infinity, and negative infinity,
+> respectively.
+
+It therefore makes sense for us to stick with this data type as much as
+possible across all metric types.
+
+### Counter
+A counter, as described in the [Prometheus Metric Types
+Document](https://prometheus.io/docs/concepts/metric_types/#counter),
+
+> is a cumulative metric that represents a single monotonically increasing
+> counter whose value can only increase or be reset to zero on restart.
+
+It is one of the simplest metric types found in both systems; however, it is
+a cumulative type of metric. Consider what happens when we have two
+consecutive scrapes from a target, with the first one as shown below:
+```
+# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{method="post",code="200"} 1027
+http_requests_total{method="post",code="400"} 3
+```
+
+and the 2nd one:
+```
+# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{method="post",code="200"} 1028
+http_requests_total{method="post",code="400"} 5
+```
+
+The Prometheus Receiver will only produce one Metric from the 2nd scrape and
+subsequent ones, if any. The 1st scrape, however, is stored as metadata to
+calculate a delta from.
+
+The output of the 2nd scrape is as shown below:
+```go
+metrics := []*metricspb.Metric{
+	{
+		MetricDescriptor: &metricspb.MetricDescriptor{
+			Name:      "http_requests_total",
+			Type:      metricspb.MetricDescriptor_CUMULATIVE_DOUBLE,
+			LabelKeys: []*metricspb.LabelKey{{Key: "method"}, {Key: "code"}}},
+		Timeseries: []*metricspb.TimeSeries{
+			{
+				StartTimestamp: startTimestamp,
+				LabelValues:    []*metricspb.LabelValue{{Value: "post", HasValue: true}, {Value: "200", HasValue: true}},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1.0}},
+				},
+			},
+			{
+				StartTimestamp: startTimestamp,
+				LabelValues:    []*metricspb.LabelValue{{Value: "post", HasValue: true}, {Value: "400", HasValue: true}},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}},
+				},
+			},
+		},
+	},
+}
+```
+
+*Note: `startTimestamp` is the timestamp cached from the first scrape, and `currentTimestamp` is the timestamp of the current scrape.*
+
+
+### Gauge
+A gauge, as described in the [Prometheus Metric Types Document](https://prometheus.io/docs/concepts/metric_types/#gauge),
+> is a metric that represents a single numerical value that can arbitrarily go up and down
+
+```
+# HELP gauge_test some test gauges.
+# TYPE gauge_test gauge
+gauge_test{id="1",foo="bar"} 1.0
+gauge_test{id="2",foo=""} 2.0
+
+```
+
+A major difference between the Gauges of Prometheus and OpenTelemetry is the
+value type. In Prometheus, as mentioned earlier, all values can be considered
+floats; in OpenTelemetry, however, Gauges can be either `Int64` or `Double`.
+To make the transformation easier, we always assume the data type is
+`Double`.
+
+The corresponding OpenTelemetry Metric of the above example will be:
+```go
+metrics := []*metricspb.Metric{
+	{
+		MetricDescriptor: &metricspb.MetricDescriptor{
+			Name:      "gauge_test",
+			Type:      metricspb.MetricDescriptor_GAUGE_DOUBLE,
+			LabelKeys: []*metricspb.LabelKey{{Key: "id"}, {Key: "foo"}}},
+		Timeseries: []*metricspb.TimeSeries{
+			{
+				StartTimestamp: nil,
+				LabelValues:    []*metricspb.LabelValue{{Value: "1", HasValue: true}, {Value: "bar", HasValue: true}},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1.0}},
+				},
+			},
+			{
+				StartTimestamp: nil,
+				LabelValues:    []*metricspb.LabelValue{{Value: "2", HasValue: true}, {Value: "", HasValue: false}},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}},
+				},
+			},
+		},
+	},
+}
+```
+
+### Histogram
+A histogram is a complex data type; in Prometheus, it uses multiple data
+points to represent a single histogram. Its description can be found in:
+[Prometheus
+Histogram](https://prometheus.io/docs/concepts/metric_types/#histogram).
+
+Similar to a counter, a histogram is also a cumulative metric type, so only
+the 2nd and subsequent scrapes can produce a metric for OpenTelemetry, with
+the first scrape stored as metadata.
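+
+The start-time caching described for counters applies here as well. A minimal
+sketch of that bookkeeping (hypothetical names, simplified to a single float
+value per series; a real histogram needs this per bucket, per sum, and per
+count) could look like this:
+
+```go
+// seriesState is the metadata cached from the first scrape of one series.
+type seriesState struct {
+	startTimeMs int64   // timestamp of the first observation
+	firstValue  float64 // value observed at that time
+}
+
+// startTimeCache maps a series signature (metric name plus sorted tags)
+// to its cached first observation.
+type startTimeCache map[string]seriesState
+
+// adjust returns the StartTimestamp and delta value to emit for a new
+// observation; the first observation of a series produces no output.
+func (c startTimeCache) adjust(sig string, tsMs int64, v float64) (startMs int64, delta float64, emit bool) {
+	prev, seen := c[sig]
+	if !seen || v < prev.firstValue {
+		// First observation, or the remote process restarted and the
+		// cumulative value went backwards: restart the series here.
+		c[sig] = seriesState{startTimeMs: tsMs, firstValue: v}
+		return 0, 0, false
+	}
+	return prev.startTimeMs, v - prev.firstValue, true
+}
+```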
+
+An example of a histogram, with the first scrape response:
+```
+# HELP hist_test This is my histogram vec
+# TYPE hist_test histogram
+hist_test_bucket{t1="1",le="10.0"} 1.0
+hist_test_bucket{t1="1",le="20.0"} 3.0
+hist_test_bucket{t1="1",le="+inf"} 10.0
+hist_test_sum{t1="1"} 100.0
+hist_test_count{t1="1"} 10.0
+hist_test_bucket{t1="2",le="10.0"} 10.0
+hist_test_bucket{t1="2",le="20.0"} 30.0
+hist_test_bucket{t1="2",le="+inf"} 100.0
+hist_test_sum{t1="2"} 10000.0
+hist_test_count{t1="2"} 100.0
+
+```
+
+And a subsequent 2nd scrape response:
+```
+# HELP hist_test This is my histogram vec
+# TYPE hist_test histogram
+hist_test_bucket{t1="1",le="10.0"} 2.0
+hist_test_bucket{t1="1",le="20.0"} 6.0
+hist_test_bucket{t1="1",le="+inf"} 13.0
+hist_test_sum{t1="1"} 150.0
+hist_test_count{t1="1"} 13.0
+hist_test_bucket{t1="2",le="10.0"} 10.0
+hist_test_bucket{t1="2",le="20.0"} 30.0
+hist_test_bucket{t1="2",le="+inf"} 100.0
+hist_test_sum{t1="2"} 10000.0
+hist_test_count{t1="2"} 100.0
+
+```
+
+Its corresponding OpenTelemetry metrics will be:
+```go
+metrics := []*metricspb.Metric{
+	{
+		MetricDescriptor: &metricspb.MetricDescriptor{
+			Name:      "hist_test",
+			Type:      metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION,
+			LabelKeys: []*metricspb.LabelKey{{Key: "t1"}}},
+		Timeseries: []*metricspb.TimeSeries{
+			{
+				StartTimestamp: startTimestamp,
+				LabelValues:    []*metricspb.LabelValue{{Value: "1", HasValue: true}},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_DistributionValue{
+						DistributionValue: &metricspb.DistributionValue{
+							BucketOptions: &metricspb.DistributionValue_BucketOptions{
+								Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+									Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+										Bounds: []float64{10, 20},
+									},
+								},
+							},
+							Count:   3,
+							Sum:     50.0,
+							Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 2}, {Count: 0}},
+						}}},
+				},
+			},
+			{
+				StartTimestamp: startTimestamp,
+				LabelValues:    []*metricspb.LabelValue{{Value: "2", HasValue: true}},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_DistributionValue{
+						DistributionValue: &metricspb.DistributionValue{
+							BucketOptions: &metricspb.DistributionValue_BucketOptions{
+								Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+									Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+										Bounds: []float64{10, 20},
+									},
+								},
+							},
+							Count:   0,
+							Sum:     0.0,
+							Buckets: []*metricspb.DistributionValue_Bucket{{Count: 0}, {Count: 0}, {Count: 0}},
+						}}},
+				},
+			},
+		},
+	},
+}
+
+```
+
+There's an important difference between a Prometheus bucket and an
+OpenTelemetry bucket: bucket counts from Prometheus are cumulative. To
+transform them into the OpenTelemetry format, one needs to apply the
+following formula:
+
+```
+CurrentOCBucketValue = CurrentPrometheusBucketValue - PrevPrometheusBucketValue
+```
+
+OpenTelemetry does not use `+inf` as an explicit bound; one needs to remove
+it when generating the OpenTelemetry Bounds.
+
+Other than that, the `SumOfSquaredDeviation`, which is required by the
+OpenTelemetry format for histograms, is not provided by Prometheus. We have
+to set this value to `0` instead.
+
+### Gaugehistogram
+
+This is an undocumented data type, and it is currently not supported.
+
+### Summary
+
+Same as the histogram, a summary is also a complex metric type that is
+represented by multiple data points. A detailed description can be found in
+[Prometheus
+Summary](https://prometheus.io/docs/concepts/metric_types/#summary).
+
+The sum and count of a summary are also cumulative; however, the quantiles
+are not. The receiver will still consider the first scrape as metadata and
+won't produce an output for it. For any subsequent scrapes, the count and sum
+will be deltas from the first scrape, while the quantiles are left as they
+are.
+
+Take the following two scrapes, with the first one:
+
+```
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 0.0001271
+go_gc_duration_seconds{quantile="0.25"} 0.0002455
+go_gc_duration_seconds{quantile="0.5"} 0.0002904
+go_gc_duration_seconds{quantile="0.75"} 0.0003426
+go_gc_duration_seconds{quantile="1"} 0.0023638
+go_gc_duration_seconds_sum 17.391350544
+go_gc_duration_seconds_count 52489
+```
+
+And the 2nd one:
+```
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 0.0001271
+go_gc_duration_seconds{quantile="0.25"} 0.0002455
+go_gc_duration_seconds{quantile="0.5"} 0.0002904
+go_gc_duration_seconds{quantile="0.75"} 0.0003426
+go_gc_duration_seconds{quantile="1"} 0.0023639
+go_gc_duration_seconds_sum 17.491350544
+go_gc_duration_seconds_count 52490
+```
+
+The corresponding OpenTelemetry metrics are as shown below:
+
+```go
+metrics := []*metricspb.Metric{
+	{
+		MetricDescriptor: &metricspb.MetricDescriptor{
+			Name:      "go_gc_duration_seconds",
+			Type:      metricspb.MetricDescriptor_SUMMARY,
+			LabelKeys: []*metricspb.LabelKey{}},
+		Timeseries: []*metricspb.TimeSeries{
+			{
+				StartTimestamp: startTimestamp,
+				LabelValues:    []*metricspb.LabelValue{},
+				Points: []*metricspb.Point{
+					{Timestamp: currentTimestamp, Value: &metricspb.Point_SummaryValue{
+						SummaryValue: &metricspb.SummaryValue{
+							Sum:   &wrappers.DoubleValue{Value: 0.1},
+							Count: &wrappers.Int64Value{Value: 1},
+							Snapshot: &metricspb.SummaryValue_Snapshot{
+								PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{
+									{Percentile: 0.0, Value: 0.0001271},
+									{Percentile: 25.0, Value: 0.0002455},
+									{Percentile: 50.0, Value: 0.0002904},
+									{Percentile: 75.0, Value: 0.0003426},
+									{Percentile: 100.0, Value: 0.0023639},
+								},
+							}}}},
+				},
+			},
+		},
+	},
+}
+
+```
+
+There are also some differences between the two systems. One difference is
+that Prometheus uses `quantile`, while OpenTelemetry uses `percentile`.
+Additionally, OpenTelemetry has optional values for the `Sum` and `Count` of
+a snapshot; however, they are not provided by Prometheus, so `nil` will be
+used for these values.
+
+Other than that, in some Prometheus client implementations, such as the
+Python version, a Summary is allowed to have no quantiles, in which case the
+receiver will produce an OpenTelemetry Summary with Snapshot set to `nil`.
+
+### Others
+
+Any other Prometheus metric types will be transformed into the OpenTelemetry
+[Gauge](#gauge) type.
diff --git a/internal/otel_collector/receiver/prometheusreceiver/README.md b/internal/otel_collector/receiver/prometheusreceiver/README.md
new file mode 100644
index 00000000000..18813909772
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/README.md
@@ -0,0 +1,62 @@
+# Prometheus Receiver
+
+Receives metric data in [Prometheus](https://prometheus.io/) format. See the
+[Design](DESIGN.md) for additional information on this receiver.
+
+Supported pipeline types: metrics
+
+## ⚠️ Warning
+
+Note: This component is currently a work in progress. It has several
+limitations, so please don't use it if any of the following is a concern:
+
+* The Collector cannot yet auto-scale the scraping when multiple replicas of
+  the Collector are run.
+* When running multiple replicas of the Collector with the same config, it
+  will scrape the targets multiple times.
+* Users need to configure each replica with a different scraping
+  configuration if they want to manually shard the scraping.
+* The Prometheus receiver is a stateful component.
+
+## Getting Started
+
+This receiver is a drop-in replacement for getting Prometheus to scrape your
+services. It supports the full set of Prometheus configuration options,
+including service discovery, just as you would write it in a YAML
+configuration file before starting Prometheus, for example with:
+
+```shell
+prometheus --config.file=prom.yaml
+```
+
+You can copy and paste that same configuration under:
+
+```yaml
+receivers:
+  prometheus:
+    config:
+```
+
+For example:
+
+```yaml
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: 'otel-collector'
+          scrape_interval: 5s
+          static_configs:
+            - targets: ['0.0.0.0:8888']
+        - job_name: k8s
+          kubernetes_sd_configs:
+          - role: pod
+          relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+            regex: "true"
+            action: keep
+          metric_relabel_configs:
+          - source_labels: [__name__]
+            regex: "(request_duration_seconds.*|response_duration_seconds.*)"
+            action: keep
+```
diff --git a/internal/otel_collector/receiver/prometheusreceiver/config.go b/internal/otel_collector/receiver/prometheusreceiver/config.go
new file mode 100644
index 00000000000..992ceb59269
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/config.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusreceiver
+
+import (
+    "time"
+
+    "github.com/prometheus/prometheus/config"
+
+    "go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config defines configuration for Prometheus receiver.
+type Config struct {
+    configmodels.ReceiverSettings `mapstructure:",squash"`
+    PrometheusConfig              *config.Config `mapstructure:"-"`
+    BufferPeriod                  time.Duration  `mapstructure:"buffer_period"`
+    BufferCount                   int            `mapstructure:"buffer_count"`
+    UseStartTimeMetric            bool           `mapstructure:"use_start_time_metric"`
+    StartTimeMetricRegex          string         `mapstructure:"start_time_metric_regex"`
+
+    // ConfigPlaceholder is just an entry to make the configuration pass a check
+    // that requires that all keys present in the config actually exist on the
+    // structure, i.e., it will error if an unknown key is present.
+ ConfigPlaceholder interface{} `mapstructure:"config"` +} diff --git a/internal/otel_collector/receiver/prometheusreceiver/config_test.go b/internal/otel_collector/receiver/prometheusreceiver/config_test.go new file mode 100644 index 00000000000..7583ff3a82e --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/config_test.go @@ -0,0 +1,146 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "fmt" + "os" + "path" + "testing" + "time" + + "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 2) + + r0 := cfg.Receivers["prometheus"] + assert.Equal(t, r0, factory.CreateDefaultConfig()) + + r1 := cfg.Receivers["prometheus/customname"].(*Config) + assert.Equal(t, r1.ReceiverSettings, + configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "prometheus/customname", + }) + assert.Equal(t, r1.PrometheusConfig.ScrapeConfigs[0].JobName, "demo") + assert.Equal(t, time.Duration(r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval), 5*time.Second) + assert.Equal(t, r1.UseStartTimeMetric, true) + assert.Equal(t, r1.StartTimeMetricRegex, "^(.+_)*process_start_time_seconds$") +} + +func TestLoadConfigWithEnvVar(t *testing.T) { + const jobname = "JobName" + const jobnamevar = "JOBNAME" + os.Setenv(jobnamevar, jobname) + + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config_env.yaml"), factories) + require.NoError(t, err) + require.NotNil(t, cfg) + + r := cfg.Receivers["prometheus"].(*Config) + assert.Equal(t, r.ReceiverSettings, + configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "prometheus", + }) + assert.Equal(t, r.PrometheusConfig.ScrapeConfigs[0].JobName, jobname) + os.Unsetenv(jobnamevar) +} + +func TestLoadConfigK8s(t *testing.T) { + const node = "node1" + const nodenamevar = "NODE_NAME" + os.Setenv(nodenamevar, node) + defer os.Unsetenv(nodenamevar) + + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config_k8s.yaml"), factories) + require.NoError(t, err) + require.NotNil(t, cfg) + + r := 
cfg.Receivers["prometheus"].(*Config)
+    assert.Equal(t, r.ReceiverSettings,
+        configmodels.ReceiverSettings{
+            TypeVal: typeStr,
+            NameVal: "prometheus",
+        })
+
+    scrapeConfig := r.PrometheusConfig.ScrapeConfigs[0]
+    kubeSDConfig := scrapeConfig.ServiceDiscoveryConfigs[0].(*kubernetes.SDConfig)
+    assert.Equal(t,
+        kubeSDConfig.Selectors[0].Field,
+        fmt.Sprintf("spec.nodeName=%s", node))
+    assert.Equal(t,
+        scrapeConfig.RelabelConfigs[1].Replacement,
+        "$1:$2")
+}
+
+func TestLoadConfigFailsOnUnknownSection(t *testing.T) {
+    factories, err := componenttest.ExampleComponents()
+    assert.NoError(t, err)
+
+    factory := NewFactory()
+    factories.Receivers[typeStr] = factory
+    cfg, err := configtest.LoadConfigFile(
+        t,
+        path.Join(".", "testdata", "invalid-config-section.yaml"), factories)
+
+    require.Error(t, err)
+    require.Nil(t, cfg)
+}
+
+// As one of the config parameters consumes the Prometheus configuration as a
+// subkey, ensure that invalid configuration within that subkey will also
+// raise an error.
+func TestLoadConfigFailsOnUnknownPrometheusSection(t *testing.T) {
+    factories, err := componenttest.ExampleComponents()
+    assert.NoError(t, err)
+
+    factory := NewFactory()
+    factories.Receivers[typeStr] = factory
+    cfg, err := configtest.LoadConfigFile(
+        t,
+        path.Join(".", "testdata", "invalid-config-prometheus-section.yaml"), factories)
+
+    require.Error(t, err)
+    require.Nil(t, cfg)
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/doc.go b/internal/otel_collector/receiver/prometheusreceiver/doc.go
new file mode 100644
index 00000000000..cc269dc5421
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheusreceiver has the logic for scraping Prometheus metrics from
+// already instrumented applications and then passing them on to a metrics sink instance.
+package prometheusreceiver
diff --git a/internal/otel_collector/receiver/prometheusreceiver/factory.go b/internal/otel_collector/receiver/prometheusreceiver/factory.go
new file mode 100644
index 00000000000..46890fa382c
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/factory.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusreceiver
+
+import (
+    "context"
+    "errors"
+    "fmt"
+
+    _ "github.com/prometheus/prometheus/discovery/install" // init() of this package registers service discovery impl.
+    "github.com/spf13/viper"
+    "gopkg.in/yaml.v2"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/consumer"
+    "go.opentelemetry.io/collector/receiver/receiverhelper"
+)
+
+// This file implements config for the Prometheus receiver.
+
+const (
+    // The value of the "type" key in configuration.
+    typeStr = "prometheus"
+
+    // The key for Prometheus scraping configs.
+    prometheusConfigKey = "config"
+)
+
+var (
+    errNilScrapeConfig = errors.New("expecting a non-nil ScrapeConfig")
+)
+
+func NewFactory() component.ReceiverFactory {
+    return receiverhelper.NewFactory(
+        typeStr,
+        createDefaultConfig,
+        receiverhelper.WithMetrics(createMetricsReceiver),
+        receiverhelper.WithCustomUnmarshaler(customUnmarshaler))
+}
+
+func customUnmarshaler(componentViperSection *viper.Viper, intoCfg interface{}) error {
+    if componentViperSection == nil {
+        return nil
+    }
+    // We need custom unmarshaling because the Prometheus "config" subkey defines
+    // its own YAML unmarshaling routines, so we need to invoke them explicitly.
+
+    err := componentViperSection.UnmarshalExact(intoCfg)
+    if err != nil {
+        return fmt.Errorf("prometheus receiver failed to parse config: %s", err)
+    }
+
+    // Unmarshal the Prometheus config values. Since Prometheus uses `yaml` tags,
+    // re-marshal the subtree and decode it with `yaml`.
+    if !componentViperSection.IsSet(prometheusConfigKey) {
+        return nil
+    }
+    promCfgMap := componentViperSection.Sub(prometheusConfigKey).AllSettings()
+    out, err := yaml.Marshal(promCfgMap)
+    if err != nil {
+        return fmt.Errorf("prometheus receiver failed to marshal config to yaml: %s", err)
+    }
+    config := intoCfg.(*Config)
+
+    err = yaml.UnmarshalStrict(out, &config.PrometheusConfig)
+    if err != nil {
+        return fmt.Errorf("prometheus receiver failed to unmarshal yaml to prometheus config: %s", err)
+    }
+    if len(config.PrometheusConfig.ScrapeConfigs) == 0 {
+        return errNilScrapeConfig
+    }
+    return nil
+}
+
+func createDefaultConfig() configmodels.Receiver {
+    return &Config{
+        ReceiverSettings: configmodels.ReceiverSettings{
+            TypeVal: typeStr,
+            NameVal: typeStr,
+        },
+    }
+}
+
+func createMetricsReceiver(
+    _ context.Context,
+    params component.ReceiverCreateParams,
+    cfg configmodels.Receiver,
+    nextConsumer consumer.MetricsConsumer,
+) (component.MetricsReceiver, error) {
+    config := cfg.(*Config)
+    if config.PrometheusConfig == nil || len(config.PrometheusConfig.ScrapeConfigs) == 0 {
+        return nil, errNilScrapeConfig
+    }
+    return newPrometheusReceiver(params.Logger, config, nextConsumer), nil
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/factory_test.go b/internal/otel_collector/receiver/prometheusreceiver/factory_test.go
new file mode 100644
index 00000000000..557c1265de2
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/factory_test.go
@@ -0,0 +1,57 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusreceiver + +import ( + "context" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + cfg := createDefaultConfig() + + // The default config does not provide scrape_config so we expect that metrics receiver + // creation must also fail. + creationParams := component.ReceiverCreateParams{Logger: zap.NewNop()} + mReceiver, err := createMetricsReceiver(context.Background(), creationParams, cfg, nil) + assert.Equal(t, err, errNilScrapeConfig) + assert.Nil(t, mReceiver) +} + +func TestFactoryCanParseServiceDiscoveryConfigs(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "config_sd.yaml"), factories) + + assert.NoError(t, err) +} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/internal_test.go b/internal/otel_collector/receiver/prometheusreceiver/internal/internal_test.go new file mode 100644 index 00000000000..14da9af4b8e --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/internal_test.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal
+
+import (
+    "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/scrape"
+    "go.uber.org/zap"
+)
+
+// test helpers
+
+var testLogger *zap.Logger
+
+func init() {
+    zl, _ := zap.NewDevelopment()
+    testLogger = zl
+}
+
+type mockMetadataCache struct {
+    data map[string]scrape.MetricMetadata
+}
+
+func newMockMetadataCache(data map[string]scrape.MetricMetadata) *mockMetadataCache {
+    return &mockMetadataCache{data: data}
+}
+
+func (m *mockMetadataCache) Metadata(metricName string) (scrape.MetricMetadata, bool) {
+    mm, ok := m.data[metricName]
+    return mm, ok
+}
+
+func (m *mockMetadataCache) SharedLabels() labels.Labels {
+    return labels.FromStrings("__scheme__", "http")
+}
+
+type mockScrapeManager struct {
+    targets map[string][]*scrape.Target
+}
+
+func (sm *mockScrapeManager) TargetsAll() map[string][]*scrape.Target {
+    return sm.targets
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/logger.go b/internal/otel_collector/receiver/prometheusreceiver/internal/logger.go
new file mode 100644
index 00000000000..f827e85245c
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/logger.go
@@ -0,0 +1,136 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+    gokitLog "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
+    "go.uber.org/zap"
+)
+
+const (
+    levelKey = "level"
+    msgKey   = "msg"
+)
+
+// NewZapToGokitLogAdapter creates an adapter from zap.Logger to gokitLog.Logger.
+func NewZapToGokitLogAdapter(logger *zap.Logger) gokitLog.Logger {
+    // need to skip two levels in order to get the correct caller:
+    // one for this method, the other for gokitLog
+    logger = logger.WithOptions(zap.AddCallerSkip(2))
+    return &zapToGokitLogAdapter{l: logger.Sugar()}
+}
+
+type zapToGokitLogAdapter struct {
+    l *zap.SugaredLogger
+}
+
+type logData struct {
+    level       level.Value
+    msg         string
+    otherFields []interface{}
+}
+
+func (w *zapToGokitLogAdapter) Log(keyvals ...interface{}) error {
+    // expecting key-value pairs; the number of items needs to be even
+    if len(keyvals)%2 == 0 {
+        // extract the log level and message and log them using the corresponding zap function
+        ld := extractLogData(keyvals)
+        logFunc := levelToFunc(w.l, ld.level)
+        logFunc(ld.msg, ld.otherFields...)
+    } else {
+        // in case something goes wrong
+        w.l.Info(keyvals...)
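+        // (an odd number of items cannot be interpreted as key/value pairs,
+        // so the raw arguments are logged as-is at info level)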
+ } + return nil +} + +func extractLogData(keyvals []interface{}) *logData { + lvl := level.InfoValue() // default + msg := "" + + other := make([]interface{}, 0, len(keyvals)) + for i := 0; i < len(keyvals); i += 2 { + key := keyvals[i] + val := keyvals[i+1] + + if l, ok := matchLogLevel(key, val); ok { + lvl = l + continue + } + + if m, ok := matchLogMessage(key, val); ok { + msg = m + continue + } + + other = append(other, key, val) + } + + return &logData{ + level: lvl, + msg: msg, + otherFields: other, + } +} + +// check if a given key-value pair represents go-kit log message and return it +func matchLogMessage(key interface{}, val interface{}) (string, bool) { + strKey, ok := key.(string) + if !ok || strKey != msgKey { + return "", false + } + + msg, ok := val.(string) + if !ok { + return "", false + } + + return msg, true +} + +// check if a given key-value pair represents go-kit log level and return it +func matchLogLevel(key interface{}, val interface{}) (level.Value, bool) { + strKey, ok := key.(string) + if !ok || strKey != levelKey { + return nil, false + } + + levelVal, ok := val.(level.Value) + if !ok { + return nil, false + } + + return levelVal, true +} + +// find a matching zap logging function to be used for a given level +func levelToFunc(logger *zap.SugaredLogger, lvl level.Value) func(string, ...interface{}) { + switch lvl { + case level.DebugValue(): + return logger.Debugw + case level.InfoValue(): + return logger.Infow + case level.WarnValue(): + return logger.Warnw + case level.ErrorValue(): + return logger.Errorw + } + + // default + return logger.Infof +} + +var _ gokitLog.Logger = (*zapToGokitLogAdapter)(nil) diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/logger_test.go b/internal/otel_collector/receiver/prometheusreceiver/internal/logger_test.go new file mode 100644 index 00000000000..2fc8dbda896 --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/logger_test.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "testing" + + "github.com/go-kit/kit/log/level" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func TestLog(t *testing.T) { + tcs := []struct { + name string + input []interface{} + wantLevel zapcore.Level + wantMessage string + }{ + { + name: "Starting provider", + input: []interface{}{ + "level", + level.DebugValue(), + "msg", + "Starting provider", + "provider", + "string/0", + "subs", + "[target1]", + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "Starting provider", + }, + { + name: "Scrape failed", + input: []interface{}{ + "level", + level.ErrorValue(), + "scrape_pool", + "target1", + "msg", + "Scrape failed", + "err", + "server returned HTTP status 500 Internal Server Error", + }, + wantLevel: zapcore.ErrorLevel, + wantMessage: "Scrape failed", + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + conf := zap.NewProductionConfig() + conf.Level.SetLevel(zapcore.DebugLevel) + + // capture zap log entry + var entry zapcore.Entry + h := func(e zapcore.Entry) error { + entry = e + return nil + } + + logger, err := conf.Build(zap.Hooks(h)) + require.NoError(t, err) + + adapter := NewZapToGokitLogAdapter(logger) + err = adapter.Log(tc.input...) + require.NoError(t, err) + + assert.Equal(t, tc.wantLevel, entry.Level) + assert.Equal(t, tc.wantMessage, entry.Message) + }) + } +} + +func TestExtractLogData(t *testing.T) { + tcs := []struct { + name string + input []interface{} + wantLevel level.Value + wantMessage string + wantOutput []interface{} + }{ + { + name: "nil fields", + input: nil, + wantLevel: level.InfoValue(), // Default + wantMessage: "", + wantOutput: []interface{}{}, + }, + { + name: "empty fields", + input: []interface{}{}, + wantLevel: level.InfoValue(), // Default + wantMessage: "", + wantOutput: []interface{}{}, + }, + { + name: "info level", + input: []interface{}{ + "level", + level.InfoValue(), + }, + wantLevel: level.InfoValue(), + wantMessage: "", + wantOutput: []interface{}{}, + }, + { + name: "warn level", + input: []interface{}{ + "level", + level.WarnValue(), + }, + wantLevel: level.WarnValue(), + wantMessage: "", + wantOutput: []interface{}{}, + }, + { + name: "error level", + input: []interface{}{ + "level", + level.ErrorValue(), + }, + wantLevel: level.ErrorValue(), + wantMessage: "", + wantOutput: []interface{}{}, + }, + { + name: "debug level + extra fields", + input: []interface{}{ + "timestamp", + 1596604719, + "level", + level.DebugValue(), + "msg", + "http client error", + }, + wantLevel: level.DebugValue(), + wantMessage: "http client error", + wantOutput: []interface{}{ + "timestamp", + 1596604719, + }, + }, + { + name: "missing level field", + input: []interface{}{ + "timestamp", + 1596604719, + "msg", + "http client error", + }, + wantLevel: level.InfoValue(), // Default + wantMessage: "http client error", + wantOutput: []interface{}{ + "timestamp", + 1596604719, + }, + }, + { + name: "invalid level type", + input: []interface{}{ + "level", + "warn", // String is not recognized + }, + wantLevel: level.InfoValue(), // Default + wantOutput: []interface{}{ + "level", + "warn", // Field is preserved + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + ld := extractLogData(tc.input) + assert.Equal(t, tc.wantLevel, ld.level) + assert.Equal(t, tc.wantMessage, ld.msg) + assert.Equal(t, tc.wantOutput, ld.otherFields) + }) + } +} diff --git 
a/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go
new file mode 100644
index 00000000000..e5b4ccd20de
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+    "errors"
+
+    "github.com/prometheus/common/model"
+    "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/scrape"
+)
+
+// MetadataCache is an adapter to Prometheus' scrape.Target that provides only
+// the functionality which is needed.
+type MetadataCache interface {
+    Metadata(metricName string) (scrape.MetricMetadata, bool)
+    SharedLabels() labels.Labels
+}
+
+type ScrapeManager interface {
+    TargetsAll() map[string][]*scrape.Target
+}
+
+type metadataService struct {
+    sm ScrapeManager
+}
+
+func (s *metadataService) Get(job, instance string) (MetadataCache, error) {
+    targetGroup, ok := s.sm.TargetsAll()[job]
+    if !ok {
+        return nil, errors.New("unable to find a target group with job=" + job)
+    }
+
+    // within the same targetGroup, an instance is not going to be duplicated
+    for _, target := range targetGroup {
+        if target.Labels().Get(model.InstanceLabel) == instance {
+            return &mCache{target}, nil
+        }
+    }
+
+    return nil, errors.New("unable to find a target with job=" + job + ", and instance=" + instance)
+}
+
+// mCache is an adapter to get metadata from scrape.Target.
+type mCache struct {
+    t *scrape.Target
+}
+
+func (m *mCache) Metadata(metricName string) (scrape.MetricMetadata, bool) {
+    return m.t.Metadata(metricName)
+}
+
+func (m *mCache) SharedLabels() labels.Labels {
+    return m.t.DiscoveredLabels()
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go
new file mode 100644
index 00000000000..2ffe19b6adc
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go
@@ -0,0 +1,373 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+    "sort"
+    "strings"
+
+    metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+    "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/pkg/textparse"
+    "github.com/prometheus/prometheus/scrape"
+    "google.golang.org/protobuf/types/known/timestamppb"
+    "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// MetricFamily is the unit corresponding to the metric items that share the
+// same TYPE/UNIT/... metadata from a single scrape.
+type MetricFamily interface {
+    Add(metricName string, ls labels.Labels, t int64, v float64) error
+    IsSameFamily(metricName string) bool
+    ToMetric() (*metricspb.Metric, int, int)
+}
+
+type metricFamily struct {
+    name              string
+    mtype             metricspb.MetricDescriptor_Type
+    mc                MetadataCache
+    droppedTimeseries int
+    labelKeys         map[string]bool
+    labelKeysOrdered  []string
+    metadata          *scrape.MetricMetadata
+    groupOrders       map[string]int
+    groups            map[string]*metricGroup
+}
+
+func newMetricFamily(metricName string, mc MetadataCache) MetricFamily {
+    familyName := normalizeMetricName(metricName)
+
+    // lookup metadata based on familyName
+    metadata, ok := mc.Metadata(familyName)
+    if !ok && metricName != familyName {
+        // use the original metricName as the metric family
+        familyName = metricName
+        // perform a second lookup with the original metric name. this can happen
+        // when a metric is not a histogram or summary but still ends with one of
+        // the _count/_sum suffixes
+        metadata, ok = mc.Metadata(metricName)
+        // still not found; this can happen when the metric has no TYPE hint
+        if !ok {
+            metadata.Metric = familyName
+            metadata.Type = textparse.MetricTypeUnknown
+        }
+    }
+
+    return &metricFamily{
+        name:              familyName,
+        mtype:             convToOCAMetricType(metadata.Type),
+        mc:                mc,
+        droppedTimeseries: 0,
+        labelKeys:         make(map[string]bool),
+        labelKeysOrdered:  make([]string, 0),
+        metadata:          &metadata,
+        groupOrders:       make(map[string]int),
+        groups:            make(map[string]*metricGroup),
+    }
+}
+
+func (mf *metricFamily) IsSameFamily(metricName string) bool {
+    // trim the known suffix if necessary
+    familyName := normalizeMetricName(metricName)
+    return mf.name == familyName || familyName != metricName && mf.name == metricName
+}
+
+// updateLabelKeys is used to store all the label keys of the same metric
+// family in observed order. since the prometheus receiver removes any label
+// with an empty value before feeding it to an appender, we need to keep track
+// of every label that has ever been observed in order to figure out the full
+// label set of the metric family.
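+// The keys are kept sorted (via the insertion sort below) so that
+// dpgSignature produces a stable group key across scrapes.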
+func (mf *metricFamily) updateLabelKeys(ls labels.Labels) { + for _, l := range ls { + if isUsefulLabel(mf.mtype, l.Name) { + if _, ok := mf.labelKeys[l.Name]; !ok { + mf.labelKeys[l.Name] = true + // use insertion sort to maintain order + i := sort.SearchStrings(mf.labelKeysOrdered, l.Name) + labelKeys := append(mf.labelKeysOrdered, "") + copy(labelKeys[i+1:], labelKeys[i:]) + labelKeys[i] = l.Name + mf.labelKeysOrdered = labelKeys + } + } + } +} + +func (mf *metricFamily) isCumulativeType() bool { + return mf.mtype == metricspb.MetricDescriptor_CUMULATIVE_DOUBLE || + mf.mtype == metricspb.MetricDescriptor_CUMULATIVE_INT64 || + mf.mtype == metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION || + mf.mtype == metricspb.MetricDescriptor_SUMMARY +} + +func (mf *metricFamily) getGroupKey(ls labels.Labels) string { + mf.updateLabelKeys(ls) + return dpgSignature(mf.labelKeysOrdered, ls) +} + +// getGroups to return groups in insertion order +func (mf *metricFamily) getGroups() []*metricGroup { + groups := make([]*metricGroup, len(mf.groupOrders)) + for k, v := range mf.groupOrders { + groups[v] = mf.groups[k] + } + + return groups +} + +func (mf *metricFamily) loadMetricGroupOrCreate(groupKey string, ls labels.Labels, ts int64) *metricGroup { + mg, ok := mf.groups[groupKey] + if !ok { + mg = &metricGroup{ + family: mf, + ts: ts, + ls: ls, + complexValue: make([]*dataPoint, 0), + } + mf.groups[groupKey] = mg + // maintaining data insertion order is helpful to generate stable/reproducible metric output + mf.groupOrders[groupKey] = len(mf.groupOrders) + } + return mg +} + +func (mf *metricFamily) getLabelKeys() []*metricspb.LabelKey { + lks := make([]*metricspb.LabelKey, len(mf.labelKeysOrdered)) + for i, k := range mf.labelKeysOrdered { + lks[i] = &metricspb.LabelKey{Key: k} + } + return lks +} + +func (mf *metricFamily) Add(metricName string, ls labels.Labels, t int64, v float64) error { + groupKey := mf.getGroupKey(ls) + mg := mf.loadMetricGroupOrCreate(groupKey, ls, t) + switch mf.mtype { + case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION: + fallthrough + case metricspb.MetricDescriptor_SUMMARY: + switch { + case strings.HasSuffix(metricName, metricsSuffixSum): + // always use the timestamp from sum (count is ok too), because the startTs from quantiles won't be reliable + // in cases like remote server restart + mg.ts = t + mg.sum = v + mg.hasSum = true + case strings.HasSuffix(metricName, metricsSuffixCount): + mg.count = v + mg.hasCount = true + default: + boundary, err := getBoundary(mf.mtype, ls) + if err != nil { + mf.droppedTimeseries++ + return err + } + mg.complexValue = append(mg.complexValue, &dataPoint{value: v, boundary: boundary}) + } + default: + mg.value = v + } + + return nil +} + +func (mf *metricFamily) ToMetric() (*metricspb.Metric, int, int) { + timeseries := make([]*metricspb.TimeSeries, 0, len(mf.groups)) + switch mf.mtype { + // not supported currently + // case metricspb.MetricDescriptor_GAUGE_DISTRIBUTION: + // return nil + case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION: + for _, mg := range mf.getGroups() { + tss := mg.toDistributionTimeSeries(mf.labelKeysOrdered) + if tss != nil { + timeseries = append(timeseries, tss) + } else { + mf.droppedTimeseries++ + } + } + case metricspb.MetricDescriptor_SUMMARY: + for _, mg := range mf.getGroups() { + tss := mg.toSummaryTimeSeries(mf.labelKeysOrdered) + if tss != nil { + timeseries = append(timeseries, tss) + } else { + mf.droppedTimeseries++ + } + } + default: + for _, mg := range mf.getGroups() { + tss := 
mg.toDoubleValueTimeSeries(mf.labelKeysOrdered)
+            if tss != nil {
+                timeseries = append(timeseries, tss)
+            } else {
+                mf.droppedTimeseries++
+            }
+        }
+    }
+
+    // note: the total number of timeseries is the length of timeseries plus the number of dropped timeseries.
+    numTimeseries := len(timeseries)
+    if numTimeseries != 0 {
+        return &metricspb.Metric{
+            MetricDescriptor: &metricspb.MetricDescriptor{
+                Name:        mf.name,
+                Description: mf.metadata.Help,
+                Unit:        heuristicalMetricAndKnownUnits(mf.name, mf.metadata.Unit),
+                Type:        mf.mtype,
+                LabelKeys:   mf.getLabelKeys(),
+            },
+            Timeseries: timeseries,
+        }, numTimeseries + mf.droppedTimeseries, mf.droppedTimeseries
+    }
+    return nil, mf.droppedTimeseries, mf.droppedTimeseries
+}
+
+type dataPoint struct {
+    value    float64
+    boundary float64
+}
+
+// metricGroup represents a single metric of a metric family. for example, a
+// histogram metric is usually represented by a couple of data points in
+// complexValue (buckets) plus count/sum; a group of a metric family always
+// shares the same set of tags. for simple types like counter and gauge, each
+// data point is a group of itself.
+type metricGroup struct {
+    family       *metricFamily
+    ts           int64
+    ls           labels.Labels
+    count        float64
+    hasCount     bool
+    sum          float64
+    hasSum       bool
+    value        float64
+    complexValue []*dataPoint
+}
+
+func (mg *metricGroup) sortPoints() {
+    sort.Slice(mg.complexValue, func(i, j int) bool {
+        return mg.complexValue[i].boundary < mg.complexValue[j].boundary
+    })
+}
+
+func (mg *metricGroup) toDistributionTimeSeries(orderedLabelKeys []string) *metricspb.TimeSeries {
+    if !(mg.hasCount && mg.hasSum) || len(mg.complexValue) == 0 {
+        return nil
+    }
+    mg.sortPoints()
+    // for the OCAgent proto, the bounds won't include +inf
+    bounds := make([]float64, len(mg.complexValue)-1)
+    buckets := make([]*metricspb.DistributionValue_Bucket, len(mg.complexValue))
+
+    for i := 0; i < len(mg.complexValue); i++ {
+        if i != len(mg.complexValue)-1 {
+            // no need to add +inf as a bound to the oc proto
+            bounds[i] = mg.complexValue[i].boundary
+        }
+        adjustedCount := mg.complexValue[i].value
+        if i != 0 {
+            adjustedCount -= mg.complexValue[i-1].value
+        }
+        buckets[i] = &metricspb.DistributionValue_Bucket{Count: int64(adjustedCount)}
+    }
+
+    dv := &metricspb.DistributionValue{
+        BucketOptions: &metricspb.DistributionValue_BucketOptions{
+            Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+                Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+                    Bounds: bounds,
+                },
+            },
+        },
+        Count:   int64(mg.count),
+        Sum:     mg.sum,
+        Buckets: buckets,
+        // SumOfSquaredDeviation: // there's no way to compute this value from prometheus data
+    }
+
+    return &metricspb.TimeSeries{
+        StartTimestamp: timestampFromMs(mg.ts),
+        LabelValues:    populateLabelValues(orderedLabelKeys, mg.ls),
+        Points: []*metricspb.Point{
+            {
+                Timestamp: timestampFromMs(mg.ts),
+                Value:     &metricspb.Point_DistributionValue{DistributionValue: dv},
+            },
+        },
+    }
+}
+
+func (mg *metricGroup) toSummaryTimeSeries(orderedLabelKeys []string) *metricspb.TimeSeries {
+    // count and sum are expected to be provided; however, they can be missing
+    // in the following two cases:
+    // 1. the data is corrupted
+    // 2. they were ignored by the startValue evaluation
+    if !(mg.hasCount && mg.hasSum) {
+        return nil
+    }
+    mg.sortPoints()
+    percentiles := make([]*metricspb.SummaryValue_Snapshot_ValueAtPercentile, len(mg.complexValue))
+    for i, p := range mg.complexValue {
+        percentiles[i] =
+            &metricspb.SummaryValue_Snapshot_ValueAtPercentile{Percentile: p.boundary * 100, Value: p.value}
+    }
+
+    // allow the snapshot to be nil when no percentile data is provided by prometheus
+    var snapshot *metricspb.SummaryValue_Snapshot
+    if len(percentiles) != 0 {
+        snapshot = &metricspb.SummaryValue_Snapshot{
+            PercentileValues: percentiles,
+        }
+    }
+
+    // Based on the summary description from https://prometheus.io/docs/concepts/metric_types/#summary
+    // the quantiles are calculated over a sliding time window; however, the count is the total count
+    // of observations and the corresponding sum is the sum of all observed values, thus the sum and
+    // count are used at the global level of the metricspb.SummaryValue.
+
+    summaryValue := &metricspb.SummaryValue{
+        Sum:      &wrapperspb.DoubleValue{Value: mg.sum},
+        Count:    &wrapperspb.Int64Value{Value: int64(mg.count)},
+        Snapshot: snapshot,
+    }
+    return &metricspb.TimeSeries{
+        StartTimestamp: timestampFromMs(mg.ts),
+        LabelValues:    populateLabelValues(orderedLabelKeys, mg.ls),
+        Points: []*metricspb.Point{
+            {Timestamp: timestampFromMs(mg.ts), Value: &metricspb.Point_SummaryValue{SummaryValue: summaryValue}},
+        },
+    }
+}
+
+func (mg *metricGroup) toDoubleValueTimeSeries(orderedLabelKeys []string) *metricspb.TimeSeries {
+    var startTs *timestamppb.Timestamp
+    // gauge/undefined types have no start time
+    if mg.family.isCumulativeType() {
+        startTs = timestampFromMs(mg.ts)
+    }
+
+    return &metricspb.TimeSeries{
+        StartTimestamp: startTs,
+        Points:         []*metricspb.Point{{Timestamp: timestampFromMs(mg.ts), Value: &metricspb.Point_DoubleValue{DoubleValue: mg.value}}},
+        LabelValues:    populateLabelValues(orderedLabelKeys, mg.ls),
+    }
+}
+
+func populateLabelValues(orderedKeys []string, ls labels.Labels) []*metricspb.LabelValue {
+    lvs := make([]*metricspb.LabelValue, len(orderedKeys))
+    lmap := ls.Map()
+    for i, k := range orderedKeys {
+        value := lmap[k]
+        lvs[i] = &metricspb.LabelValue{Value: value, HasValue: value != ""}
+    }
+    return lvs
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go
new file mode 100644
index 00000000000..4e734faff3f
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go
@@ -0,0 +1,371 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+    "fmt"
+    "strings"
+    "sync"
+    "time"
+
+    metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+    "go.uber.org/zap"
+    "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Notes on garbage collection (gc):
+//
+// Job-level gc:
+// The Prometheus receiver will likely execute in a long running service whose lifetime may exceed
+// the lifetimes of many of the jobs that it is collecting from. In order to keep the JobsMap from
+// leaking memory for entries of no-longer existing jobs, the JobsMap needs to remove entries that
+// haven't been accessed for a long period of time.
+//
+// Timeseries-level gc:
+// Some jobs that the Prometheus receiver is collecting from may export timeseries based on metrics
+// from other jobs (e.g. cAdvisor). In order to keep the timeseriesMap from leaking memory for entries
+// of no-longer existing jobs, the timeseriesMap for each job needs to remove entries that haven't
+// been accessed for a long period of time.
+//
+// The gc strategy uses a standard mark-and-sweep approach - each time a timeseriesMap is accessed,
+// it is marked. Similarly, each time a timeseriesinfo is accessed, it is also marked.
+//
+// At the end of each JobsMap.get(), if the time since the JobsMap was last gc'd exceeds 'gcInterval',
+// the JobsMap is locked, any timeseriesMaps that are unmarked are removed from the JobsMap, and the
+// marked timeseriesMaps are gc'd.
+//
+// The gc for the timeseriesMap is straightforward - the map is locked and, for each timeseriesinfo
+// in the map, if it has not been marked, it is removed; otherwise it is unmarked.
+//
+// Alternative Strategies
+// 1. If the job-level gc doesn't run often enough, or runs too often, a separate go routine can
+//    be spawned at JobMap creation time that gc's at periodic intervals. This approach potentially
+//    adds more contention and latency to each scrape so the current approach is used. Note that
+//    the go routine will need to be cancelled upon Shutdown().
+// 2. If the gc of each timeseriesMap during the gc of the JobsMap causes too much contention,
+//    the gc of timeseriesMaps can be moved to the end of MetricsAdjuster().AdjustMetrics(). This
+//    approach requires adding a 'lastGC' Time and (potentially) a gcInterval duration to
+//    timeseriesMap, so the current approach is used instead.
+
+// timeseriesinfo contains the information necessary to adjust from the initial point and to detect
+// resets.
+type timeseriesinfo struct {
+    mark     bool
+    initial  *metricspb.TimeSeries
+    previous *metricspb.TimeSeries
+}
+
+// timeseriesMap maps from a timeseries instance (metric * label values) to the timeseries info for
+// the instance.
+type timeseriesMap struct {
+    sync.RWMutex
+    mark   bool
+    tsiMap map[string]*timeseriesinfo
+}
+
+// Get the timeseriesinfo for the timeseries associated with the metric and label values.
+func (tsm *timeseriesMap) get(
+    metric *metricspb.Metric, values []*metricspb.LabelValue) *timeseriesinfo {
+    name := metric.GetMetricDescriptor().GetName()
+    sig := getTimeseriesSignature(name, values)
+    tsi, ok := tsm.tsiMap[sig]
+    if !ok {
+        tsi = &timeseriesinfo{}
+        tsm.tsiMap[sig] = tsi
+    }
+    tsm.mark = true
+    tsi.mark = true
+    return tsi
+}
+
+// Remove timeseries that have aged out.
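+// This is the sweep phase of the mark-and-sweep strategy described above:
+// unmarked entries are deleted, and surviving marks are cleared for the next
+// cycle.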
+func (tsm *timeseriesMap) gc() {
+    tsm.Lock()
+    defer tsm.Unlock()
+    // this shouldn't happen under the current gc() strategy
+    if !tsm.mark {
+        return
+    }
+    for ts, tsi := range tsm.tsiMap {
+        if !tsi.mark {
+            delete(tsm.tsiMap, ts)
+        } else {
+            tsi.mark = false
+        }
+    }
+    tsm.mark = false
+}
+
+func newTimeseriesMap() *timeseriesMap {
+    return &timeseriesMap{mark: true, tsiMap: map[string]*timeseriesinfo{}}
+}
+
+// Create a unique timeseries signature consisting of the metric name and label values.
+func getTimeseriesSignature(name string, values []*metricspb.LabelValue) string {
+    labelValues := make([]string, 0, len(values))
+    for _, label := range values {
+        if label.GetValue() != "" {
+            labelValues = append(labelValues, label.GetValue())
+        }
+    }
+    return fmt.Sprintf("%s,%s", name, strings.Join(labelValues, ","))
+}
+
+// JobsMap maps from a job instance to a map of timeseries instances for the job.
+type JobsMap struct {
+    sync.RWMutex
+    gcInterval time.Duration
+    lastGC     time.Time
+    jobsMap    map[string]*timeseriesMap
+}
+
+// NewJobsMap creates a new (empty) JobsMap.
+func NewJobsMap(gcInterval time.Duration) *JobsMap {
+    return &JobsMap{gcInterval: gcInterval, lastGC: time.Now(), jobsMap: make(map[string]*timeseriesMap)}
+}
+
+// Remove jobs and timeseries that have aged out.
+func (jm *JobsMap) gc() {
+    jm.Lock()
+    defer jm.Unlock()
+    // once the structure is locked, confirm that gc() is still necessary
+    if time.Since(jm.lastGC) > jm.gcInterval {
+        for sig, tsm := range jm.jobsMap {
+            tsm.RLock()
+            tsmNotMarked := !tsm.mark
+            tsm.RUnlock()
+            if tsmNotMarked {
+                delete(jm.jobsMap, sig)
+            } else {
+                tsm.gc()
+            }
+        }
+        jm.lastGC = time.Now()
+    }
+}
+
+func (jm *JobsMap) maybeGC() {
+    // speculatively check if gc() is necessary, recheck once the structure is locked
+    jm.RLock()
+    defer jm.RUnlock()
+    if time.Since(jm.lastGC) > jm.gcInterval {
+        go jm.gc()
+    }
+}
+
+func (jm *JobsMap) get(job, instance string) *timeseriesMap {
+    sig := job + ":" + instance
+    jm.RLock()
+    tsm, ok := jm.jobsMap[sig]
+    jm.RUnlock()
+    defer jm.maybeGC()
+    if ok {
+        return tsm
+    }
+    jm.Lock()
+    defer jm.Unlock()
+    tsm2, ok2 := jm.jobsMap[sig]
+    if ok2 {
+        return tsm2
+    }
+    tsm2 = newTimeseriesMap()
+    jm.jobsMap[sig] = tsm2
+    return tsm2
+}
+
+// MetricsAdjuster takes a map from a metric instance to the initial point in the metrics instance
+// and provides AdjustMetrics, which takes a sequence of metrics and adjusts their values based on
+// the initial points.
+type MetricsAdjuster struct {
+    tsm    *timeseriesMap
+    logger *zap.Logger
+}
+
+// NewMetricsAdjuster is a constructor for MetricsAdjuster.
+func NewMetricsAdjuster(tsm *timeseriesMap, logger *zap.Logger) *MetricsAdjuster {
+    return &MetricsAdjuster{
+        tsm:    tsm,
+        logger: logger,
+    }
+}
+
+// AdjustMetrics takes a sequence of metrics and adjusts their values based on the initial and
+// previous points in the timeseriesMap. If the metric is the first point in the timeseries, or the
+// timeseries has been reset, it is removed from the sequence and added to the timeseriesMap.
+// Additionally returns the total number of timeseries dropped from the metrics.
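+// The underlying timeseriesMap is write-locked for the duration of the call.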
+func (ma *MetricsAdjuster) AdjustMetrics(metrics []*metricspb.Metric) ([]*metricspb.Metric, int) {
+    var adjusted = make([]*metricspb.Metric, 0, len(metrics))
+    dropped := 0
+    ma.tsm.Lock()
+    defer ma.tsm.Unlock()
+    for _, metric := range metrics {
+        adj, d := ma.adjustMetric(metric)
+        dropped += d
+        if adj {
+            adjusted = append(adjusted, metric)
+        }
+    }
+    return adjusted, dropped
+}
+
+// Returns true if at least one of the metric's timeseries was adjusted and false if all of the
+// timeseries are an initial occurrence or a reset. Additionally returns the number of timeseries
+// dropped from the metric.
+//
+// Types of metrics supported by prometheus:
+// - MetricDescriptor_GAUGE_DOUBLE
+// - MetricDescriptor_GAUGE_DISTRIBUTION
+// - MetricDescriptor_CUMULATIVE_DOUBLE
+// - MetricDescriptor_CUMULATIVE_DISTRIBUTION
+// - MetricDescriptor_SUMMARY
+func (ma *MetricsAdjuster) adjustMetric(metric *metricspb.Metric) (bool, int) {
+    switch metric.MetricDescriptor.Type {
+    case metricspb.MetricDescriptor_GAUGE_DOUBLE, metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
+        // gauges don't need to be adjusted so no additional processing is necessary
+        return true, 0
+    default:
+        return ma.adjustMetricTimeseries(metric)
+    }
+}
+
+// Returns true if at least one of the metric's timeseries was adjusted and false if all of the
+// timeseries are an initial occurrence or a reset. Additionally returns the number of timeseries
+// dropped.
+func (ma *MetricsAdjuster) adjustMetricTimeseries(metric *metricspb.Metric) (bool, int) {
+    dropped := 0
+    filtered := make([]*metricspb.TimeSeries, 0, len(metric.GetTimeseries()))
+    for _, current := range metric.GetTimeseries() {
+        tsi := ma.tsm.get(metric, current.GetLabelValues())
+        if tsi.initial == nil {
+            // initial timeseries
+            tsi.initial = current
+            tsi.previous = current
+            dropped++
+        } else {
+            if ma.adjustTimeseries(metric.MetricDescriptor.Type, current, tsi.initial,
+                tsi.previous) {
+                tsi.previous = current
+                filtered = append(filtered, current)
+            } else {
+                // reset timeseries
+                tsi.initial = current
+                tsi.previous = current
+                dropped++
+            }
+        }
+    }
+    metric.Timeseries = filtered
+    return len(filtered) > 0, dropped
+}
+
+// Returns true if 'current' was adjusted and false if 'current' is the initial occurrence or a
+// reset of the timeseries.
+func (ma *MetricsAdjuster) adjustTimeseries(metricType metricspb.MetricDescriptor_Type,
+    current, initial, previous *metricspb.TimeSeries) bool {
+    if !ma.adjustPoints(
+        metricType, current.GetPoints(), initial.GetPoints(), previous.GetPoints()) {
+        return false
+    }
+    current.StartTimestamp = initial.StartTimestamp
+    return true
+}
+
+func (ma *MetricsAdjuster) adjustPoints(metricType metricspb.MetricDescriptor_Type,
+    current, initial, previous []*metricspb.Point) bool {
+    if len(current) != 1 || len(initial) != 1 || len(previous) != 1 {
+        ma.logger.Info("Adjusting Points, all lengths should be 1",
+            zap.Int("len(current)", len(current)), zap.Int("len(initial)", len(initial)), zap.Int("len(previous)", len(previous)))
+        return true
+    }
+    return ma.adjustPoint(metricType, current[0], initial[0], previous[0])
+}
+
+// Note: There is an important, subtle point here. When a new timeseries or a reset is detected,
+// current and initial are the same object. When initial == previous, the previous value/count/sum
+// are all the initial value. When initial != previous, the previous value/count/sum has been
+// adjusted with respect to the initial value, so the two must be combined to find the actual
+// previous value/count/sum. This happens because the timeseries are updated in-place - if new
+// copies of the timeseries were created instead, previous could be used directly, but this would
+// mean reallocating all of the metrics.
+func (ma *MetricsAdjuster) adjustPoint(metricType metricspb.MetricDescriptor_Type,
+    current, initial, previous *metricspb.Point) bool {
+    switch metricType {
+    case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE:
+        currentValue := current.GetDoubleValue()
+        initialValue := initial.GetDoubleValue()
+        previousValue := initialValue
+        if initial != previous {
+            previousValue += previous.GetDoubleValue()
+        }
+        if currentValue < previousValue {
+            // reset detected
+            return false
+        }
+        current.Value =
+            &metricspb.Point_DoubleValue{DoubleValue: currentValue - initialValue}
+    case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION:
+        // note: sum of squared deviation not currently supported
+        currentDist := current.GetDistributionValue()
+        initialDist := initial.GetDistributionValue()
+        previousCount := initialDist.Count
+        previousSum := initialDist.Sum
+        if initial != previous {
+            previousCount += previous.GetDistributionValue().Count
+            previousSum += previous.GetDistributionValue().Sum
+        }
+        if currentDist.Count < previousCount || currentDist.Sum < previousSum {
+            // reset detected
+            return false
+        }
+        currentDist.Count -= initialDist.Count
+        currentDist.Sum -= initialDist.Sum
+        ma.adjustBuckets(currentDist.Buckets, initialDist.Buckets)
+    case metricspb.MetricDescriptor_SUMMARY:
+        // note: for summary, we don't adjust the snapshot
+        currentCount := current.GetSummaryValue().Count.GetValue()
+        currentSum := current.GetSummaryValue().Sum.GetValue()
+        initialCount := initial.GetSummaryValue().Count.GetValue()
+        initialSum := initial.GetSummaryValue().Sum.GetValue()
+        previousCount := initialCount
+        previousSum := initialSum
+        if initial != previous {
+            previousCount += previous.GetSummaryValue().Count.GetValue()
+            previousSum += previous.GetSummaryValue().Sum.GetValue()
+        }
+        if currentCount < previousCount || currentSum < previousSum {
+            // reset detected
+            return false
+        }
+        current.GetSummaryValue().Count =
+            &wrapperspb.Int64Value{Value: currentCount - initialCount}
+        current.GetSummaryValue().Sum =
+            &wrapperspb.DoubleValue{Value: currentSum - initialSum}
+    default:
+        // this shouldn't happen
+        ma.logger.Info("Adjust - skipping unexpected point", zap.String("type", metricType.String()))
+    }
+    return true
+}
+
+func (ma *MetricsAdjuster) adjustBuckets(current, initial []*metricspb.DistributionValue_Bucket) {
+    if len(current) != len(initial) {
+        // this shouldn't happen
+        ma.logger.Info("Bucket sizes not equal", zap.Int("len(current)", len(current)), zap.Int("len(initial)", len(initial)))
+        return
+    }
+    for i := 0; i < len(current); i++ {
+        current[i].Count -= initial[i].Count
+    }
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster_test.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster_test.go
new file mode 100644
index 00000000000..c2236a4398d
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster_test.go
@@ -0,0 +1,377 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "testing" + "time" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + mtu "go.opentelemetry.io/collector/testutil/metricstestutil" +) + +func Test_gauge(t *testing.T) { + script := []*metricsAdjusterTest{{ + "Gauge: round 1 - gauge not adjusted", + []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + }, { + "Gauge: round 2 - gauge not adjusted", + []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)))}, + []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)))}, + }, { + "Gauge: round 3 - value less than previous value - gauge is not adjusted", + []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, + []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, + }} + runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) +} + +func Test_gaugeDistribution(t *testing.T) { + script := []*metricsAdjusterTest{{ + "GaugeDist: round 1 - gauge distribution not adjusted", + []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, + []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, + }, { + "GaugeDist: round 2 - gauge distribution not adjusted", + []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11})))}, + []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11})))}, + }, { + "GaugeDist: round 3 - count/sum less than previous - gauge distribution not adjusted", + []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5})))}, + []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5})))}, + }} + runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) +} + +func Test_cumulative(t *testing.T) { + script := []*metricsAdjusterTest{{ + "Cumulative: round 1 - initial instance, adjusted should be empty", + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + []*metricspb.Metric{}, + }, { + "Cumulative: round 2 - instance adjusted based on round 1", + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 22)))}, + }, { + "Cumulative: round 3 - instance reset (value less than previous value), adjusted should be empty", + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, + []*metricspb.Metric{}, + }, { + "Cumulative: round 4 - instance adjusted based on round 3", + 
[]*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 72)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t4Ms, 17)))}, + }} + runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) +} + +func Test_cumulativeDistribution(t *testing.T) { + script := []*metricsAdjusterTest{{ + "CumulativeDist: round 1 - initial instance, adjusted should be empty", + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, + []*metricspb.Metric{}, + }, { + "CumulativeDist: round 2 - instance adjusted based on round 1", + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 3, 4, 8})))}, + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{2, 1, 1, 1})))}, + }, { + "CumulativeDist: round 3 - instance reset (value less than previous value), adjusted should be empty", + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 3, 2, 7})))}, + []*metricspb.Metric{}, + }, { + "CumulativeDist: round 4 - instance adjusted based on round 3", + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{7, 4, 2, 12})))}, + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{2, 1, 0, 5})))}, + }} + runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) +} + +func Test_summary(t *testing.T) { + script := []*metricsAdjusterTest{{ + "Summary: round 1 - initial instance, adjusted should be empty", + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t1Ms, 10, 40, percent0, []float64{1, 5, 8})))}, + []*metricspb.Metric{}, + }, { + "Summary: round 2 - instance adjusted based on round 1", + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.SummPt(t2Ms, 15, 70, percent0, []float64{7, 44, 9})))}, + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t2Ms, 5, 30, percent0, []float64{7, 44, 9})))}, + }, { + "Summary: round 3 - instance reset (count less than previous), adjusted should be empty", + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t3Ms, 12, 66, percent0, []float64{3, 22, 5})))}, + []*metricspb.Metric{}, + }, { + "Summary: round 4 - instance adjusted based on round 3", + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.SummPt(t4Ms, 14, 96, percent0, []float64{9, 47, 8})))}, + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t4Ms, 2, 30, percent0, []float64{9, 47, 8})))}, + }} + runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) +} + +func Test_multiMetrics(t *testing.T) { + script := []*metricsAdjusterTest{{ + "MultiMetrics: round 1 - combined round 1 of individual metrics", + []*metricspb.Metric{ + mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44))), + mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t1Ms, 10, 40, percent0, []float64{1, 5, 8}))), + }, + []*metricspb.Metric{ + 
mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44))), + mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7}))), + }, + }, { + "MultiMetrics: round 2 - combined round 2 of individual metrics", + []*metricspb.Metric{ + mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66))), + mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 3, 4, 8}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.SummPt(t2Ms, 15, 70, percent0, []float64{7, 44, 9}))), + }, + []*metricspb.Metric{ + mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66))), + mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 22))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{2, 1, 1, 1}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t2Ms, 5, 30, percent0, []float64{7, 44, 9}))), + }, + }, { + "MultiMetrics: round 3 - combined round 3 of individual metrics", + []*metricspb.Metric{ + mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55))), + mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 3, 2, 7}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t3Ms, 12, 66, percent0, []float64{3, 22, 5}))), + }, + []*metricspb.Metric{ + mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55))), + mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5}))), + }, + }, { + "MultiMetrics: round 4 - combined round 4 of individual metrics", + []*metricspb.Metric{ + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 72))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{7, 4, 2, 12}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.SummPt(t4Ms, 14, 96, percent0, []float64{9, 47, 8}))), + }, + []*metricspb.Metric{ + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t4Ms, 17))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{2, 1, 0, 5}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t4Ms, 2, 30, percent0, []float64{9, 47, 8}))), + }, + }} + runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) +} + +func Test_multiTimeseries(t *testing.T) { + script := []*metricsAdjusterTest{{ + "MultiTimeseries: round 1 - initial first instance, adjusted should be empty", + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + []*metricspb.Metric{}, + }, { + "MultiTimeseries: round 2 - first instance adjusted based on round 1, initial second instance", + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t2Ms, 20)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 22)))}, + }, { + "MultiTimeseries: round 3 - first instance adjusted based on 
round 1, second based on round 2",
+ []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 88)), mtu.Timeseries(t3Ms, v10v20, mtu.Double(t3Ms, 49)))},
+ []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t3Ms, 44)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t3Ms, 29)))},
+ }, {
+ "MultiTimeseries: round 4 - first instance reset, second instance adjusted based on round 2, initial third instance",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 87)), mtu.Timeseries(t4Ms, v10v20, mtu.Double(t4Ms, 57)), mtu.Timeseries(t4Ms, v100v200, mtu.Double(t4Ms, 10)))},
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v10v20, mtu.Double(t4Ms, 37)))},
+ }, {
+ "MultiTimeseries: round 5 - first instance adjusted based on round 4, second on round 2, third on round 4",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t5Ms, v1v2, mtu.Double(t5Ms, 90)), mtu.Timeseries(t5Ms, v10v20, mtu.Double(t5Ms, 65)), mtu.Timeseries(t5Ms, v100v200, mtu.Double(t5Ms, 22)))},
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t5Ms, 3)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t5Ms, 45)), mtu.Timeseries(t4Ms, v100v200, mtu.Double(t5Ms, 12)))},
+ }}
+ runScript(t, NewJobsMap(time.Minute).get("job", "0"), script)
+}
+
+func Test_emptyLabels(t *testing.T) {
+ script := []*metricsAdjusterTest{{
+ "EmptyLabels: round 1 - initial instance, implicitly empty labels, adjusted should be empty",
+ []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t1Ms, []string{}, mtu.Double(t1Ms, 44)))},
+ []*metricspb.Metric{},
+ }, {
+ "EmptyLabels: round 2 - instance adjusted based on round 1",
+ []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t2Ms, []string{}, mtu.Double(t2Ms, 66)))},
+ []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t1Ms, []string{}, mtu.Double(t2Ms, 22)))},
+ }, {
+ "EmptyLabels: round 3 - one explicitly empty label, instance adjusted based on round 1",
+ []*metricspb.Metric{mtu.Cumulative(c1, k1, mtu.Timeseries(t3Ms, []string{""}, mtu.Double(t3Ms, 77)))},
+ []*metricspb.Metric{mtu.Cumulative(c1, k1, mtu.Timeseries(t1Ms, []string{""}, mtu.Double(t3Ms, 33)))},
+ }, {
+ "EmptyLabels: round 4 - three explicitly empty labels, instance adjusted based on round 1",
+ []*metricspb.Metric{mtu.Cumulative(c1, k1k2k3, mtu.Timeseries(t3Ms, []string{"", "", ""}, mtu.Double(t3Ms, 88)))},
+ []*metricspb.Metric{mtu.Cumulative(c1, k1k2k3, mtu.Timeseries(t1Ms, []string{"", "", ""}, mtu.Double(t3Ms, 44)))},
+ }}
+ runScript(t, NewJobsMap(time.Minute).get("job", "0"), script)
+}
+
+func Test_tsGC(t *testing.T) {
+ script1 := []*metricsAdjusterTest{{
+ "TsGC: round 1 - initial instances, adjusted should be empty",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)), mtu.Timeseries(t1Ms, v10v20, mtu.Double(t1Ms, 20))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})), mtu.Timeseries(t1Ms, v10v20, mtu.DistPt(t1Ms, bounds0, []int64{40, 20, 30, 70}))),
+ },
+ []*metricspb.Metric{},
+ }}
+
+ script2 := []*metricsAdjusterTest{{
+ "TsGC: round 2 - metrics first timeseries adjusted based on round 1, second timeseries not updated",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 88))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{8, 7, 9, 14}))),
+ },
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 44))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{4, 5, 6, 7}))),
+ },
+ }}
+
+ script3 := []*metricsAdjusterTest{{
+ "TsGC: round 3 - metrics first timeseries adjusted based on round 1, second timeseries empty due to timeseries gc()",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 99)), mtu.Timeseries(t3Ms, v10v20, mtu.Double(t3Ms, 80))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{9, 8, 10, 15})), mtu.Timeseries(t3Ms, v10v20, mtu.DistPt(t3Ms, bounds0, []int64{55, 66, 33, 77}))),
+ },
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t3Ms, 55))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 6, 7, 8}))),
+ },
+ }}
+
+ jobsMap := NewJobsMap(time.Minute)
+
+ // run round 1
+ runScript(t, jobsMap.get("job", "0"), script1)
+ // gc the tsmap, unmarking all entries
+ jobsMap.get("job", "0").gc()
+ // run round 2 - update metrics first timeseries only
+ runScript(t, jobsMap.get("job", "0"), script2)
+ // gc the tsmap, collecting unmarked entries
+ jobsMap.get("job", "0").gc()
+ // run round 3 - verify that metrics second timeseries have been gc'd
+ runScript(t, jobsMap.get("job", "0"), script3)
+}
+
+func Test_jobGC(t *testing.T) {
+ job1Script1 := []*metricsAdjusterTest{{
+ "JobGC: job 1, round 1 - initial instances, adjusted should be empty",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)), mtu.Timeseries(t1Ms, v10v20, mtu.Double(t1Ms, 20))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})), mtu.Timeseries(t1Ms, v10v20, mtu.DistPt(t1Ms, bounds0, []int64{40, 20, 30, 70}))),
+ },
+ []*metricspb.Metric{},
+ }}
+
+ job2Script1 := []*metricsAdjusterTest{{
+ "JobGC: job 2, round 1 - no metrics adjusted, just trigger gc",
+ []*metricspb.Metric{},
+ []*metricspb.Metric{},
+ }}
+
+ job1Script2 := []*metricsAdjusterTest{{
+ "JobGC: job 1, round 2 - metrics timeseries empty due to job-level gc",
+ []*metricspb.Metric{
+ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 99)), mtu.Timeseries(t4Ms, v10v20, mtu.Double(t4Ms, 80))),
+ mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{9, 8, 10, 15})), mtu.Timeseries(t4Ms, v10v20, mtu.DistPt(t4Ms, bounds0, []int64{55, 66, 33, 77}))),
+ },
+ []*metricspb.Metric{},
+ }}
+
+ gcInterval := 10 * time.Millisecond
+ jobsMap := NewJobsMap(gcInterval)
+
+ // run job 1, round 1 - all entries marked
+ runScript(t, jobsMap.get("job", "0"), job1Script1)
+ // sleep longer than gcInterval to enable job gc in the next run
+ time.Sleep(2 * gcInterval)
+ // run job 2, round 1 - trigger job gc, unmarking all entries
+ runScript(t, jobsMap.get("job", "1"), job2Script1)
+ // sleep longer than gcInterval to enable job gc in the next run
+ time.Sleep(2 * gcInterval)
+ // re-run job 2, round 1 - trigger job gc, removing unmarked entries
+ runScript(t, jobsMap.get("job", "1"), job2Script1)
+ // ensure that at least one jobsMap.gc() completed
+ jobsMap.gc()
+ // run job 1, round 2 - verify that all job 1 timeseries have been gc'd
+ runScript(t, jobsMap.get("job", "0"), job1Script2)
+}
+
+var (
+ g1 = "gauge1"
+ gd1 = "gaugedist1"
+ c1 = "cumulative1"
+ cd1 = "cumulativedist1"
+ s1 =
"summary1" + k1 = []string{"k1"} + k1k2 = []string{"k1", "k2"} + k1k2k3 = []string{"k1", "k2", "k3"} + v1v2 = []string{"v1", "v2"} + v10v20 = []string{"v10", "v20"} + v100v200 = []string{"v100", "v200"} + bounds0 = []float64{1, 2, 4} + percent0 = []float64{10, 50, 90} + t1Ms = time.Unix(0, 1000000) + t2Ms = time.Unix(0, 2000000) + t3Ms = time.Unix(0, 3000000) + t4Ms = time.Unix(0, 5000000) + t5Ms = time.Unix(0, 5000000) +) + +type metricsAdjusterTest struct { + description string + metrics []*metricspb.Metric + adjusted []*metricspb.Metric +} + +func (mat *metricsAdjusterTest) dropped() int { + metricsTimeseries := 0 + for _, metric := range mat.metrics { + metricsTimeseries += len(metric.GetTimeseries()) + } + + adjustedTimeseries := 0 + for _, adjusted := range mat.adjusted { + adjustedTimeseries += len(adjusted.GetTimeseries()) + } + return metricsTimeseries - adjustedTimeseries +} + +func runScript(t *testing.T, tsm *timeseriesMap, script []*metricsAdjusterTest) { + l := zap.NewNop() + defer l.Sync() // flushes buffer, if any + ma := NewMetricsAdjuster(tsm, l) + + for _, test := range script { + expectedDropped := test.dropped() + adjusted, dropped := ma.AdjustMetrics(test.metrics) + assert.EqualValuesf(t, test.adjusted, adjusted, "Test: %v - expected: %v, actual: %v", test.description, test.adjusted, adjusted) + assert.Equalf(t, expectedDropped, dropped, "Test: %v", test.description) + } +} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go new file mode 100644 index 00000000000..fd18f8f900c --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go @@ -0,0 +1,303 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/pkg/textparse"
+ "go.uber.org/zap"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ metricsSuffixCount = "_count"
+ metricsSuffixBucket = "_bucket"
+ metricsSuffixSum = "_sum"
+ startTimeMetricName = "process_start_time_seconds"
+ scrapeUpMetricName = "up"
+)
+
+var (
+ trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum}
+ errNoDataToBuild = errors.New("there's no data to build")
+ errNoBoundaryLabel = errors.New("given metricType has no BucketLabel or QuantileLabel")
+ errEmptyBoundaryLabel = errors.New("BucketLabel or QuantileLabel is empty")
+)
+
+type metricBuilder struct {
+ hasData bool
+ hasInternalMetric bool
+ mc MetadataCache
+ metrics []*metricspb.Metric
+ numTimeseries int
+ droppedTimeseries int
+ useStartTimeMetric bool
+ startTimeMetricRegex *regexp.Regexp
+ startTime float64
+ logger *zap.Logger
+ currentMf MetricFamily
+}
+
+// newMetricBuilder creates a metricBuilder that is fed all the data points from a single Prometheus
+// scrape page by calling its AddDataPoint function, and turns them into an OpenCensus data.MetricsData
+// object by calling its Build function.
+func newMetricBuilder(mc MetadataCache, useStartTimeMetric bool, startTimeMetricRegex string, logger *zap.Logger) *metricBuilder {
+ var regex *regexp.Regexp
+ if startTimeMetricRegex != "" {
+ regex, _ = regexp.Compile(startTimeMetricRegex)
+ }
+ return &metricBuilder{
+ mc: mc,
+ metrics: make([]*metricspb.Metric, 0),
+ logger: logger,
+ numTimeseries: 0,
+ droppedTimeseries: 0,
+ useStartTimeMetric: useStartTimeMetric,
+ startTimeMetricRegex: regex,
+ }
+}
+
+func (b *metricBuilder) matchStartTimeMetric(metricName string) bool {
+ if b.startTimeMetricRegex != nil {
+ return b.startTimeMetricRegex.MatchString(metricName)
+ }
+
+ return metricName == startTimeMetricName
+}
+
+// AddDataPoint is for feeding Prometheus data points in their processing order
+func (b *metricBuilder) AddDataPoint(ls labels.Labels, t int64, v float64) error {
+ metricName := ls.Get(model.MetricNameLabel)
+ switch {
+ case metricName == "":
+ b.numTimeseries++
+ b.droppedTimeseries++
+ return errMetricNameNotFound
+ case isInternalMetric(metricName):
+ b.hasInternalMetric = true
+ lm := ls.Map()
+ delete(lm, model.MetricNameLabel)
+ // See https://www.prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
+ // up: 1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed.
+ if metricName == scrapeUpMetricName && v != 1.0 {
+ if v == 0.0 {
+ b.logger.Warn("Failed to scrape Prometheus endpoint",
+ zap.Int64("scrape_timestamp", t),
+ zap.String("target_labels", fmt.Sprintf("%v", lm)))
+ } else {
+ b.logger.Warn("The 'up' metric contains invalid value",
+ zap.Float64("value", v),
+ zap.Int64("scrape_timestamp", t),
+ zap.String("target_labels", fmt.Sprintf("%v", lm)))
+ }
+ }
+ return nil
+ case b.useStartTimeMetric && b.matchStartTimeMetric(metricName):
+ b.startTime = v
+ }
+
+ b.hasData = true
+
+ if b.currentMf != nil && !b.currentMf.IsSameFamily(metricName) {
+ m, ts, dts := b.currentMf.ToMetric()
+ b.numTimeseries += ts
+ b.droppedTimeseries += dts
+ if m != nil {
+ b.metrics = append(b.metrics, m)
+ }
+ b.currentMf = newMetricFamily(metricName, b.mc)
+ } else if b.currentMf == nil {
+ b.currentMf = newMetricFamily(metricName, b.mc)
+ }
+
+ return b.currentMf.Add(metricName, ls, t, v)
+}
+
+// Build an OpenCensus data.MetricsData based on all added data points.
+// The only error returned by this function is errNoDataToBuild.
+func (b *metricBuilder) Build() ([]*metricspb.Metric, int, int, error) {
+ if !b.hasData {
+ if b.hasInternalMetric {
+ return make([]*metricspb.Metric, 0), 0, 0, nil
+ }
+ return nil, 0, 0, errNoDataToBuild
+ }
+
+ if b.currentMf != nil {
+ m, ts, dts := b.currentMf.ToMetric()
+ b.numTimeseries += ts
+ b.droppedTimeseries += dts
+ if m != nil {
+ b.metrics = append(b.metrics, m)
+ }
+ b.currentMf = nil
+ }
+
+ return b.metrics, b.numTimeseries, b.droppedTimeseries, nil
+}
+
+// TODO: move the following helper functions to a proper place, as they are not called directly in this Go file
+
+func isUsefulLabel(mType metricspb.MetricDescriptor_Type, labelKey string) bool {
+ result := false
+ switch labelKey {
+ case model.MetricNameLabel:
+ case model.InstanceLabel:
+ case model.SchemeLabel:
+ case model.MetricsPathLabel:
+ case model.JobLabel:
+ case model.BucketLabel:
+ result = mType != metricspb.MetricDescriptor_GAUGE_DISTRIBUTION &&
+ mType != metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
+ case model.QuantileLabel:
+ result = mType != metricspb.MetricDescriptor_SUMMARY
+ default:
+ result = true
+ }
+ return result
+}
+
+// dpgSignature is used to create a key for data points belonging to the same group of a metric family
+func dpgSignature(orderedKnownLabelKeys []string, ls labels.Labels) string {
+ sign := make([]string, 0, len(orderedKnownLabelKeys))
+ for _, k := range orderedKnownLabelKeys {
+ v := ls.Get(k)
+ if v == "" {
+ continue
+ }
+ sign = append(sign, k+"="+v)
+ }
+ return fmt.Sprintf("%#v", sign)
+}
+
+func normalizeMetricName(name string) string {
+ for _, s := range trimmableSuffixes {
+ if strings.HasSuffix(name, s) && name != s {
+ return strings.TrimSuffix(name, s)
+ }
+ }
+ return name
+}
+
+func getBoundary(metricType metricspb.MetricDescriptor_Type, labels labels.Labels) (float64, error) {
+ labelName := ""
+ switch metricType {
+ case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION,
+ metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
+ labelName = model.BucketLabel
+ case metricspb.MetricDescriptor_SUMMARY:
+ labelName = model.QuantileLabel
+ default:
+ return 0, errNoBoundaryLabel
+ }
+
+ v := labels.Get(labelName)
+ if v == "" {
+ return 0, errEmptyBoundaryLabel
+ }
+
+ return strconv.ParseFloat(v, 64)
+}
+
+func convToOCAMetricType(metricType textparse.MetricType) metricspb.MetricDescriptor_Type {
+ switch metricType {
+ case textparse.MetricTypeCounter:
+ // always use float64, as it's the internal data type used in prometheus
+ return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
+ // textparse.MetricTypeUnknown is converted to gauge by default to fix Prometheus untyped metrics from being dropped
+ case textparse.MetricTypeGauge, textparse.MetricTypeUnknown:
+ return metricspb.MetricDescriptor_GAUGE_DOUBLE
+ case textparse.MetricTypeHistogram:
+ return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
+ // dropping support for gaugehistogram for now until we have an official spec of its implementation
+ // a draft can be found in: https://docs.google.com/document/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit#heading=h.1cvzqd4ksd23
+ // case textparse.MetricTypeGaugeHistogram:
+ // return metricspb.MetricDescriptor_GAUGE_DISTRIBUTION
+ case textparse.MetricTypeSummary:
+ return metricspb.MetricDescriptor_SUMMARY
+ default:
+ // including: textparse.MetricTypeInfo, textparse.MetricTypeStateset
+ return metricspb.MetricDescriptor_UNSPECIFIED
+ }
+}
+
+/*
+ code borrowed from the original promreceiver
+*/
+
+func heuristicalMetricAndKnownUnits(metricName, parsedUnit string) string {
+ if parsedUnit != "" {
+ return parsedUnit
+ }
+ lastUnderscoreIndex := strings.LastIndex(metricName, "_")
+ if lastUnderscoreIndex <= 0 || lastUnderscoreIndex >= len(metricName)-1 {
+ return ""
+ }
+
+ unit := ""
+
+ supposedUnit := metricName[lastUnderscoreIndex+1:]
+ switch strings.ToLower(supposedUnit) {
+ case "millisecond", "milliseconds", "ms":
+ unit = "ms"
+ case "second", "seconds", "s":
+ unit = "s"
+ case "microsecond", "microseconds", "us":
+ unit = "us"
+ case "nanosecond", "nanoseconds", "ns":
+ unit = "ns"
+ case "byte", "bytes", "by":
+ unit = "By"
+ case "bit", "bits":
+ unit = "Bi"
+ case "kilogram", "kilograms", "kg":
+ unit = "kg"
+ case "gram", "grams", "g":
+ unit = "g"
+ case "meter", "meters", "metre", "metres", "m":
+ unit = "m"
+ case "kilometer", "kilometers", "kilometre", "kilometres", "km":
+ unit = "km"
+ case "milimeter", "milimeters", "milimetre", "milimetres", "mm":
+ unit = "mm"
+ case "nanogram", "ng", "nanograms":
+ unit = "ng"
+ }
+
+ return unit
+}
+
+func timestampFromMs(timeAtMs int64) *timestamppb.Timestamp {
+ secs, ns := timeAtMs/1e3, (timeAtMs%1e3)*1e6
+ return &timestamppb.Timestamp{
+ Seconds: secs,
+ Nanos: int32(ns),
+ }
+}
+
+func isInternalMetric(metricName string) bool {
+ if metricName == scrapeUpMetricName || strings.HasPrefix(metricName, "scrape_") {
+ return true
+ }
+ return false
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder_test.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder_test.go
new file mode 100644
index 00000000000..7a64bbf1635
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder_test.go
@@ -0,0 +1,1380 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
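As a worked example of the millisecond conversion in timestampFromMs above: an input of 1555366610123 ms yields Seconds = 1555366610 and Nanos = 123000000, since secs = ms / 1e3 (integer division) and ns = (ms % 1e3) * 1e6. The tests in the file that follows lean on this helper to build their expected timestamps.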
+ +package internal + +import ( + "reflect" + "testing" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/textparse" + "github.com/prometheus/prometheus/scrape" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +const startTs = int64(1555366610000) +const interval = int64(15 * 1000) +const defaultBuilderStartTime = float64(1.0) + +var testMetadata = map[string]scrape.MetricMetadata{ + "counter_test": {Metric: "counter_test", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "counter_test2": {Metric: "counter_test2", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "gauge_test": {Metric: "gauge_test", Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "gauge_test2": {Metric: "gauge_test2", Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "hist_test": {Metric: "hist_test", Type: textparse.MetricTypeHistogram, Help: "", Unit: ""}, + "hist_test2": {Metric: "hist_test2", Type: textparse.MetricTypeHistogram, Help: "", Unit: ""}, + "ghist_test": {Metric: "ghist_test", Type: textparse.MetricTypeGaugeHistogram, Help: "", Unit: ""}, + "summary_test": {Metric: "summary_test", Type: textparse.MetricTypeSummary, Help: "", Unit: ""}, + "summary_test2": {Metric: "summary_test2", Type: textparse.MetricTypeSummary, Help: "", Unit: ""}, + "unknown_test": {Metric: "unknown_test", Type: textparse.MetricTypeUnknown, Help: "", Unit: ""}, + "poor_name_count": {Metric: "poor_name_count", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "up": {Metric: "up", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "scrape_foo": {Metric: "scrape_foo", Type: textparse.MetricTypeCounter, Help: "", Unit: ""}, + "example_process_start_time_seconds": {Metric: "example_process_start_time_seconds", + Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "process_start_time_seconds": {Metric: "process_start_time_seconds", + Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, + "badprocess_start_time_seconds": {Metric: "badprocess_start_time_seconds", + Type: textparse.MetricTypeGauge, Help: "", Unit: ""}, +} + +type testDataPoint struct { + lb labels.Labels + t int64 + v float64 +} + +type testScrapedPage struct { + pts []*testDataPoint +} + +type buildTestData struct { + name string + inputs []*testScrapedPage + wants [][]*metricspb.Metric +} + +func createLabels(mFamily string, tagPairs ...string) labels.Labels { + lm := make(map[string]string) + lm[model.MetricNameLabel] = mFamily + if len(tagPairs)%2 != 0 { + panic("tag pairs is not even") + } + + for i := 0; i < len(tagPairs); i += 2 { + lm[tagPairs[i]] = tagPairs[i+1] + } + + return labels.FromMap(lm) +} + +func createDataPoint(mname string, value float64, tagPairs ...string) *testDataPoint { + return &testDataPoint{ + lb: createLabels(mname, tagPairs...), + v: value, + } +} + +func runBuilderTests(t *testing.T, tests []buildTestData) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.EqualValues(t, len(tt.wants), len(tt.inputs)) + mc := newMockMetadataCache(testMetadata) + st := startTs + for i, page := range tt.inputs { + b := newMetricBuilder(mc, true, "", testLogger) + b.startTime = defaultBuilderStartTime // set to a non-zero value + for _, pt := range page.pts { + // set ts for testing + pt.t = st + assert.NoError(t, b.AddDataPoint(pt.lb, pt.t, pt.v)) + } + metrics, _, _, err := 
b.Build() + assert.NoError(t, err) + assert.EqualValues(t, tt.wants[i], metrics) + st += interval + } + }) + } +} + +func runBuilderStartTimeTests(t *testing.T, tests []buildTestData, + startTimeMetricRegex string, expectedBuilderStartTime float64) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mc := newMockMetadataCache(testMetadata) + st := startTs + for _, page := range tt.inputs { + b := newMetricBuilder(mc, true, startTimeMetricRegex, + testLogger) + b.startTime = defaultBuilderStartTime // set to a non-zero value + for _, pt := range page.pts { + // set ts for testing + pt.t = st + assert.NoError(t, b.AddDataPoint(pt.lb, pt.t, pt.v)) + } + _, _, _, err := b.Build() + assert.NoError(t, err) + assert.EqualValues(t, b.startTime, expectedBuilderStartTime) + st += interval + } + }) + } +} + +func Test_startTimeMetricMatch(t *testing.T) { + matchBuilderStartTime := 123.456 + matchTests := []buildTestData{ + { + name: "prefix_match", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("example_process_start_time_seconds", + matchBuilderStartTime, "foo", "bar"), + }, + }, + }, + }, + { + name: "match", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("process_start_time_seconds", + matchBuilderStartTime, "foo", "bar"), + }, + }, + }, + }, + } + nomatchTests := []buildTestData{ + { + name: "nomatch1", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("_process_start_time_seconds", + matchBuilderStartTime, "foo", "bar"), + }, + }, + }, + }, + { + name: "nomatch2", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("subprocess_start_time_seconds", + matchBuilderStartTime, "foo", "bar"), + }, + }, + }, + }, + } + + runBuilderStartTimeTests(t, matchTests, "^(.+_)*process_start_time_seconds$", matchBuilderStartTime) + runBuilderStartTimeTests(t, nomatchTests, "^(.+_)*process_start_time_seconds$", defaultBuilderStartTime) +} + +func Test_metricBuilder_counters(t *testing.T) { + tests := []buildTestData{ + { + name: "single-item", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 100, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "counter_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "two-items", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 150, "foo", "bar"), + createDataPoint("counter_test", 25, "foo", "other"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "counter_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 150.0}}, + }, + }, + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: 
[]*metricspb.LabelValue{{Value: "other", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 25.0}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "two-metrics", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 150, "foo", "bar"), + createDataPoint("counter_test", 25, "foo", "other"), + createDataPoint("counter_test2", 100, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "counter_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 150.0}}, + }, + }, + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "other", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 25.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "counter_test2", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "metrics-with-poor-names", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("poor_name_count", 100, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "poor_name_count", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + }, + }, + }, + } + + runBuilderTests(t, tests) +} + +func Test_metricBuilder_gauges(t *testing.T) { + tests := []buildTestData{ + { + name: "one-gauge", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, "foo", "bar"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 90, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "gauge_test", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + }, + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "gauge_test", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + 
LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs + interval), Value: &metricspb.Point_DoubleValue{DoubleValue: 90.0}},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "gauge-with-different-tags",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("gauge_test", 100, "foo", "bar"),
+ createDataPoint("gauge_test", 200, "bar", "foo"),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {
+ {
+ MetricDescriptor: &metricspb.MetricDescriptor{
+ Name: "gauge_test",
+ Type: metricspb.MetricDescriptor_GAUGE_DOUBLE,
+ LabelKeys: []*metricspb.LabelKey{{Key: "bar"}, {Key: "foo"}}},
+ Timeseries: []*metricspb.TimeSeries{
+ {
+ LabelValues: []*metricspb.LabelValue{{Value: "", HasValue: false}, {Value: "bar", HasValue: true}},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}},
+ },
+ },
+ {
+ LabelValues: []*metricspb.LabelValue{{Value: "foo", HasValue: true}, {Value: "", HasValue: false}},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 200.0}},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ // TODO: A decision needs to be made on whether we want the behavior that can generate different tag key
+ // sets as metrics come and go
+ name: "gauge-comes-and-go-with-different-tagset",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("gauge_test", 100, "foo", "bar"),
+ createDataPoint("gauge_test", 200, "bar", "foo"),
+ },
+ },
+ {
+ pts: []*testDataPoint{
+ createDataPoint("gauge_test", 20, "foo", "bar"),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {
+ {
+ MetricDescriptor: &metricspb.MetricDescriptor{
+ Name: "gauge_test",
+ Type: metricspb.MetricDescriptor_GAUGE_DOUBLE,
+ LabelKeys: []*metricspb.LabelKey{{Key: "bar"}, {Key: "foo"}}},
+ Timeseries: []*metricspb.TimeSeries{
+ {
+ LabelValues: []*metricspb.LabelValue{{Value: "", HasValue: false}, {Value: "bar", HasValue: true}},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}},
+ },
+ },
+ {
+ LabelValues: []*metricspb.LabelValue{{Value: "foo", HasValue: true}, {Value: "", HasValue: false}},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 200.0}},
+ },
+ },
+ },
+ },
+ },
+ {
+ {
+ MetricDescriptor: &metricspb.MetricDescriptor{
+ Name: "gauge_test",
+ Type: metricspb.MetricDescriptor_GAUGE_DOUBLE,
+ LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}},
+ Timeseries: []*metricspb.TimeSeries{
+ {
+ LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs + interval), Value: &metricspb.Point_DoubleValue{DoubleValue: 20.0}},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ runBuilderTests(t, tests)
+}
+
+func Test_metricBuilder_untype(t *testing.T) {
+ tests := []buildTestData{
+ {
+ name: "one-unknown",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("unknown_test", 100, "foo", "bar"),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {
+ {
+ MetricDescriptor: &metricspb.MetricDescriptor{
+ Name: "unknown_test",
+ Type: metricspb.MetricDescriptor_GAUGE_DOUBLE,
+ LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}},
+ Timeseries: []*metricspb.TimeSeries{
+ {
+ LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}},
+ Points:
[]*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "no-type-hint", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("something_not_exists", 100, "foo", "bar"), + createDataPoint("theother_not_exists", 200, "foo", "bar"), + createDataPoint("theother_not_exists", 300, "bar", "foo"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "something_not_exists", + Type: metricspb.MetricDescriptor_UNSPECIFIED, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "theother_not_exists", + Type: metricspb.MetricDescriptor_UNSPECIFIED, + LabelKeys: []*metricspb.LabelKey{{Key: "bar"}, {Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + LabelValues: []*metricspb.LabelValue{{Value: "", HasValue: false}, {Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 200.0}}, + }, + }, + { + LabelValues: []*metricspb.LabelValue{{Value: "foo", HasValue: true}, {Value: "", HasValue: false}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 300.0}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "untype-metric-poor-names", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("some_count", 100, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "some_count", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + }, + }, + }, + }, + }, + } + + runBuilderTests(t, tests) +} + +func Test_metricBuilder_histogram(t *testing.T) { + tests := []buildTestData{ + { + name: "single item", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test", 1, "foo", "bar", "le", "10"), + createDataPoint("hist_test", 2, "foo", "bar", "le", "20"), + createDataPoint("hist_test", 10, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, "foo", "bar"), + createDataPoint("hist_test_count", 10, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: 
&metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 10, + Sum: 99.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 8}}, + }}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "multi-groups", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test", 1, "foo", "bar", "le", "10"), + createDataPoint("hist_test", 2, "foo", "bar", "le", "20"), + createDataPoint("hist_test", 10, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, "foo", "bar"), + createDataPoint("hist_test_count", 10, "foo", "bar"), + createDataPoint("hist_test", 1, "key2", "v2", "le", "10"), + createDataPoint("hist_test", 2, "key2", "v2", "le", "20"), + createDataPoint("hist_test", 3, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, "key2", "v2"), + createDataPoint("hist_test_count", 3, "key2", "v2"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}, {Key: "key2"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}, {Value: "", HasValue: false}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 10, + Sum: 99.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 8}}, + }}}, + }, + }, + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "", HasValue: false}, {Value: "v2", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 3, + Sum: 50.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 1}}, + }}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "multi-groups-and-families", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test", 1, "foo", "bar", "le", "10"), + createDataPoint("hist_test", 2, "foo", "bar", "le", "20"), + createDataPoint("hist_test", 10, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, "foo", "bar"), + createDataPoint("hist_test_count", 10, "foo", "bar"), + createDataPoint("hist_test", 1, "key2", "v2", "le", "10"), + createDataPoint("hist_test", 2, "key2", "v2", "le", "20"), + createDataPoint("hist_test", 3, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, "key2", "v2"), + createDataPoint("hist_test_count", 3, "key2", "v2"), + createDataPoint("hist_test2", 1, "le", "10"), + createDataPoint("hist_test2", 2, "le", "20"), + createDataPoint("hist_test2", 3, "le", "+inf"), + createDataPoint("hist_test2_sum", 50), + createDataPoint("hist_test2_count", 3), + }, + }, + }, + wants: 
[][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}, {Key: "key2"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}, {Value: "", HasValue: false}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 10, + Sum: 99.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 8}}, + }}}, + }, + }, + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "", HasValue: false}, {Value: "v2", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 3, + Sum: 50.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 1}}, + }}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test2", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 3, + Sum: 50.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 1}}, + }}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "unordered-buckets", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test", 10, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test", 1, "foo", "bar", "le", "10"), + createDataPoint("hist_test", 2, "foo", "bar", "le", "20"), + createDataPoint("hist_test_sum", 99, "foo", "bar"), + createDataPoint("hist_test_count", 10, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + 
Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+ Bounds: []float64{10, 20},
+ },
+ },
+ },
+ Count: 10,
+ Sum: 99.0,
+ Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 1}, {Count: 8}},
+ }}},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ // this is unlikely to happen in a real environment, as Prometheus won't generate a histogram with fewer than 3 buckets
+ name: "only-one-bucket",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("hist_test", 3, "le", "+inf"),
+ createDataPoint("hist_test_count", 3),
+ createDataPoint("hist_test_sum", 100),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {
+ {
+ MetricDescriptor: &metricspb.MetricDescriptor{
+ Name: "hist_test",
+ Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION,
+ LabelKeys: []*metricspb.LabelKey{}},
+ Timeseries: []*metricspb.TimeSeries{
+ {
+ StartTimestamp: timestampFromMs(startTs),
+ LabelValues: []*metricspb.LabelValue{},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{
+ DistributionValue: &metricspb.DistributionValue{
+ BucketOptions: &metricspb.DistributionValue_BucketOptions{
+ Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+ Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+ Bounds: []float64{},
+ },
+ },
+ },
+ Count: 3,
+ Sum: 100,
+ Buckets: []*metricspb.DistributionValue_Bucket{{Count: 3}},
+ }}},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ // this is unlikely to happen in a real environment, as Prometheus won't generate a histogram with fewer than 3 buckets
+ name: "only-one-bucket-noninf",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("hist_test", 3, "le", "20"),
+ createDataPoint("hist_test_count", 3),
+ createDataPoint("hist_test_sum", 100),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {
+ {
+ MetricDescriptor: &metricspb.MetricDescriptor{
+ Name: "hist_test",
+ Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION,
+ LabelKeys: []*metricspb.LabelKey{}},
+ Timeseries: []*metricspb.TimeSeries{
+ {
+ StartTimestamp: timestampFromMs(startTs),
+ LabelValues: []*metricspb.LabelValue{},
+ Points: []*metricspb.Point{
+ {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_DistributionValue{
+ DistributionValue: &metricspb.DistributionValue{
+ BucketOptions: &metricspb.DistributionValue_BucketOptions{
+ Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+ Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+ Bounds: []float64{},
+ },
+ },
+ },
+ Count: 3,
+ Sum: 100,
+ Buckets: []*metricspb.DistributionValue_Bucket{{Count: 3}},
+ }}},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "corrupted-no-buckets",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("hist_test_sum", 99),
+ createDataPoint("hist_test_count", 10),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {},
+ },
+ },
+ {
+ name: "corrupted-no-sum",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("hist_test", 1, "foo", "bar", "le", "10"),
+ createDataPoint("hist_test", 2, "foo", "bar", "le", "20"),
+ createDataPoint("hist_test", 3, "foo", "bar", "le", "+inf"),
+ createDataPoint("hist_test_count", 3),
+ },
+ },
+ },
+ wants: [][]*metricspb.Metric{
+ {},
+ },
+ },
+ {
+ name: "corrupted-no-count",
+ inputs: []*testScrapedPage{
+ {
+ pts: []*testDataPoint{
+ createDataPoint("hist_test", 1, "foo", "bar", "le", "10"),
+ createDataPoint("hist_test", 2, "foo", "bar", "le", "20"),
+ createDataPoint("hist_test", 3, "foo", "bar", "le", "+inf"),
"+inf"), + createDataPoint("hist_test_sum", 99), + }, + }, + }, + wants: [][]*metricspb.Metric{ + {}, + }, + }, + } + + runBuilderTests(t, tests) +} + +func Test_metricBuilder_summary(t *testing.T) { + tests := []buildTestData{ + { + name: "no-sum-and-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 5, "foo", "bar", "quantile", "1"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + {}, + }, + }, + { + name: "empty-quantiles", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test_sum", 100, "foo", "bar"), + createDataPoint("summary_test_count", 500, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "summary_test", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + { + Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrapperspb.DoubleValue{Value: 100.0}, + Count: &wrapperspb.Int64Value{Value: 500}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "regular-summary", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_sum", 100, "foo", "bar"), + createDataPoint("summary_test_count", 500, "foo", "bar"), + }, + }, + }, + wants: [][]*metricspb.Metric{ + { + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "summary_test", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: timestampFromMs(startTs), + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: timestampFromMs(startTs), Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrapperspb.DoubleValue{Value: 100.0}, + Count: &wrapperspb.Int64Value{Value: 500}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + {Percentile: 50.0, Value: 1}, + {Percentile: 75.0, Value: 2}, + {Percentile: 100.0, Value: 5}, + }, + }}}}, + }, + }, + }, + }, + }, + }, + }, + } + + runBuilderTests(t, tests) +} + +func Test_metricBuilder_skipped(t *testing.T) { + tests := []buildTestData{ + { + name: "skip-internal-metrics", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("scrape_foo", 1), + createDataPoint("up", 1.0), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("scrape_foo", 2), + createDataPoint("up", 2.0), + }, + }, + }, + wants: [][]*metricspb.Metric{ + {}, + {}, + }, + }, + } + + runBuilderTests(t, tests) +} + +func Test_metricBuilder_baddata(t *testing.T) { + t.Run("empty-metric-name", func(t *testing.T) { + mc := newMockMetadataCache(testMetadata) + b := newMetricBuilder(mc, true, "", testLogger) + b.startTime = 1.0 // set to a non-zero value + if err := b.AddDataPoint(labels.FromStrings("a", "b"), startTs, 123); err != errMetricNameNotFound { + t.Error("expecting errMetricNameNotFound error, but get nil") + return + } + + 
+ if _, _, _, err := b.Build(); err != errNoDataToBuild {
+ t.Errorf("expected errNoDataToBuild, got %v", err)
+ }
+ })
+
+ t.Run("histogram-datapoint-no-bucket-label", func(t *testing.T) {
+ mc := newMockMetadataCache(testMetadata)
+ b := newMetricBuilder(mc, true, "", testLogger)
+ b.startTime = 1.0 // set to a non-zero value
+ if err := b.AddDataPoint(createLabels("hist_test", "k", "v"), startTs, 123); err != errEmptyBoundaryLabel {
+ t.Errorf("expected errEmptyBoundaryLabel, got %v", err)
+ }
+ })
+
+ t.Run("summary-datapoint-no-quantile-label", func(t *testing.T) {
+ mc := newMockMetadataCache(testMetadata)
+ b := newMetricBuilder(mc, true, "", testLogger)
+ b.startTime = 1.0 // set to a non-zero value
+ if err := b.AddDataPoint(createLabels("summary_test", "k", "v"), startTs, 123); err != errEmptyBoundaryLabel {
+ t.Errorf("expected errEmptyBoundaryLabel, got %v", err)
+ }
+ })
+
+}
+
+func Test_isUsefulLabel(t *testing.T) {
+ type args struct {
+ mType metricspb.MetricDescriptor_Type
+ labelKey string
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {"metricName", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.MetricNameLabel}, false},
+ {"instance", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.InstanceLabel}, false},
+ {"scheme", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.SchemeLabel}, false},
+ {"metricPath", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.MetricsPathLabel}, false},
+ {"job", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.JobLabel}, false},
+ {"bucket", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.BucketLabel}, true},
+ {"bucketForGaugeDistribution", args{metricspb.MetricDescriptor_GAUGE_DISTRIBUTION, model.BucketLabel}, false},
+ {"bucketForCumulativeDistribution", args{metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, model.BucketLabel}, false},
+ {"Quantile", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, model.QuantileLabel}, true},
+ {"QuantileForSummary", args{metricspb.MetricDescriptor_SUMMARY, model.QuantileLabel}, false},
+ {"other", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, "other"}, true},
+ {"empty", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, ""}, true},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := isUsefulLabel(tt.args.mType, tt.args.labelKey); got != tt.want {
+ t.Errorf("isUsefulLabel() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_dpgSignature(t *testing.T) {
+ knownLabelKeys := []string{"a", "b"}
+
+ tests := []struct {
+ name string
+ ls labels.Labels
+ want string
+ }{
+ {"1st label", labels.FromStrings("a", "va"), `[]string{"a=va"}`},
+ {"2nd label", labels.FromStrings("b", "vb"), `[]string{"b=vb"}`},
+ {"two labels", labels.FromStrings("a", "va", "b", "vb"), `[]string{"a=va", "b=vb"}`},
+ {"extra label", labels.FromStrings("a", "va", "b", "vb", "x", "xa"), `[]string{"a=va", "b=vb"}`},
+ {"different order", labels.FromStrings("b", "vb", "a", "va"), `[]string{"a=va", "b=vb"}`},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := dpgSignature(knownLabelKeys, tt.ls); got != tt.want {
+ t.Errorf("dpgSignature() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+
+ // this is important for caching start values: new metrics with a new tag in the same group can come up in a later
+ // scrape run, but their order within the group is not predictable, so we need a way to generate a stable key
we need to have a way to generate a stable key even if + // the total number of keys changes in between different scrape runs + t.Run("knownLabelKeys updated", func(t *testing.T) { + ls := labels.FromStrings("a", "va") + want := dpgSignature(knownLabelKeys, ls) + got := dpgSignature(append(knownLabelKeys, "c"), ls) + if got != want { + t.Errorf("dpgSignature() = %v, want %v", got, want) + } + }) +} + +func Test_normalizeMetricName(t *testing.T) { + tests := []struct { + name string + mname string + want string + }{ + {"normal", "normal", "normal"}, + {"count", "foo_count", "foo"}, + {"bucket", "foo_bucket", "foo"}, + {"sum", "foo_sum", "foo"}, + {"no_prefix", "_sum", "_sum"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalizeMetricName(tt.mname); got != tt.want { + t.Errorf("normalizeMetricName() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getBoundary(t *testing.T) { + ls := labels.FromStrings("le", "100.0", "foo", "bar", "quantile", "0.5") + ls2 := labels.FromStrings("foo", "bar") + ls3 := labels.FromStrings("le", "xyz", "foo", "bar", "quantile", "0.5") + type args struct { + metricType metricspb.MetricDescriptor_Type + labels labels.Labels + } + tests := []struct { + name string + args args + want float64 + wantErr bool + }{ + {"histogram", args{metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, ls}, 100.0, false}, + {"gaugehistogram", args{metricspb.MetricDescriptor_GAUGE_DISTRIBUTION, ls}, 100.0, false}, + {"gaugehistogram_no_label", args{metricspb.MetricDescriptor_GAUGE_DISTRIBUTION, ls2}, 0, true}, + {"gaugehistogram_bad_value", args{metricspb.MetricDescriptor_GAUGE_DISTRIBUTION, ls3}, 0, true}, + {"summary", args{metricspb.MetricDescriptor_SUMMARY, ls}, 0.5, false}, + {"otherType", args{metricspb.MetricDescriptor_GAUGE_DOUBLE, ls}, 0, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getBoundary(tt.args.metricType, tt.args.labels) + if (err != nil) != tt.wantErr { + t.Errorf("getBoundary() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("getBoundary() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_convToOCAMetricType(t *testing.T) { + tests := []struct { + name string + metricType textparse.MetricType + want metricspb.MetricDescriptor_Type + }{ + {"counter", textparse.MetricTypeCounter, metricspb.MetricDescriptor_CUMULATIVE_DOUBLE}, + {"gauge", textparse.MetricTypeGauge, metricspb.MetricDescriptor_GAUGE_DOUBLE}, + {"histogram", textparse.MetricTypeHistogram, metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION}, + {"guageHistogram", textparse.MetricTypeGaugeHistogram, metricspb.MetricDescriptor_UNSPECIFIED}, + {"summary", textparse.MetricTypeSummary, metricspb.MetricDescriptor_SUMMARY}, + {"info", textparse.MetricTypeInfo, metricspb.MetricDescriptor_UNSPECIFIED}, + {"stateset", textparse.MetricTypeStateset, metricspb.MetricDescriptor_UNSPECIFIED}, + {"unknown", textparse.MetricTypeUnknown, metricspb.MetricDescriptor_GAUGE_DOUBLE}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := convToOCAMetricType(tt.metricType); !reflect.DeepEqual(got, tt.want) { + t.Errorf("convToOCAMetricType() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_heuristicalMetricAndKnownUnits(t *testing.T) { + tests := []struct { + metricName string + parsedUnit string + want string + }{ + {"test", "ms", "ms"}, + {"millisecond", "", ""}, + {"test_millisecond", "", "ms"}, + {"test_milliseconds", "", "ms"}, + {"test_ms", "", 
"ms"}, + {"test_second", "", "s"}, + {"test_seconds", "", "s"}, + {"test_s", "", "s"}, + {"test_microsecond", "", "us"}, + {"test_microseconds", "", "us"}, + {"test_us", "", "us"}, + {"test_nanosecond", "", "ns"}, + {"test_nanoseconds", "", "ns"}, + {"test_ns", "", "ns"}, + {"test_byte", "", "By"}, + {"test_bytes", "", "By"}, + {"test_by", "", "By"}, + {"test_bit", "", "Bi"}, + {"test_bits", "", "Bi"}, + {"test_kilogram", "", "kg"}, + {"test_kilograms", "", "kg"}, + {"test_kg", "", "kg"}, + {"test_gram", "", "g"}, + {"test_grams", "", "g"}, + {"test_g", "", "g"}, + {"test_nanogram", "", "ng"}, + {"test_nanograms", "", "ng"}, + {"test_ng", "", "ng"}, + {"test_meter", "", "m"}, + {"test_meters", "", "m"}, + {"test_metre", "", "m"}, + {"test_metres", "", "m"}, + {"test_m", "", "m"}, + {"test_kilometer", "", "km"}, + {"test_kilometers", "", "km"}, + {"test_kilometre", "", "km"}, + {"test_kilometres", "", "km"}, + {"test_km", "", "km"}, + {"test_milimeter", "", "mm"}, + {"test_milimeters", "", "mm"}, + {"test_milimetre", "", "mm"}, + {"test_milimetres", "", "mm"}, + {"test_mm", "", "mm"}, + } + for _, tt := range tests { + t.Run(tt.metricName, func(t *testing.T) { + if got := heuristicalMetricAndKnownUnits(tt.metricName, tt.parsedUnit); got != tt.want { + t.Errorf("heuristicalMetricAndKnownUnits() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go b/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go new file mode 100644 index 00000000000..e019d9bf4be --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "sync/atomic" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer" +) + +const ( + runningStateInit = iota + runningStateReady + runningStateStop +) + +var idSeq int64 +var noop = &noopAppender{} + +// OcaStore translates Prometheus scraping diffs into OpenCensus format. 
+type OcaStore struct {
+ ctx context.Context
+
+ running int32 // access atomically
+ sink consumer.MetricsConsumer
+ mc *metadataService
+ jobsMap *JobsMap
+ useStartTimeMetric bool
+ startTimeMetricRegex string
+ receiverName string
+
+ logger *zap.Logger
+}
+
+// NewOcaStore returns an OcaStore instance, which can be used as a Prometheus scrape.Appendable.
+func NewOcaStore(ctx context.Context, sink consumer.MetricsConsumer, logger *zap.Logger, jobsMap *JobsMap, useStartTimeMetric bool, startTimeMetricRegex string, receiverName string) *OcaStore {
+ return &OcaStore{
+ running: runningStateInit,
+ ctx: ctx,
+ sink: sink,
+ logger: logger,
+ jobsMap: jobsMap,
+ useStartTimeMetric: useStartTimeMetric,
+ startTimeMetricRegex: startTimeMetricRegex,
+ receiverName: receiverName,
+ }
+}
+
+// SetScrapeManager configures the underlying scrape.Manager that OcaStore depends on; until it is set, OcaStore
+// cannot serve any Appender() request.
+func (o *OcaStore) SetScrapeManager(scrapeManager *scrape.Manager) {
+ if scrapeManager != nil && atomic.CompareAndSwapInt32(&o.running, runningStateInit, runningStateReady) {
+ o.mc = &metadataService{sm: scrapeManager}
+ }
+}
+
+func (o *OcaStore) Appender(context.Context) storage.Appender {
+ state := atomic.LoadInt32(&o.running)
+ if state == runningStateReady {
+ return newTransaction(o.ctx, o.jobsMap, o.useStartTimeMetric, o.startTimeMetricRegex, o.receiverName, o.mc, o.sink, o.logger)
+ } else if state == runningStateInit {
+ panic("ScrapeManager is not set")
+ }
+ // return a dummy appender instead of an error, since returning an error here can trigger a panic
+ return noop
+}
+
+func (o *OcaStore) Close() error {
+ atomic.CompareAndSwapInt32(&o.running, runningStateReady, runningStateStop)
+ return nil
+}
+
+// noopAppender returns an error from every operation except Rollback
+type noopAppender struct{}
+
+func (*noopAppender) Add(labels.Labels, int64, float64) (uint64, error) {
+ return 0, componenterror.ErrAlreadyStopped
+}
+
+func (*noopAppender) AddFast(uint64, int64, float64) error {
+ return componenterror.ErrAlreadyStopped
+}
+
+func (*noopAppender) Commit() error {
+ return componenterror.ErrAlreadyStopped
+}
+
+func (*noopAppender) Rollback() error {
+ return nil
+} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore_test.go b/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore_test.go new file mode 100644 index 00000000000..eb95b876b1a --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore_test.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
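+//
+// The tests below exercise the OcaStore state machine and the no-op appender it falls back to after Close.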
+
+package internal
+
+import (
+ "context"
+ "testing"
+
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/scrape"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOcaStore(t *testing.T) {
+
+ o := NewOcaStore(context.Background(), nil, nil, nil, false, "", "prometheus")
+ o.SetScrapeManager(&scrape.Manager{})
+
+ app := o.Appender(context.Background())
+ require.NotNil(t, app, "Expecting app")
+
+ _ = o.Close()
+
+ app = o.Appender(context.Background())
+ assert.Equal(t, noop, app)
+}
+
+func TestNoopAppender(t *testing.T) {
+ if _, err := noop.Add(labels.FromStrings("t", "v"), 1, 1); err == nil {
+ t.Error("expecting error from Add method of noopAppender")
+ }
+ if _, err := noop.Add(labels.FromStrings("t", "v"), 1, 1); err == nil {
+ t.Error("expecting error from Add method of noopAppender")
+ }
+
+ if err := noop.AddFast(0, 1, 1); err == nil {
+ t.Error("expecting error from AddFast method of noopAppender")
+ }
+
+ if err := noop.Commit(); err == nil {
+ t.Error("expecting error from Commit method of noopAppender")
+ }
+
+ if err := noop.Rollback(); err != nil {
+ t.Error("expecting no error from Rollback method of noopAppender")
+ }
+
+} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go b/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go new file mode 100644 index 00000000000..bd40e2b0459 --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go @@ -0,0 +1,236 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "context"
+ "errors"
+ "math"
+ "net"
+ "sync/atomic"
+
+ commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+ resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/storage"
+ "go.uber.org/zap"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumerdata"
+ "go.opentelemetry.io/collector/obsreport"
+ "go.opentelemetry.io/collector/translator/internaldata"
+)
+
+const (
+ portAttr = "port"
+ schemeAttr = "scheme"
+
+ transport = "http"
+ dataformat = "prometheus"
+)
+
+var errMetricNameNotFound = errors.New("metricName not found from labels")
+var errTransactionAborted = errors.New("transaction aborted")
+var errNoJobInstance = errors.New("job or instance cannot be found from labels")
+var errNoStartTimeMetrics = errors.New("process_start_time_seconds metric is missing")
+
+// A transaction corresponds to an individual scrape operation or stale report.
+// Whenever the prometheus receiver scrapes a target metric endpoint, a page of raw metrics is returned and a
+// transaction, which acts as the appender, is created to process this page of data: the scrapeLoop calls the Add or
+// AddFast method to insert metric data points. When finished, either Commit is called, meaning success, and the data
+// is flushed to the downstream consumer, or Rollback is called, meaning the data should be discarded, and all data
+// points are dropped.
+type transaction struct {
+ id int64
+ ctx context.Context
+ isNew bool
+ sink consumer.MetricsConsumer
+ job string
+ instance string
+ jobsMap *JobsMap
+ useStartTimeMetric bool
+ startTimeMetricRegex string
+ receiverName string
+ ms *metadataService
+ node *commonpb.Node
+ resource *resourcepb.Resource
+ metricBuilder *metricBuilder
+ logger *zap.Logger
+}
+
+func newTransaction(ctx context.Context, jobsMap *JobsMap, useStartTimeMetric bool, startTimeMetricRegex string, receiverName string, ms *metadataService, sink consumer.MetricsConsumer, logger *zap.Logger) *transaction {
+ return &transaction{
+ id: atomic.AddInt64(&idSeq, 1),
+ ctx: ctx,
+ isNew: true,
+ sink: sink,
+ jobsMap: jobsMap,
+ useStartTimeMetric: useStartTimeMetric,
+ startTimeMetricRegex: startTimeMetricRegex,
+ receiverName: receiverName,
+ ms: ms,
+ logger: logger,
+ }
+}
+
+// ensure *transaction has implemented the storage.Appender interface
+var _ storage.Appender = (*transaction)(nil)
+
+// Add always returns 0 to disable label caching.
+func (tr *transaction) Add(ls labels.Labels, t int64, v float64) (uint64, error) {
+ // Important: this case must be handled. Prometheus will still try to feed the appender some data even if it failed to
+ // scrape the remote target, if the previous scrape succeeded and some data were cached internally.
+ // In our case we don't need these data; simply dropping them is good enough. More details:
+ // https://github.com/prometheus/prometheus/blob/851131b0740be7291b98f295567a97f32fffc655/scrape/scrape.go#L933-L935
+ if math.IsNaN(v) {
+ return 0, nil
+ }
+
+ select {
+ case <-tr.ctx.Done():
+ return 0, errTransactionAborted
+ default:
+ }
+
+ if tr.isNew {
+ if err := tr.initTransaction(ls); err != nil {
+ return 0, err
+ }
+ }
+ return 0, tr.metricBuilder.AddDataPoint(ls, t, v)
+}
+
+// AddFast always returns an error, since caching is not supported by the Add() function.
+func (tr *transaction) AddFast(_ uint64, _ int64, _ float64) error {
+ return storage.ErrNotFound
+}
+
+func (tr *transaction) initTransaction(ls labels.Labels) error {
+ job, instance := ls.Get(model.JobLabel), ls.Get(model.InstanceLabel)
+ if job == "" || instance == "" {
+ return errNoJobInstance
+ }
+ // discover the binding target when this method is called for the first time during a transaction
+ mc, err := tr.ms.Get(job, instance)
+ if err != nil {
+ return err
+ }
+ if tr.jobsMap != nil {
+ tr.job = job
+ tr.instance = instance
+ }
+ tr.node, tr.resource = createNodeAndResource(job, instance, mc.SharedLabels().Get(model.SchemeLabel))
+ tr.metricBuilder = newMetricBuilder(mc, tr.useStartTimeMetric, tr.startTimeMetricRegex, tr.logger)
+ tr.isNew = false
+ return nil
+}
+
+// Commit submits metrics data to the consumers.
+func (tr *transaction) Commit() error {
+ if tr.isNew {
+ // When the scrape fails (e.g. the remote server cannot be reached), the scrape loop still commits even
+ // though it never added any data points, so the transaction has not been initialized.
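+ // Nothing was gathered in that case, so there is nothing to flush; report success.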
+ return nil
+ }
+
+ ctx := obsreport.StartMetricsReceiveOp(tr.ctx, tr.receiverName, transport)
+ metrics, _, _, err := tr.metricBuilder.Build()
+ if err != nil {
+ // The only error Build() returns is errNoDataToBuild, with numReceivedPoints set to zero.
+ obsreport.EndMetricsReceiveOp(ctx, dataformat, 0, err)
+ return err
+ }
+
+ if tr.useStartTimeMetric {
+ // startTime is mandatory in this case, but may be zero when the
+ // process_start_time_seconds metric is missing from the target endpoint.
+ if tr.metricBuilder.startTime == 0.0 {
+ // Since we are unable to adjust metrics properly, we will drop them
+ // and return an error.
+ err = errNoStartTimeMetrics
+ obsreport.EndMetricsReceiveOp(ctx, dataformat, 0, err)
+ return err
+ }
+
+ adjustStartTime(tr.metricBuilder.startTime, metrics)
+ } else {
+ // AdjustMetrics - jobsMap has to be non-nil in this case.
+ // Note: metrics could be empty after adjustment, which needs to be checked before passing it on to ConsumeMetricsData()
+ metrics, _ = NewMetricsAdjuster(tr.jobsMap.get(tr.job, tr.instance), tr.logger).AdjustMetrics(metrics)
+ }
+
+ numPoints := 0
+ if len(metrics) > 0 {
+ md := internaldata.OCToMetrics(consumerdata.MetricsData{
+ Node: tr.node,
+ Resource: tr.resource,
+ Metrics: metrics,
+ })
+ _, numPoints = md.MetricAndDataPointCount()
+ err = tr.sink.ConsumeMetrics(ctx, md)
+ }
+ obsreport.EndMetricsReceiveOp(ctx, dataformat, numPoints, err)
+ return err
+}
+
+func (tr *transaction) Rollback() error {
+ return nil
+}
+
+func adjustStartTime(startTime float64, metrics []*metricspb.Metric) {
+ startTimeTs := timestampFromFloat64(startTime)
+ for _, metric := range metrics {
+ switch metric.GetMetricDescriptor().GetType() {
+ case metricspb.MetricDescriptor_GAUGE_DOUBLE, metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
+ continue
+ default:
+ for _, ts := range metric.GetTimeseries() {
+ ts.StartTimestamp = startTimeTs
+ }
+ }
+ }
+}
+
+func timestampFromFloat64(ts float64) *timestamppb.Timestamp {
+ secs := int64(ts)
+ nanos := int64((ts - float64(secs)) * 1e9)
+ return &timestamppb.Timestamp{
+ Seconds: secs,
+ Nanos: int32(nanos),
+ }
+}
+
+func createNodeAndResource(job, instance, scheme string) (*commonpb.Node, *resourcepb.Resource) {
+ host, port, err := net.SplitHostPort(instance)
+ if err != nil {
+ host = instance
+ }
+ node := &commonpb.Node{
+ ServiceInfo: &commonpb.ServiceInfo{Name: job},
+ Identifier: &commonpb.ProcessIdentifier{
+ HostName: host,
+ },
+ }
+ resource := &resourcepb.Resource{
+ Labels: map[string]string{
+ portAttr: port,
+ schemeAttr: scheme,
+ },
+ }
+ return node, resource
+} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/transaction_test.go b/internal/otel_collector/receiver/prometheusreceiver/internal/transaction_test.go new file mode 100644 index 00000000000..d52737c7fb6 --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/transaction_test.go @@ -0,0 +1,163 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
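+//
+// The tests below drive a transaction through Add, Commit, and Rollback against a mocked scrape manager.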
+
+package internal
+
+import (
+ "context"
+ "math"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/scrape"
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/collector/consumer/consumertest"
+ "go.opentelemetry.io/collector/translator/internaldata"
+)
+
+func Test_transaction(t *testing.T) {
+ // discoveredLabels contain labels prior to any processing
+ discoveredLabels := labels.New(
+ labels.Label{
+ Name: model.AddressLabel,
+ Value: "address:8080",
+ },
+ labels.Label{
+ Name: model.MetricNameLabel,
+ Value: "foo",
+ },
+ labels.Label{
+ Name: model.SchemeLabel,
+ Value: "http",
+ },
+ )
+ // processedLabels contain label values after processing (e.g. relabeling)
+ processedLabels := labels.New(
+ labels.Label{
+ Name: model.InstanceLabel,
+ Value: "localhost:8080",
+ },
+ )
+
+ ms := &metadataService{
+ sm: &mockScrapeManager{targets: map[string][]*scrape.Target{
+ "test": {scrape.NewTarget(processedLabels, discoveredLabels, nil)},
+ }},
+ }
+
+ rn := "prometheus"
+
+ t.Run("Commit Without Adding", func(t *testing.T) {
+ nomc := consumertest.NewMetricsNop()
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, nomc, testLogger)
+ if got := tr.Commit(); got != nil {
+ t.Errorf("expecting nil from Commit() but got err %v", got)
+ }
+ })
+
+ t.Run("Rollback does nothing", func(t *testing.T) {
+ nomc := consumertest.NewMetricsNop()
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, nomc, testLogger)
+ if got := tr.Rollback(); got != nil {
+ t.Errorf("expecting nil from Rollback() but got err %v", got)
+ }
+ })
+
+ badLabels := labels.Labels([]labels.Label{{Name: "foo", Value: "bar"}})
+ t.Run("Add One No Target", func(t *testing.T) {
+ nomc := consumertest.NewMetricsNop()
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, nomc, testLogger)
+ if _, got := tr.Add(badLabels, time.Now().Unix()*1000, 1.0); got == nil {
+ t.Errorf("expecting error from Add() but got nil")
+ }
+ })
+
+ jobNotFoundLb := labels.Labels([]labels.Label{
+ {Name: "instance", Value: "localhost:8080"},
+ {Name: "job", Value: "test2"},
+ {Name: "foo", Value: "bar"}})
+ t.Run("Add One Job not found", func(t *testing.T) {
+ nomc := consumertest.NewMetricsNop()
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, nomc, testLogger)
+ if _, got := tr.Add(jobNotFoundLb, time.Now().Unix()*1000, 1.0); got == nil {
+ t.Errorf("expecting error from Add() but got nil")
+ }
+ })
+
+ goodLabels := labels.Labels([]labels.Label{{Name: "instance", Value: "localhost:8080"},
+ {Name: "job", Value: "test"},
+ {Name: "__name__", Value: "foo"}})
+ t.Run("Add One Good", func(t *testing.T) {
+ sink := new(consumertest.MetricsSink)
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, sink, testLogger)
+ if _, got := tr.Add(goodLabels, time.Now().Unix()*1000, 1.0); got != nil {
+ t.Errorf("expecting error == nil from Add() but got: %v\n", got)
+ }
+ tr.metricBuilder.startTime = 1.0 // set to a non-zero value
+ if got := tr.Commit(); got != nil {
+ t.Errorf("expecting nil from Commit() but got err %v", got)
+ }
+ expectedNode, expectedResource := createNodeAndResource("test", "localhost:8080", "http")
+ mds := sink.AllMetrics()
+ if len(mds) != 1 {
+ t.Fatalf("wanted one batch, got %v\n", sink.AllMetrics())
+ }
+ ocmds := internaldata.MetricsToOC(mds[0])
+ if len(ocmds) != 1 {
+ t.Fatalf("wanted one batch per node, got %v\n", sink.AllMetrics())
+ }
+ if
!proto.Equal(ocmds[0].Node, expectedNode) {
+ t.Errorf("generated node %v and expected node %v is different\n", ocmds[0].Node, expectedNode)
+ }
+ if !proto.Equal(ocmds[0].Resource, expectedResource) {
+ t.Errorf("generated resource %v and expected resource %v is different\n", ocmds[0].Resource, expectedResource)
+ }
+
+ // TODO: re-enable this when the unspecified OC type is handled
+ // assert.Len(t, ocmds[0].Metrics, 1)
+ })
+
+ t.Run("Error when start time is zero", func(t *testing.T) {
+ sink := new(consumertest.MetricsSink)
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, sink, testLogger)
+ if _, got := tr.Add(goodLabels, time.Now().Unix()*1000, 1.0); got != nil {
+ t.Errorf("expecting error == nil from Add() but got: %v\n", got)
+ }
+ tr.metricBuilder.startTime = 0 // zero value means the start time metric is missing
+ got := tr.Commit()
+ if got == nil {
+ t.Error("expecting error from Commit() but got nil")
+ } else if got.Error() != errNoStartTimeMetrics.Error() {
+ t.Errorf("expected error %q but got %q", errNoStartTimeMetrics, got)
+ }
+ })
+
+ t.Run("Drop NaN value", func(t *testing.T) {
+ sink := new(consumertest.MetricsSink)
+ tr := newTransaction(context.Background(), nil, true, "", rn, ms, sink, testLogger)
+ if _, got := tr.Add(goodLabels, time.Now().Unix()*1000, math.NaN()); got != nil {
+ t.Errorf("expecting error == nil from Add() but got: %v\n", got)
+ }
+ if got := tr.Commit(); got != nil {
+ t.Errorf("expecting nil from Commit() but got err %v", got)
+ }
+ if len(sink.AllMetrics()) != 0 {
+ t.Errorf("wanted nil, got %v\n", sink.AllMetrics())
+ }
+ })
+
+} diff --git a/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go b/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go new file mode 100644 index 00000000000..3c9828b154e --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheusreceiver
+
+import (
+ "context"
+ "time"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/scrape"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/receiver/prometheusreceiver/internal"
+)
+
+// pReceiver is the type that provides Prometheus scraper/receiver functionality.
+type pReceiver struct {
+ cfg *Config
+ consumer consumer.MetricsConsumer
+ cancelFunc context.CancelFunc
+
+ logger *zap.Logger
+}
+
+// newPrometheusReceiver creates a new prometheus.Receiver reference.
+func newPrometheusReceiver(logger *zap.Logger, cfg *Config, next consumer.MetricsConsumer) *pReceiver {
+ pr := &pReceiver{
+ cfg: cfg,
+ consumer: next,
+ logger: logger,
+ }
+ return pr
+}
+
+// Start starts Prometheus scraping, driven by the Config previously passed to newPrometheusReceiver.
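+//
+// It spawns the discovery manager and the scrape manager in their own goroutines and reports any error
+// from either one to the host as fatal.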
+func (r *pReceiver) Start(ctx context.Context, host component.Host) error { + discoveryCtx, cancel := context.WithCancel(context.Background()) + r.cancelFunc = cancel + + logger := internal.NewZapToGokitLogAdapter(r.logger) + + discoveryManager := discovery.NewManager(discoveryCtx, logger) + discoveryCfg := make(map[string]discovery.Configs) + for _, scrapeConfig := range r.cfg.PrometheusConfig.ScrapeConfigs { + discoveryCfg[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs + } + if err := discoveryManager.ApplyConfig(discoveryCfg); err != nil { + return err + } + go func() { + if err := discoveryManager.Run(); err != nil { + r.logger.Error("Discovery manager failed", zap.Error(err)) + host.ReportFatalError(err) + } + }() + + var jobsMap *internal.JobsMap + if !r.cfg.UseStartTimeMetric { + jobsMap = internal.NewJobsMap(2 * time.Minute) + } + ocaStore := internal.NewOcaStore(ctx, r.consumer, r.logger, jobsMap, r.cfg.UseStartTimeMetric, r.cfg.StartTimeMetricRegex, r.cfg.Name()) + + scrapeManager := scrape.NewManager(logger, ocaStore) + ocaStore.SetScrapeManager(scrapeManager) + if err := scrapeManager.ApplyConfig(r.cfg.PrometheusConfig); err != nil { + return err + } + go func() { + if err := scrapeManager.Run(discoveryManager.SyncCh()); err != nil { + r.logger.Error("Scrape manager failed", zap.Error(err)) + host.ReportFatalError(err) + } + }() + return nil +} + +// Shutdown stops and cancels the underlying Prometheus scrapers. +func (r *pReceiver) Shutdown(context.Context) error { + r.cancelFunc() + return nil +} diff --git a/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver_test.go b/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver_test.go new file mode 100644 index 00000000000..14bfff93c6e --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver_test.go @@ -0,0 +1,1183 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
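+//
+// The tests below spin up a mock Prometheus endpoint and validate the scraped, translated output end to end.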
+ +package prometheusreceiver + +import ( + "context" + "fmt" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "sync/atomic" + "testing" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "github.com/golang/protobuf/ptypes/wrappers" + promcfg "github.com/prometheus/prometheus/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" + "gopkg.in/yaml.v2" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/translator/internaldata" +) + +var logger = zap.NewNop() + +type mockPrometheusResponse struct { + code int + data string +} + +type mockPrometheus struct { + endpoints map[string][]mockPrometheusResponse + accessIndex map[string]*int32 + wg *sync.WaitGroup + srv *httptest.Server +} + +func newMockPrometheus(endpoints map[string][]mockPrometheusResponse) *mockPrometheus { + accessIndex := make(map[string]*int32) + wg := &sync.WaitGroup{} + wg.Add(len(endpoints)) + for k := range endpoints { + v := int32(0) + accessIndex[k] = &v + } + mp := &mockPrometheus{ + wg: wg, + accessIndex: accessIndex, + endpoints: endpoints, + } + srv := httptest.NewServer(mp) + mp.srv = srv + return mp +} + +func (mp *mockPrometheus) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + iptr, ok := mp.accessIndex[req.URL.Path] + if !ok { + rw.WriteHeader(404) + return + } + index := int(*iptr) + atomic.AddInt32(iptr, 1) + pages := mp.endpoints[req.URL.Path] + if index >= len(pages) { + if index == len(pages) { + mp.wg.Done() + } + rw.WriteHeader(404) + return + } + rw.WriteHeader(pages[index].code) + _, _ = rw.Write([]byte(pages[index].data)) +} + +func (mp *mockPrometheus) Close() { + mp.srv.Close() +} + +// ------------------------- +// EndToEnd Test and related +// ------------------------- + +var srvPlaceHolder = "__SERVER_ADDRESS__" + +type testData struct { + name string + pages []mockPrometheusResponse + node *commonpb.Node + resource *resourcepb.Resource + validateFunc func(t *testing.T, td *testData, result []consumerdata.MetricsData) +} + +// setupMockPrometheus to create a mocked prometheus based on targets, returning the server and a prometheus exporting +// config +func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, error) { + jobs := make([]map[string]interface{}, 0, len(tds)) + endpoints := make(map[string][]mockPrometheusResponse) + for _, t := range tds { + metricPath := fmt.Sprintf("/%s/metrics", t.name) + endpoints[metricPath] = t.pages + job := make(map[string]interface{}) + job["job_name"] = t.name + job["metrics_path"] = metricPath + job["scrape_interval"] = "1s" + job["static_configs"] = []map[string]interface{}{{"targets": []string{srvPlaceHolder}}} + jobs = append(jobs, job) + } + + if len(jobs) != len(tds) { + log.Fatal("len(jobs) != len(targets), make sure job names are unique") + } + config := make(map[string]interface{}) + config["scrape_configs"] = jobs + + mp := newMockPrometheus(endpoints) + cfg, err := yaml.Marshal(&config) + if err != nil { + return mp, nil, err + } + u, _ := url.Parse(mp.srv.URL) + host, port, _ := net.SplitHostPort(u.Host) + + // update 
node value (will use for validation) + for _, t := range tds { + t.node = &commonpb.Node{ + Identifier: &commonpb.ProcessIdentifier{ + HostName: host, + }, + ServiceInfo: &commonpb.ServiceInfo{ + Name: t.name, + }, + } + t.resource = &resourcepb.Resource{ + Labels: map[string]string{ + "scheme": "http", + "port": port, + }, + } + } + + cfgStr := strings.ReplaceAll(string(cfg), srvPlaceHolder, u.Host) + pCfg, err := promcfg.Load(cfgStr) + return mp, pCfg, err +} + +func verifyNumScrapeResults(t *testing.T, td *testData, mds []consumerdata.MetricsData) { + want := 0 + for _, p := range td.pages { + if p.code == 200 { + want++ + } + } + if l := len(mds); l != want { + t.Errorf("want %d, but got %d\n", want, l) + } +} + +func doCompare(name string, t *testing.T, want, got interface{}) { + t.Run(name, func(t *testing.T) { + assert.EqualValues(t, want, got) + }) +} + +// Test data and validation functions for EndToEnd test +// Make sure every page has a gauge, we are relying on it to figure out the starttime if needed + +// target1 has one gague, two counts of a same family, one histogram and one summary. We are expecting the first +// successful scrape will produce a metric with only the gauge metric, and the 2nd successful scrape will produce all +// the metrics, with the cumulative types using the firstpage's timestamp as starttime, and values are deltas with the +// one from the first page (summary quantiles will be as it is) +var target1Page1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 +http_request_duration_seconds_bucket{le="0.5"} 1500 +http_request_duration_seconds_bucket{le="1"} 2000 +http_request_duration_seconds_bucket{le="+Inf"} 2500 +http_request_duration_seconds_sum 5000 +http_request_duration_seconds_count 2500 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 5 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +` + +var target1Page2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 18 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 199 +http_requests_total{method="post",code="400"} 12 + +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1100 +http_request_duration_seconds_bucket{le="0.5"} 1600 +http_request_duration_seconds_bucket{le="1"} 2100 +http_request_duration_seconds_bucket{le="+Inf"} 2600 +http_request_duration_seconds_sum 5050 +http_request_duration_seconds_count 2600 + +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 6 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5002 +rpc_duration_seconds_count 1001 +` + +func verifyTarget1(t *testing.T, td *testData, mds []consumerdata.MetricsData) { + verifyNumScrapeResults(t, td, mds) + m1 := mds[0] + // m1 shall only have a gauge + if l := len(m1.Metrics); l != 1 { + t.Errorf("want 1, but got %v\n", l) + } + + // only gauge value is returned from the first scrape + wantG1 := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Value: &metricspb.Point_DoubleValue{DoubleValue: 19.0}}, + }, + }, + }, + } + gotG1 := m1.Metrics[0] + // relying on the timestamps from gagues as startTimestamps + ts1 := gotG1.Timeseries[0].Points[0].Timestamp + // set this timestamp to wantG1 + wantG1.Timeseries[0].Points[0].Timestamp = ts1 + doCompare("scrape1", t, wantG1, gotG1) + + // verify the 2nd metricData + m2 := mds[1] + ts2 := m2.Metrics[0].Timeseries[0].Points[0].Timestamp + + want2 := &consumerdata.MetricsData{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE}, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 99.0}}, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 7.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_request_duration_seconds", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + Description: "A histogram of the request duration.", + Unit: "s", + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + Points: []*metricspb.Point{ + { + Timestamp: ts2, + Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{0.05, 0.5, 1}, + }, + }, + }, + Count: 100, + Sum: 50.0, + Buckets: []*metricspb.DistributionValue_Bucket{ + {Count: 100}, + {Count: 0}, + {Count: 0}, + {Count: 0}, + }, + }}, + }, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "rpc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + Description: "A summary of the RPC 
duration in seconds.", + Unit: "s", + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + Points: []*metricspb.Point{ + { + Timestamp: ts2, + Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 2}, + Count: &wrappers.Int64Value{Value: 1}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + { + Percentile: 1, + Value: 1, + }, + { + Percentile: 90, + Value: 6, + }, + { + Percentile: 99, + Value: 8, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + doCompare("scrape2", t, want2, &m2) +} + +// target2 is going to have 5 pages, and there's a newly appeared item from the 2nd page. we are expecting the new +// metric will appears on the 3rd scrape, and it uses the timestamp from the 2nd page as starttime, and value is delta +// from the 2nd page. +// with the 4th page, we are simulating a reset (values smaller than previous), cumulative types shall be skipped at +// this run. with the 5th page, cumulative types will be delta with the 4th one +var target2Page1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 18 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 10 +http_requests_total{method="post",code="400"} 50 +` + +var target2Page2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 50 +http_requests_total{method="post",code="400"} 60 +http_requests_total{method="post",code="500"} 3 +` + +var target2Page3 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 50 +http_requests_total{method="post",code="400"} 60 +http_requests_total{method="post",code="500"} 5 +` + +var target2Page4 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 49 +http_requests_total{method="post",code="400"} 59 +http_requests_total{method="post",code="500"} 3 +` + +var target2Page5 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# HELP http_requests_total The total number of HTTP requests. 
+# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 50 +http_requests_total{method="post",code="400"} 59 +http_requests_total{method="post",code="500"} 5 +` + +func verifyTarget2(t *testing.T, td *testData, mds []consumerdata.MetricsData) { + verifyNumScrapeResults(t, td, mds) + m1 := mds[0] + // m1 shall only have a gauge + if l := len(m1.Metrics); l != 1 { + t.Errorf("want 1, but got %v\n", l) + } + + // only gauge value is returned from the first scrape + wantG1 := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + }, + }, + }, + } + gotG1 := m1.Metrics[0] + ts1 := gotG1.Timeseries[0].Points[0].Timestamp + // set this timestamp to wantG1 + wantG1.Timeseries[0].Points[0].Timestamp = ts1 + doCompare("scrape1", t, wantG1, gotG1) + + // verify the 2nd metricData + m2 := mds[1] + ts2 := m2.Metrics[0].Timeseries[0].Points[0].Timestamp + + want2 := &consumerdata.MetricsData{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 16.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 40.0}}, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 10.0}}, + }, + }, + }, + }, + }, + } + doCompare("scrape2", t, want2, &m2) + + // verify the 3rd metricData, with the new code=500 counter which first appeared on 2nd run + m3 := mds[2] + // its start timestamp shall be from the 2nd run + ts3 := m3.Metrics[0].Timeseries[0].Points[0].Timestamp + + want3 := &consumerdata.MetricsData{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 16.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", 
HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 40.0}}, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 10.0}}, + }, + }, + { + StartTimestamp: ts2, + LabelValues: []*metricspb.LabelValue{ + {Value: "500", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}}, + }, + }, + }, + }, + }, + } + doCompare("scrape3", t, want3, &m3) + + // verify the 4th metricData which reset happens, all cumulative types shall be absent + m4 := mds[3] + ts4 := m4.Metrics[0].Timeseries[0].Points[0].Timestamp + + want4 := &consumerdata.MetricsData{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts4, Value: &metricspb.Point_DoubleValue{DoubleValue: 16.0}}, + }, + }, + }, + }, + }, + } + doCompare("scrape4", t, want4, &m4) + + // verify the 4th metricData which reset happens, all cumulative types shall be absent + m5 := mds[4] + // its start timestamp shall be from the 3rd run + ts5 := m5.Metrics[0].Timeseries[0].Points[0].Timestamp + + want5 := &consumerdata.MetricsData{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 16.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts4, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 1.0}}, + }, + }, + { + StartTimestamp: ts4, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 0.0}}, + }, + }, + { + StartTimestamp: ts4, + LabelValues: []*metricspb.LabelValue{ + {Value: "500", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}}, + }, + }, + }, + }, + }, + } + doCompare("scrape5", t, want5, &m5) +} + +// target3 for complicated data types, including summaries and histograms. 
one of the summary and histogram have only +// sum/count, for the summary it's valid, however the histogram one is not, but it shall not cause the scrape to fail +var target3Page1 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 18 + +# A histogram, which has a pretty complex representation in the text format: +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.2"} 10000 +http_request_duration_seconds_bucket{le="0.5"} 11000 +http_request_duration_seconds_bucket{le="1"} 12001 +http_request_duration_seconds_bucket{le="+Inf"} 13003 +http_request_duration_seconds_sum 50000 +http_request_duration_seconds_count 13003 + +# A corrupted histogram with only sum and count +# HELP corrupted_hist A corrupted_hist. +# TYPE corrupted_hist histogram +corrupted_hist_sum 100 +corrupted_hist_count 10 + +# Finally a summary, which has a complex representation, too: +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{foo="bar" quantile="0.01"} 31 +rpc_duration_seconds{foo="bar" quantile="0.05"} 35 +rpc_duration_seconds{foo="bar" quantile="0.5"} 47 +rpc_duration_seconds{foo="bar" quantile="0.9"} 70 +rpc_duration_seconds{foo="bar" quantile="0.99"} 76 +rpc_duration_seconds_sum{foo="bar"} 8000 +rpc_duration_seconds_count{foo="bar"} 900 +rpc_duration_seconds_sum{foo="no_quantile"} 100 +rpc_duration_seconds_count{foo="no_quantile"} 50 +` + +var target3Page2 = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 16 + +# A histogram, which has a pretty complex representation in the text format: +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.2"} 11000 +http_request_duration_seconds_bucket{le="0.5"} 12000 +http_request_duration_seconds_bucket{le="1"} 13001 +http_request_duration_seconds_bucket{le="+Inf"} 14003 +http_request_duration_seconds_sum 50100 +http_request_duration_seconds_count 14003 + +# A corrupted histogram with only sum and count +# HELP corrupted_hist A corrupted_hist. +# TYPE corrupted_hist histogram +corrupted_hist_sum 101 +corrupted_hist_count 15 + +# Finally a summary, which has a complex representation, too: +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. 
+# TYPE rpc_duration_seconds summary +rpc_duration_seconds{foo="bar" quantile="0.01"} 32 +rpc_duration_seconds{foo="bar" quantile="0.05"} 35 +rpc_duration_seconds{foo="bar" quantile="0.5"} 47 +rpc_duration_seconds{foo="bar" quantile="0.9"} 70 +rpc_duration_seconds{foo="bar" quantile="0.99"} 77 +rpc_duration_seconds_sum{foo="bar"} 8100 +rpc_duration_seconds_count{foo="bar"} 950 +rpc_duration_seconds_sum{foo="no_quantile"} 101 +rpc_duration_seconds_count{foo="no_quantile"} 55 +` + +func verifyTarget3(t *testing.T, td *testData, mds []consumerdata.MetricsData) { + verifyNumScrapeResults(t, td, mds) + m1 := mds[0] + // m1 shall only have a gauge + if l := len(m1.Metrics); l != 1 { + t.Errorf("want 1, but got %v\n", l) + } + + // only gauge value is returned from the first scrape + wantG1 := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE}, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + }, + }, + }, + } + gotG1 := m1.Metrics[0] + ts1 := gotG1.Timeseries[0].Points[0].Timestamp + // set this timestamp to wantG1 + wantG1.Timeseries[0].Points[0].Timestamp = ts1 + doCompare("scrape1", t, wantG1, gotG1) + + // verify the 2nd metricData + m2 := mds[1] + ts2 := m2.Metrics[0].Timeseries[0].Points[0].Timestamp + + want2 := &consumerdata.MetricsData{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 16.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_request_duration_seconds", + Description: "A histogram of the request duration.", + Unit: "s", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + Points: []*metricspb.Point{ + { + Timestamp: ts2, + Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{0.2, 0.5, 1}, + }, + }, + }, + Count: 1000, + Sum: 100, + Buckets: []*metricspb.DistributionValue_Bucket{ + {Count: 1000}, + {Count: 0}, + {Count: 0}, + {Count: 0}, + }, + }, + }, + }, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "rpc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}, + Description: "A summary of the RPC duration in seconds.", + Unit: "s", + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + { + Timestamp: ts2, + Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 100}, + Count: &wrappers.Int64Value{Value: 50}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + { + Percentile: 1, + Value: 32, + }, + { + Percentile: 5, + Value: 
35, + }, + { + Percentile: 50, + Value: 47, + }, + { + Percentile: 90, + Value: 70, + }, + { + Percentile: 99, + Value: 77, + }, + }, + }, + }, + }, + }, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{{Value: "no_quantile", HasValue: true}}, + Points: []*metricspb.Point{ + { + Timestamp: ts2, + Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 1}, + Count: &wrappers.Int64Value{Value: 5}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: nil, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + doCompare("scrape2", t, want2, &m2) +} + +// TestEndToEnd end to end test executor +func TestEndToEnd(t *testing.T) { + // 1. setup input data + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: target1Page1}, + {code: 500, data: ""}, + {code: 200, data: target1Page2}, + }, + validateFunc: verifyTarget1, + }, + { + name: "target2", + pages: []mockPrometheusResponse{ + {code: 200, data: target2Page1}, + {code: 200, data: target2Page2}, + {code: 500, data: ""}, + {code: 200, data: target2Page3}, + {code: 200, data: target2Page4}, + {code: 500, data: ""}, + {code: 200, data: target2Page5}, + }, + validateFunc: verifyTarget2, + }, + { + name: "target3", + pages: []mockPrometheusResponse{ + {code: 200, data: target3Page1}, + {code: 200, data: target3Page2}, + }, + validateFunc: verifyTarget3, + }, + } + + testEndToEnd(t, targets, false) +} + +var startTimeMetricPage = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 +# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{method="post",code="200"} 100 +http_requests_total{method="post",code="400"} 5 +# HELP http_request_duration_seconds A histogram of the request duration. +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 1000 +http_request_duration_seconds_bucket{le="0.5"} 1500 +http_request_duration_seconds_bucket{le="1"} 2000 +http_request_duration_seconds_bucket{le="+Inf"} 2500 +http_request_duration_seconds_sum 5000 +http_request_duration_seconds_count 2500 +# HELP rpc_duration_seconds A summary of the RPC duration in seconds. +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 1 +rpc_duration_seconds{quantile="0.9"} 5 +rpc_duration_seconds{quantile="0.99"} 8 +rpc_duration_seconds_sum 5000 +rpc_duration_seconds_count 1000 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. 
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 400.8
+`
+
+var startTimeMetricPageStartTimestamp = &timestamppb.Timestamp{Seconds: 400, Nanos: 800000000}
+
+const numStartTimeMetricPageTimeseries = 6
+
+func verifyStartTimeMetricPage(t *testing.T, _ *testData, mds []consumerdata.MetricsData) {
+	numTimeseries := 0
+	for _, cmd := range mds {
+		for _, metric := range cmd.Metrics {
+			timestamp := startTimeMetricPageStartTimestamp
+			switch metric.GetMetricDescriptor().GetType() {
+			case metricspb.MetricDescriptor_GAUGE_DOUBLE, metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
+				timestamp = nil
+			}
+			for _, ts := range metric.GetTimeseries() {
+				assert.Equal(t, timestamp, ts.GetStartTimestamp())
+				numTimeseries++
+			}
+		}
+	}
+	assert.Equal(t, numStartTimeMetricPageTimeseries, numTimeseries)
+}
+
+// TestStartTimeMetric validates that timeseries have their start time set from the
+// 'process_start_time_seconds' metric.
+func TestStartTimeMetric(t *testing.T) {
+	targets := []*testData{
+		{
+			name: "target1",
+			pages: []mockPrometheusResponse{
+				{code: 200, data: startTimeMetricPage},
+			},
+			validateFunc: verifyStartTimeMetricPage,
+		},
+	}
+	testEndToEnd(t, targets, true)
+}
+
+func testEndToEnd(t *testing.T, targets []*testData, useStartTimeMetric bool) {
+	// 1. setup mock server
+	mp, cfg, err := setupMockPrometheus(targets...)
+	require.Nilf(t, err, "Failed to create Prometheus config: %v", err)
+	defer mp.Close()
+
+	cms := new(consumertest.MetricsSink)
+	rcvr := newPrometheusReceiver(logger, &Config{PrometheusConfig: cfg, UseStartTimeMetric: useStartTimeMetric}, cms)
+
+	require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost()), "Failed to invoke Start: %v", err)
+	defer rcvr.Shutdown(context.Background())
+
+	// wait for all provided data to be scraped
+	mp.wg.Wait()
+	metrics := cms.AllMetrics()
+
+	// split and store results by target name
+	results := make(map[string][]consumerdata.MetricsData)
+	for _, m := range metrics {
+		ocmds := internaldata.MetricsToOC(m)
+		for _, ocmd := range ocmds {
+			result, ok := results[ocmd.Node.ServiceInfo.Name]
+			if !ok {
+				result = make([]consumerdata.MetricsData, 0)
+			}
+			results[ocmd.Node.ServiceInfo.Name] = append(result, ocmd)
+		}
+	}
+
+	lres, lep := len(results), len(mp.endpoints)
+	assert.Equalf(t, lep, lres, "want %d targets, but got %v\n", lep, lres)
+
+	// loop to validate outputs for each target
+	for _, target := range targets {
+		target.validateFunc(t, target, results[target.name])
+	}
+}
+
+var startTimeMetricRegexPage = `
+# HELP go_threads Number of OS threads created
+# TYPE go_threads gauge
+go_threads 19
+# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{method="post",code="200"} 100
+http_requests_total{method="post",code="400"} 5
+# HELP http_request_duration_seconds A histogram of the request duration.
+# TYPE http_request_duration_seconds histogram
+http_request_duration_seconds_bucket{le="0.05"} 1000
+http_request_duration_seconds_bucket{le="0.5"} 1500
+http_request_duration_seconds_bucket{le="1"} 2000
+http_request_duration_seconds_bucket{le="+Inf"} 2500
+http_request_duration_seconds_sum 5000
+http_request_duration_seconds_count 2500
+# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 1
+rpc_duration_seconds{quantile="0.9"} 5
+rpc_duration_seconds{quantile="0.99"} 8
+rpc_duration_seconds_sum 5000
+rpc_duration_seconds_count 1000
+# HELP example_process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE example_process_start_time_seconds gauge
+example_process_start_time_seconds 400.8
+`
+
+// TestStartTimeMetricRegex validates that timeseries have their start time set from a
+// metric whose name matches the configured start_time_metric_regex.
+func TestStartTimeMetricRegex(t *testing.T) {
+	targets := []*testData{
+		{
+			name: "target1",
+			pages: []mockPrometheusResponse{
+				{code: 200, data: startTimeMetricRegexPage},
+			},
+			validateFunc: verifyStartTimeMetricPage,
+		},
+		{
+			name: "target2",
+			pages: []mockPrometheusResponse{
+				{code: 200, data: startTimeMetricPage},
+			},
+			validateFunc: verifyStartTimeMetricPage,
+		},
+	}
+	testEndToEndRegex(t, targets, true, "^(.+_)*process_start_time_seconds$")
+}
+
+func testEndToEndRegex(t *testing.T, targets []*testData, useStartTimeMetric bool, startTimeMetricRegex string) {
+	// 1. setup mock server
+	mp, cfg, err := setupMockPrometheus(targets...)
+	require.Nilf(t, err, "Failed to create Prometheus config: %v", err)
+	defer mp.Close()
+
+	cms := new(consumertest.MetricsSink)
+	rcvr := newPrometheusReceiver(logger, &Config{PrometheusConfig: cfg, UseStartTimeMetric: useStartTimeMetric, StartTimeMetricRegex: startTimeMetricRegex}, cms)
+
+	require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost()), "Failed to invoke Start: %v", err)
+	defer rcvr.Shutdown(context.Background())
+
+	// wait for all provided data to be scraped
+	mp.wg.Wait()
+	metrics := cms.AllMetrics()
+
+	// split and store results by target name
+	results := make(map[string][]consumerdata.MetricsData)
+	for _, m := range metrics {
+		ocmds := internaldata.MetricsToOC(m)
+		for _, ocmd := range ocmds {
+			result, ok := results[ocmd.Node.ServiceInfo.Name]
+			if !ok {
+				result = make([]consumerdata.MetricsData, 0)
+			}
+			results[ocmd.Node.ServiceInfo.Name] = append(result, ocmd)
+		}
+	}
+
+	lres, lep := len(results), len(mp.endpoints)
+	assert.Equalf(t, lep, lres, "want %d targets, but got %v\n", lep, lres)
+
+	// loop to validate outputs for each target
+	for _, target := range targets {
+		target.validateFunc(t, target, results[target.name])
+	}
+}
diff --git a/internal/otel_collector/receiver/prometheusreceiver/scrapeloop-flowchart.png b/internal/otel_collector/receiver/prometheusreceiver/scrapeloop-flowchart.png
new file mode 100644
index 00000000000..5853a9df927
Binary files /dev/null and b/internal/otel_collector/receiver/prometheusreceiver/scrapeloop-flowchart.png differ
diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/config.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/config.yaml
new file mode 100644
index 00000000000..32513a9ad79
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/config.yaml
@@ -0,0 +1,24 @@
+receivers:
+  prometheus:
+  prometheus/customname:
+    buffer_period: 234
+    buffer_count: 45
+    use_start_time_metric: true
+    start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
+    config:
+      scrape_configs:
+        - job_name: 'demo'
+          scrape_interval: 5s
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [prometheus]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/config_env.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/config_env.yaml
new file mode 100644
index 00000000000..0ca8640781f
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/config_env.yaml
@@ -0,0 +1,19 @@
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: ${JOBNAME}
+          scrape_interval: 5s
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [prometheus]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/config_k8s.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/config_k8s.yaml
new file mode 100644
index 00000000000..c73ed6eee55
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/config_k8s.yaml
@@ -0,0 +1,42 @@
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: apps
+          kubernetes_sd_configs:
+            - role: pod
+              selectors:
+                - role: pod
+                  # only scrape data from pods running on the same node as collector
+                  field: "spec.nodeName=$NODE_NAME"
+          relabel_configs:
+            # scrape pods annotated with "prometheus.io/scrape: true"
+            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+              regex: "true"
+              action: keep
+            # read the port from "prometheus.io/port: <port>" annotation and update scraping address accordingly
+            - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+              action: replace
+              target_label: __address__
+              regex: ([^:]+)(?::\d+)?;(\d+)
+              # escaped $1:$2
+              replacement: $$1:$$2
+            - source_labels: [__meta_kubernetes_namespace]
+              action: replace
+              target_label: kubernetes_namespace
+            - source_labels: [__meta_kubernetes_pod_name]
+              action: replace
+              target_label: kubernetes_pod_name
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [prometheus]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/config_sd.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/config_sd.yaml
new file mode 100644
index 00000000000..1179b1b6122
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/config_sd.yaml
@@ -0,0 +1,85 @@
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: file
+          file_sd_configs:
+            - files:
+                - 'dummy.json'
+        - job_name: k8s
+          kubernetes_sd_configs:
+            - role: node
+        - job_name: ec2
+          ec2_sd_configs:
+            - region: us-west-2
+        - job_name: gce
+          gce_sd_configs:
+            - project: my-project
+              zone: my-zone
+        - job_name: dns
+          dns_sd_configs:
+            - names:
+                - name1
+        - job_name: openstack
+          openstack_sd_configs:
+            - role: hypervisor
+              region: region
+        - job_name: hetzner
+          hetzner_sd_configs:
+            - role: robot
+        - job_name: marathon
+          marathon_sd_configs:
+            - servers:
+                - server1
+        - job_name: nerve
+          nerve_sd_configs:
+            - servers:
+                - server1
+              paths:
+                - /path1
+        - job_name: serverset
+          serverset_sd_configs:
+            - servers:
+                - server1
+              paths:
+                - /path1
+        - job_name: triton
+          triton_sd_configs:
+            - account: account
+              dns_suffix: suffix
+              endpoint: endpoint
+        - job_name: eureka
+          eureka_sd_configs:
+            - server: http://server1
+        - job_name: azure
+          azure_sd_configs:
+            - subscription_id: subscription
+              tenant_id: tenant
+              client_id: client
+              client_secret: secret
+        - job_name: consul
+          consul_sd_configs:
+            - server: server1
+        - job_name: digitalocean
+          digitalocean_sd_configs:
+            - basic_auth:
+                username: username
+                password: password
+        - job_name: dockerswarm_sd_config
+          dockerswarm_sd_configs:
+            - host: host
+              role: nodes
+
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [prometheus]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml
new file mode 100644
index 00000000000..3f8df1acedd
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml
@@ -0,0 +1,20 @@
+receivers:
+  prometheus:
+    config:
+      use_start_time_metric: true
+      scrape_configs:
+        - job_name: 'demo'
+          scrape_interval: 5s
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [prometheus]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-section.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-section.yaml
new file mode 100644
index 00000000000..9dfe6cebab3
--- /dev/null
+++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-section.yaml
@@ -0,0 +1,20 @@
+receivers:
+  prometheus:
+    unknow_section: 1
+    config:
+      scrape_configs:
+        - job_name: 'demo'
+          scrape_interval: 5s
+
+processors:
+  exampleprocessor:
+
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [prometheus]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
diff --git a/internal/otel_collector/receiver/receiverhelper/factory.go b/internal/otel_collector/receiver/receiverhelper/factory.go
new file mode 100644
index 00000000000..206b2777275
--- /dev/null
+++ b/internal/otel_collector/receiver/receiverhelper/factory.go
@@ -0,0 +1,157 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package receiverhelper
+
+import (
+	"context"
+
+	"github.com/spf13/viper"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+// FactoryOption applies changes to the receiver factory being built.
+type FactoryOption func(o *factory)
+
+// WithCustomUnmarshaler sets a custom unmarshaler; the returned factory
+// will implement component.ConfigUnmarshaler.
+func WithCustomUnmarshaler(customUnmarshaler component.CustomUnmarshaler) FactoryOption {
+	return func(o *factory) {
+		o.customUnmarshaler = customUnmarshaler
+	}
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTraceReceiver.
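+//
+// A hedged usage sketch (the type name and constructor functions below are
+// illustrative, not part of this package):
+//
+//	factory := NewFactory(
+//		configmodels.Type("examplereceiver"),
+//		createDefaultConfig,
+//		WithTraces(createTraceReceiver),
+//		WithMetrics(createMetricsReceiver))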
+func WithTraces(createTraceReceiver CreateTraceReceiver) FactoryOption {
+	return func(o *factory) {
+		o.createTraceReceiver = createTraceReceiver
+	}
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetricsReceiver.
+func WithMetrics(createMetricsReceiver CreateMetricsReceiver) FactoryOption {
+	return func(o *factory) {
+		o.createMetricsReceiver = createMetricsReceiver
+	}
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogsReceiver.
+func WithLogs(createLogsReceiver CreateLogsReceiver) FactoryOption {
+	return func(o *factory) {
+		o.createLogsReceiver = createLogsReceiver
+	}
+}
+
+// CreateDefaultConfig is the equivalent of component.ReceiverFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() configmodels.Receiver
+
+// CreateTraceReceiver is the equivalent of component.ReceiverFactory.CreateTracesReceiver()
+type CreateTraceReceiver func(context.Context, component.ReceiverCreateParams, configmodels.Receiver, consumer.TracesConsumer) (component.TracesReceiver, error)
+
+// CreateMetricsReceiver is the equivalent of component.ReceiverFactory.CreateMetricsReceiver()
+type CreateMetricsReceiver func(context.Context, component.ReceiverCreateParams, configmodels.Receiver, consumer.MetricsConsumer) (component.MetricsReceiver, error)
+
+// CreateLogsReceiver is the equivalent of component.ReceiverFactory.CreateLogsReceiver()
+type CreateLogsReceiver func(context.Context, component.ReceiverCreateParams, configmodels.Receiver, consumer.LogsConsumer) (component.LogsReceiver, error)
+
+type factory struct {
+	cfgType               configmodels.Type
+	customUnmarshaler     component.CustomUnmarshaler
+	createDefaultConfig   CreateDefaultConfig
+	createTraceReceiver   CreateTraceReceiver
+	createMetricsReceiver CreateMetricsReceiver
+	createLogsReceiver    CreateLogsReceiver
+}
+
+// NewFactory returns a component.ReceiverFactory.
+func NewFactory(
+	cfgType configmodels.Type,
+	createDefaultConfig CreateDefaultConfig,
+	options ...FactoryOption) component.ReceiverFactory {
+	f := &factory{
+		cfgType:             cfgType,
+		createDefaultConfig: createDefaultConfig,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	var ret component.ReceiverFactory
+	if f.customUnmarshaler != nil {
+		ret = &factoryWithUnmarshaler{f}
+	} else {
+		ret = f
+	}
+	return ret
+}
+
+// Type gets the type of the Receiver config created by this factory.
+func (f *factory) Type() configmodels.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the receiver.
+func (f *factory) CreateDefaultConfig() configmodels.Receiver {
+	return f.createDefaultConfig()
+}
+
+// CreateTracesReceiver creates a component.TracesReceiver based on this config.
+func (f *factory) CreateTracesReceiver(
+	ctx context.Context,
+	params component.ReceiverCreateParams,
+	cfg configmodels.Receiver,
+	nextConsumer consumer.TracesConsumer) (component.TracesReceiver, error) {
+	if f.createTraceReceiver != nil {
+		return f.createTraceReceiver(ctx, params, cfg, nextConsumer)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsReceiver creates a component.MetricsReceiver based on this config.
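+// If no CreateMetricsReceiver was supplied via WithMetrics, it returns
+// configerror.ErrDataTypeIsNotSupported.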
+func (f *factory) CreateMetricsReceiver(
+	ctx context.Context,
+	params component.ReceiverCreateParams,
+	cfg configmodels.Receiver,
+	nextConsumer consumer.MetricsConsumer) (component.MetricsReceiver, error) {
+	if f.createMetricsReceiver != nil {
+		return f.createMetricsReceiver(ctx, params, cfg, nextConsumer)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsReceiver creates a component.LogsReceiver based on this config.
+func (f *factory) CreateLogsReceiver(
+	ctx context.Context,
+	params component.ReceiverCreateParams,
+	cfg configmodels.Receiver,
+	nextConsumer consumer.LogsConsumer,
+) (component.LogsReceiver, error) {
+	if f.createLogsReceiver != nil {
+		return f.createLogsReceiver(ctx, params, cfg, nextConsumer)
+	}
+	return nil, configerror.ErrDataTypeIsNotSupported
+}
+
+var _ component.ConfigUnmarshaler = (*factoryWithUnmarshaler)(nil)
+
+type factoryWithUnmarshaler struct {
+	*factory
+}
+
+// Unmarshal unmarshals the config using the provided custom unmarshaler.
+func (f *factoryWithUnmarshaler) Unmarshal(componentViperSection *viper.Viper, intoCfg interface{}) error {
+	return f.customUnmarshaler(componentViperSection, intoCfg)
+}
diff --git a/internal/otel_collector/receiver/receiverhelper/factory_test.go b/internal/otel_collector/receiver/receiverhelper/factory_test.go
new file mode 100644
index 00000000000..a215cd6d060
--- /dev/null
+++ b/internal/otel_collector/receiver/receiverhelper/factory_test.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package receiverhelper + +import ( + "context" + "errors" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" +) + +const typeStr = "test" + +var defaultCfg = &configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, +} + +func TestNewFactory(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + _, ok := factory.(component.ConfigUnmarshaler) + assert.False(t, ok) + _, err := factory.CreateTracesReceiver(context.Background(), component.ReceiverCreateParams{}, defaultCfg, nil) + assert.Error(t, err) + _, err = factory.CreateMetricsReceiver(context.Background(), component.ReceiverCreateParams{}, defaultCfg, nil) + assert.Error(t, err) + _, err = factory.CreateLogsReceiver(context.Background(), component.ReceiverCreateParams{}, defaultCfg, nil) + assert.Error(t, err) +} + +func TestNewFactory_WithConstructors(t *testing.T) { + factory := NewFactory( + typeStr, + defaultConfig, + WithTraces(createTraceReceiver), + WithMetrics(createMetricsReceiver), + WithLogs(createLogsReceiver), + WithCustomUnmarshaler(customUnmarshaler)) + assert.EqualValues(t, typeStr, factory.Type()) + assert.EqualValues(t, defaultCfg, factory.CreateDefaultConfig()) + + fu, ok := factory.(component.ConfigUnmarshaler) + assert.True(t, ok) + assert.Equal(t, errors.New("my error"), fu.Unmarshal(nil, nil)) + + _, err := factory.CreateTracesReceiver(context.Background(), component.ReceiverCreateParams{}, defaultCfg, nil) + assert.NoError(t, err) + + _, err = factory.CreateMetricsReceiver(context.Background(), component.ReceiverCreateParams{}, defaultCfg, nil) + assert.NoError(t, err) + + _, err = factory.CreateLogsReceiver(context.Background(), component.ReceiverCreateParams{}, defaultCfg, nil) + assert.NoError(t, err) +} + +func defaultConfig() configmodels.Receiver { + return defaultCfg +} + +func createTraceReceiver(context.Context, component.ReceiverCreateParams, configmodels.Receiver, consumer.TracesConsumer) (component.TracesReceiver, error) { + return nil, nil +} + +func createMetricsReceiver(context.Context, component.ReceiverCreateParams, configmodels.Receiver, consumer.MetricsConsumer) (component.MetricsReceiver, error) { + return nil, nil +} + +func createLogsReceiver(context.Context, component.ReceiverCreateParams, configmodels.Receiver, consumer.LogsConsumer) (component.LogsReceiver, error) { + return nil, nil +} + +func customUnmarshaler(*viper.Viper, interface{}) error { + return errors.New("my error") +} diff --git a/internal/otel_collector/receiver/scraperhelper/errors.go b/internal/otel_collector/receiver/scraperhelper/errors.go new file mode 100644 index 00000000000..6134aebd17f --- /dev/null +++ b/internal/otel_collector/receiver/scraperhelper/errors.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package scraperhelper + +import ( + "errors" + "fmt" + "strings" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +// CombineScrapeErrors converts a list of errors into one error. +func CombineScrapeErrors(errs []error) error { + partialScrapeErr := false + for _, err := range errs { + var partialError consumererror.PartialScrapeError + if errors.As(err, &partialError) { + partialScrapeErr = true + break + } + } + + if !partialScrapeErr { + return componenterror.CombineErrors(errs) + } + + errMsgs := make([]string, 0, len(errs)) + failedScrapeCount := 0 + for _, err := range errs { + if partialError, isPartial := err.(consumererror.PartialScrapeError); isPartial { + failedScrapeCount += partialError.Failed + } + + errMsgs = append(errMsgs, err.Error()) + } + + var err error + if len(errs) == 1 { + err = errs[0] + } else { + err = fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) + } + + return consumererror.NewPartialScrapeError(err, failedScrapeCount) +} diff --git a/internal/otel_collector/receiver/scraperhelper/errors_test.go b/internal/otel_collector/receiver/scraperhelper/errors_test.go new file mode 100644 index 00000000000..e5def5a7c44 --- /dev/null +++ b/internal/otel_collector/receiver/scraperhelper/errors_test.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package scraperhelper
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+func TestCombineScrapeErrors(t *testing.T) {
+	testCases := []struct {
+		errors                    []error
+		expected                  string
+		expectNil                 bool
+		expectedPartialScrapeErr  bool
+		expectedFailedScrapeCount int
+	}{
+		{
+			errors:    []error{},
+			expectNil: true,
+		},
+		{
+			errors: []error{
+				fmt.Errorf("foo"),
+			},
+			expected: "foo",
+		},
+		{
+			errors: []error{
+				fmt.Errorf("foo"),
+				fmt.Errorf("bar"),
+			},
+			expected: "[foo; bar]",
+		},
+		{
+			errors: []error{
+				fmt.Errorf("foo"),
+				fmt.Errorf("bar"),
+				consumererror.NewPartialScrapeError(fmt.Errorf("partial"), 0)},
+			expected:                  "[foo; bar; partial]",
+			expectedPartialScrapeErr:  true,
+			expectedFailedScrapeCount: 0,
+		},
+		{
+			errors: []error{
+				fmt.Errorf("foo"),
+				fmt.Errorf("bar"),
+				consumererror.NewPartialScrapeError(fmt.Errorf("partial 1"), 2),
+				consumererror.NewPartialScrapeError(fmt.Errorf("partial 2"), 3)},
+			expected:                  "[foo; bar; partial 1; partial 2]",
+			expectedPartialScrapeErr:  true,
+			expectedFailedScrapeCount: 5,
+		},
+	}
+
+	for _, tc := range testCases {
+		got := CombineScrapeErrors(tc.errors)
+
+		if tc.expectNil {
+			assert.NoError(t, got, tc.expected)
+		} else {
+			assert.EqualError(t, got, tc.expected)
+		}
+
+		partialErr, isPartial := got.(consumererror.PartialScrapeError)
+		assert.Equal(t, tc.expectedPartialScrapeErr, isPartial)
+
+		if tc.expectedPartialScrapeErr && isPartial {
+			assert.Equal(t, tc.expectedFailedScrapeCount, partialErr.Failed)
+		}
+	}
+}
diff --git a/internal/otel_collector/receiver/scraperhelper/scraper.go b/internal/otel_collector/receiver/scraperhelper/scraper.go
new file mode 100644
index 00000000000..06e0aa2b52d
--- /dev/null
+++ b/internal/otel_collector/receiver/scraperhelper/scraper.go
@@ -0,0 +1,168 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scraperhelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenthelper"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+)
+
+// ScrapeMetrics scrapes metrics.
+type ScrapeMetrics func(context.Context) (pdata.MetricSlice, error)
+
+// ScrapeResourceMetrics scrapes resource metrics.
+type ScrapeResourceMetrics func(context.Context) (pdata.ResourceMetricsSlice, error)
+
+// ScraperOption applies changes to internal options.
+type ScraperOption func(*componenthelper.ComponentSettings)
+
+// BaseScraper is the base interface common to all scrapers.
+type BaseScraper interface {
+	component.Component
+
+	// Name returns the scraper name
+	Name() string
+}
+
+// MetricsScraper is an interface for scrapers that scrape metrics.
+type MetricsScraper interface {
+	BaseScraper
+	Scrape(context.Context, string) (pdata.MetricSlice, error)
+}
+
+// ResourceMetricsScraper is an interface for scrapers that scrape resource metrics.
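+// Compared to MetricsScraper, its Scrape returns a pdata.ResourceMetricsSlice,
+// so a single scraper can report metrics grouped by resource.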
+type ResourceMetricsScraper interface { + BaseScraper + Scrape(context.Context, string) (pdata.ResourceMetricsSlice, error) +} + +var _ BaseScraper = (*baseScraper)(nil) + +type baseScraper struct { + component.Component + name string +} + +func (b baseScraper) Name() string { + return b.name +} + +// WithStart sets the function that will be called on startup. +func WithStart(start componenthelper.Start) ScraperOption { + return func(s *componenthelper.ComponentSettings) { + s.Start = start + } +} + +// WithShutdown sets the function that will be called on shutdown. +func WithShutdown(shutdown componenthelper.Shutdown) ScraperOption { + return func(s *componenthelper.ComponentSettings) { + s.Shutdown = shutdown + } +} + +type metricsScraper struct { + baseScraper + ScrapeMetrics +} + +var _ MetricsScraper = (*metricsScraper)(nil) + +// NewMetricsScraper creates a Scraper that calls Scrape at the specified +// collection interval, reports observability information, and passes the +// scraped metrics to the next consumer. +func NewMetricsScraper( + name string, + scrape ScrapeMetrics, + options ...ScraperOption, +) MetricsScraper { + set := componenthelper.DefaultComponentSettings() + for _, op := range options { + op(set) + } + + ms := &metricsScraper{ + baseScraper: baseScraper{ + Component: componenthelper.NewComponent(set), + name: name, + }, + ScrapeMetrics: scrape, + } + + return ms +} + +func (ms metricsScraper) Scrape(ctx context.Context, receiverName string) (pdata.MetricSlice, error) { + ctx = obsreport.ScraperContext(ctx, receiverName, ms.Name()) + ctx = obsreport.StartMetricsScrapeOp(ctx, receiverName, ms.Name()) + metrics, err := ms.ScrapeMetrics(ctx) + obsreport.EndMetricsScrapeOp(ctx, metrics.Len(), err) + return metrics, err +} + +type resourceMetricsScraper struct { + baseScraper + ScrapeResourceMetrics +} + +var _ ResourceMetricsScraper = (*resourceMetricsScraper)(nil) + +// NewResourceMetricsScraper creates a Scraper that calls Scrape at the +// specified collection interval, reports observability information, and +// passes the scraped resource metrics to the next consumer. 
+func NewResourceMetricsScraper(
+	name string,
+	scrape ScrapeResourceMetrics,
+	options ...ScraperOption,
+) ResourceMetricsScraper {
+	set := componenthelper.DefaultComponentSettings()
+	for _, op := range options {
+		op(set)
+	}
+
+	rms := &resourceMetricsScraper{
+		baseScraper: baseScraper{
+			Component: componenthelper.NewComponent(set),
+			name:      name,
+		},
+		ScrapeResourceMetrics: scrape,
+	}
+
+	return rms
+}
+
+func (rms resourceMetricsScraper) Scrape(ctx context.Context, receiverName string) (pdata.ResourceMetricsSlice, error) {
+	ctx = obsreport.ScraperContext(ctx, receiverName, rms.Name())
+	ctx = obsreport.StartMetricsScrapeOp(ctx, receiverName, rms.Name())
+	resourceMetrics, err := rms.ScrapeResourceMetrics(ctx)
+	obsreport.EndMetricsScrapeOp(ctx, metricCount(resourceMetrics), err)
+	return resourceMetrics, err
+}
+
+func metricCount(resourceMetrics pdata.ResourceMetricsSlice) int {
+	count := 0
+
+	for i := 0; i < resourceMetrics.Len(); i++ {
+		ilm := resourceMetrics.At(i).InstrumentationLibraryMetrics()
+		for j := 0; j < ilm.Len(); j++ {
+			count += ilm.At(j).Metrics().Len()
+		}
+	}
+
+	return count
+}
diff --git a/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go b/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go
new file mode 100644
index 00000000000..e829f6ca204
--- /dev/null
+++ b/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go
@@ -0,0 +1,276 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scraperhelper
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+)
+
+// ScraperControllerSettings defines common settings for a scraper controller
+// configuration. Scraper controller receivers can embed this struct, instead
+// of configmodels.ReceiverSettings, and extend it with more fields if needed.
+type ScraperControllerSettings struct {
+	configmodels.ReceiverSettings `mapstructure:"squash"`
+	CollectionInterval            time.Duration `mapstructure:"collection_interval"`
+}
+
+// DefaultScraperControllerSettings returns default scraper controller
+// settings with a collection interval of one minute.
+func DefaultScraperControllerSettings(cfgType configmodels.Type) ScraperControllerSettings {
+	return ScraperControllerSettings{
+		ReceiverSettings: configmodels.ReceiverSettings{
+			NameVal: string(cfgType),
+			TypeVal: cfgType,
+		},
+		CollectionInterval: time.Minute,
+	}
+}
+
+// ScraperControllerOption applies changes to internal options.
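+//
+// A hedged wiring sketch (scrapeFn, logger and nextConsumer are illustrative):
+//
+//	cfg := DefaultScraperControllerSettings("examplereceiver")
+//	receiver, err := NewScraperControllerReceiver(
+//		&cfg, logger, nextConsumer,
+//		AddMetricsScraper(NewMetricsScraper("examplescraper", scrapeFn)))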
+type ScraperControllerOption func(*controller)
+
+// AddMetricsScraper configures the provided scraper to be called with
+// the specified options, and at the specified collection interval.
+//
+// Observability information will be reported, and the scraped metrics
+// will be passed to the next consumer.
+func AddMetricsScraper(scraper MetricsScraper) ScraperControllerOption {
+	return func(o *controller) {
+		o.metricsScrapers.scrapers = append(o.metricsScrapers.scrapers, scraper)
+	}
+}
+
+// AddResourceMetricsScraper configures the provided scraper to be
+// called with the specified options, and at the specified collection
+// interval.
+//
+// Observability information will be reported, and the scraped resource
+// metrics will be passed to the next consumer.
+func AddResourceMetricsScraper(scraper ResourceMetricsScraper) ScraperControllerOption {
+	return func(o *controller) {
+		o.resourceMetricScrapers = append(o.resourceMetricScrapers, scraper)
+	}
+}
+
+// WithTickerChannel allows you to override the scraper controller's ticker
+// channel to specify when scrape is called. This is only expected to be
+// used by tests.
+func WithTickerChannel(tickerCh <-chan time.Time) ScraperControllerOption {
+	return func(o *controller) {
+		o.tickerCh = tickerCh
+	}
+}
+
+type controller struct {
+	name               string
+	logger             *zap.Logger
+	collectionInterval time.Duration
+	nextConsumer       consumer.MetricsConsumer
+
+	metricsScrapers        *multiMetricScraper
+	resourceMetricScrapers []ResourceMetricsScraper
+
+	tickerCh <-chan time.Time
+
+	initialized bool
+	done        chan struct{}
+	terminated  chan struct{}
+}
+
+// NewScraperControllerReceiver creates a Receiver with the configured options that can control multiple scrapers.
+func NewScraperControllerReceiver(
+	cfg *ScraperControllerSettings,
+	logger *zap.Logger,
+	nextConsumer consumer.MetricsConsumer,
+	options ...ScraperControllerOption,
+) (component.Receiver, error) {
+	if nextConsumer == nil {
+		return nil, componenterror.ErrNilNextConsumer
+	}
+
+	if cfg.CollectionInterval <= 0 {
+		return nil, errors.New("collection_interval must be a positive duration")
+	}
+
+	sc := &controller{
+		name:               cfg.Name(),
+		logger:             logger,
+		collectionInterval: cfg.CollectionInterval,
+		nextConsumer:       nextConsumer,
+		metricsScrapers:    &multiMetricScraper{},
+		done:               make(chan struct{}),
+		terminated:         make(chan struct{}),
+	}
+
+	for _, op := range options {
+		op(sc)
+	}
+
+	if len(sc.metricsScrapers.scrapers) > 0 {
+		sc.resourceMetricScrapers = append(sc.resourceMetricScrapers, sc.metricsScrapers)
+	}
+
+	return sc, nil
+}
+
+// Start the receiver, invoked during service start.
+func (sc *controller) Start(ctx context.Context, host component.Host) error {
+	for _, scraper := range sc.resourceMetricScrapers {
+		if err := scraper.Start(ctx, host); err != nil {
+			return err
+		}
+	}
+
+	sc.initialized = true
+	sc.startScraping()
+	return nil
+}
+
+// Shutdown the receiver, invoked during service shutdown.
+func (sc *controller) Shutdown(ctx context.Context) error {
+	sc.stopScraping()
+
+	// wait until scraping ticker has terminated
+	if sc.initialized {
+		<-sc.terminated
+	}
+
+	var errs []error
+	for _, scraper := range sc.resourceMetricScrapers {
+		if err := scraper.Shutdown(ctx); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return componenterror.CombineErrors(errs)
+}
+
+// startScraping initiates a ticker that calls Scrape based on the configured
+// collection interval.
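+// The goroutine exits when stopScraping closes the done channel, and signals
+// completion on the terminated channel so that Shutdown can wait for it.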
+func (sc *controller) startScraping() { + go func() { + if sc.tickerCh == nil { + ticker := time.NewTicker(sc.collectionInterval) + defer ticker.Stop() + + sc.tickerCh = ticker.C + } + + for { + select { + case <-sc.tickerCh: + sc.scrapeMetricsAndReport(context.Background()) + case <-sc.done: + sc.terminated <- struct{}{} + return + } + } + }() +} + +// scrapeMetricsAndReport calls the Scrape function for each of the configured +// Scrapers, records observability information, and passes the scraped metrics +// to the next component. +func (sc *controller) scrapeMetricsAndReport(ctx context.Context) { + ctx = obsreport.ReceiverContext(ctx, sc.name, "") + + metrics := pdata.NewMetrics() + + for _, rms := range sc.resourceMetricScrapers { + resourceMetrics, err := rms.Scrape(ctx, sc.name) + if err != nil { + sc.logger.Error("Error scraping metrics", zap.Error(err)) + + if !consumererror.IsPartialScrapeError(err) { + continue + } + } + resourceMetrics.MoveAndAppendTo(metrics.ResourceMetrics()) + } + + _, dataPointCount := metrics.MetricAndDataPointCount() + + ctx = obsreport.StartMetricsReceiveOp(ctx, sc.name, "") + err := sc.nextConsumer.ConsumeMetrics(ctx, metrics) + obsreport.EndMetricsReceiveOp(ctx, "", dataPointCount, err) +} + +// stopScraping stops the ticker +func (sc *controller) stopScraping() { + close(sc.done) +} + +var _ ResourceMetricsScraper = (*multiMetricScraper)(nil) + +type multiMetricScraper struct { + scrapers []MetricsScraper +} + +func (mms *multiMetricScraper) Name() string { + return "" +} + +func (mms *multiMetricScraper) Start(ctx context.Context, host component.Host) error { + for _, scraper := range mms.scrapers { + if err := scraper.Start(ctx, host); err != nil { + return err + } + } + return nil +} + +func (mms *multiMetricScraper) Shutdown(ctx context.Context) error { + var errs []error + for _, scraper := range mms.scrapers { + if err := scraper.Shutdown(ctx); err != nil { + errs = append(errs, err) + } + } + return componenterror.CombineErrors(errs) +} + +func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverName string) (pdata.ResourceMetricsSlice, error) { + rms := pdata.NewResourceMetricsSlice() + rms.Resize(1) + rm := rms.At(0) + ilms := rm.InstrumentationLibraryMetrics() + ilms.Resize(1) + ilm := ilms.At(0) + + var errs []error + for _, scraper := range mms.scrapers { + metrics, err := scraper.Scrape(ctx, receiverName) + if err != nil { + errs = append(errs, err) + if !consumererror.IsPartialScrapeError(err) { + continue + } + } + + metrics.MoveAndAppendTo(ilm.Metrics()) + } + return rms, CombineScrapeErrors(errs) +} diff --git a/internal/otel_collector/receiver/scraperhelper/scrapercontroller_test.go b/internal/otel_collector/receiver/scraperhelper/scrapercontroller_test.go new file mode 100644 index 00000000000..6218e1f6c4c --- /dev/null +++ b/internal/otel_collector/receiver/scraperhelper/scrapercontroller_test.go @@ -0,0 +1,477 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scraperhelper + +import ( + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport/obsreporttest" +) + +type testInitialize struct { + ch chan bool + err error +} + +func (ts *testInitialize) start(context.Context, component.Host) error { + ts.ch <- true + return ts.err +} + +type testClose struct { + ch chan bool + err error +} + +func (ts *testClose) shutdown(context.Context) error { + ts.ch <- true + return ts.err +} + +type testScrapeMetrics struct { + ch chan int + timesScrapeCalled int + err error +} + +func (ts *testScrapeMetrics) scrape(_ context.Context) (pdata.MetricSlice, error) { + ts.timesScrapeCalled++ + ts.ch <- ts.timesScrapeCalled + + if ts.err != nil { + return pdata.NewMetricSlice(), ts.err + } + + return singleMetric(), nil +} + +type testScrapeResourceMetrics struct { + ch chan int + timesScrapeCalled int + err error +} + +func (ts *testScrapeResourceMetrics) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) { + ts.timesScrapeCalled++ + ts.ch <- ts.timesScrapeCalled + + if ts.err != nil { + return pdata.NewResourceMetricsSlice(), ts.err + } + + return singleResourceMetric(), nil +} + +type metricsTestCase struct { + name string + + scrapers int + resourceScrapers int + scraperControllerSettings *ScraperControllerSettings + nilNextConsumer bool + scrapeErr error + expectedNewErr string + expectScraped bool + + initialize bool + close bool + initializeErr error + closeErr error +} + +func TestScrapeController(t *testing.T) { + testCases := []metricsTestCase{ + { + name: "NoScrapers", + }, + { + name: "AddMetricsScrapersWithCollectionInterval", + scrapers: 2, + expectScraped: true, + }, + { + name: "AddMetricsScrapers_NilNextConsumerError", + scrapers: 2, + nilNextConsumer: true, + expectedNewErr: "nil nextConsumer", + }, + { + name: "AddMetricsScrapersWithCollectionInterval_InvalidCollectionIntervalError", + scrapers: 2, + scraperControllerSettings: &ScraperControllerSettings{CollectionInterval: -time.Millisecond}, + expectedNewErr: "collection_interval must be a positive duration", + }, + { + name: "AddMetricsScrapers_ScrapeError", + scrapers: 2, + scrapeErr: errors.New("err1"), + }, + { + name: "AddMetricsScrapersWithInitializeAndClose", + scrapers: 2, + initialize: true, + close: true, + }, + { + name: "AddMetricsScrapersWithInitializeAndCloseErrors", + scrapers: 2, + initialize: true, + close: true, + initializeErr: errors.New("err1"), + closeErr: errors.New("err2"), + }, + { + name: "AddResourceMetricsScrapersWithCollectionInterval", + resourceScrapers: 2, + expectScraped: true, + }, + { + name: "AddResourceMetricsScrapers_NewError", + resourceScrapers: 2, + nilNextConsumer: true, + expectedNewErr: "nil nextConsumer", + }, + { + name: "AddResourceMetricsScrapers_ScrapeError", + resourceScrapers: 2, + scrapeErr: errors.New("err1"), + }, + { + name: "AddResourceMetricsScrapersWithInitializeAndClose", + resourceScrapers: 2, + initialize: true, + close: true, + }, + { + name: 
"AddResourceMetricsScrapersWithInitializeAndCloseErrors", + resourceScrapers: 2, + initialize: true, + close: true, + initializeErr: errors.New("err1"), + closeErr: errors.New("err2"), + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + ss := &spanStore{} + trace.RegisterExporter(ss) + defer trace.UnregisterExporter(ss) + + done, err := obsreporttest.SetupRecordedMetricsTest() + require.NoError(t, err) + defer done() + + initializeChs := make([]chan bool, test.scrapers+test.resourceScrapers) + scrapeMetricsChs := make([]chan int, test.scrapers) + scrapeResourceMetricsChs := make([]chan int, test.resourceScrapers) + closeChs := make([]chan bool, test.scrapers+test.resourceScrapers) + options := configureMetricOptions(test, initializeChs, scrapeMetricsChs, scrapeResourceMetricsChs, closeChs) + + tickerCh := make(chan time.Time) + options = append(options, WithTickerChannel(tickerCh)) + + var nextConsumer consumer.MetricsConsumer + sink := new(consumertest.MetricsSink) + if !test.nilNextConsumer { + nextConsumer = sink + } + defaultCfg := DefaultScraperControllerSettings("receiver") + cfg := &defaultCfg + if test.scraperControllerSettings != nil { + cfg = test.scraperControllerSettings + cfg.NameVal = "receiver" + } + + mr, err := NewScraperControllerReceiver(cfg, zap.NewNop(), nextConsumer, options...) + if test.expectedNewErr != "" { + assert.EqualError(t, err, test.expectedNewErr) + return + } + require.NoError(t, err) + + err = mr.Start(context.Background(), componenttest.NewNopHost()) + expectedStartErr := getExpectedStartErr(test) + if expectedStartErr != nil { + assert.Equal(t, expectedStartErr, err) + } else if test.initialize { + assertChannelsCalled(t, initializeChs, "start was not called") + } + + const iterations = 5 + + if test.expectScraped || test.scrapeErr != nil { + // validate that scrape is called at least N times for each configured scraper + for i := 0; i < iterations; i++ { + tickerCh <- time.Now() + + for _, ch := range scrapeMetricsChs { + <-ch + } + for _, ch := range scrapeResourceMetricsChs { + <-ch + } + } + + // wait until all calls to scrape have completed + if test.scrapeErr == nil { + require.Eventually(t, func() bool { + return sink.MetricsCount() == iterations*(test.scrapers+test.resourceScrapers) + }, time.Second, time.Millisecond) + } + + if test.expectScraped { + assert.GreaterOrEqual(t, sink.MetricsCount(), iterations) + } + + spans := ss.PullAllSpans() + assertReceiverSpan(t, spans) + assertReceiverViews(t, sink) + assertScraperSpan(t, test.scrapeErr, spans) + assertScraperViews(t, test.scrapeErr, sink) + } + + err = mr.Shutdown(context.Background()) + expectedShutdownErr := getExpectedShutdownErr(test) + if expectedShutdownErr != nil { + assert.EqualError(t, err, expectedShutdownErr.Error()) + } else if test.close { + assertChannelsCalled(t, closeChs, "shutdown was not called") + } + }) + } +} + +func configureMetricOptions(test metricsTestCase, initializeChs []chan bool, scrapeMetricsChs, testScrapeResourceMetricsChs []chan int, closeChs []chan bool) []ScraperControllerOption { + var metricOptions []ScraperControllerOption + + for i := 0; i < test.scrapers; i++ { + var scraperOptions []ScraperOption + if test.initialize { + initializeChs[i] = make(chan bool, 1) + ti := &testInitialize{ch: initializeChs[i], err: test.initializeErr} + scraperOptions = append(scraperOptions, WithStart(ti.start)) + } + if test.close { + closeChs[i] = make(chan 
bool, 1) + tc := &testClose{ch: closeChs[i], err: test.closeErr} + scraperOptions = append(scraperOptions, WithShutdown(tc.shutdown)) + } + + scrapeMetricsChs[i] = make(chan int) + tsm := &testScrapeMetrics{ch: scrapeMetricsChs[i], err: test.scrapeErr} + metricOptions = append(metricOptions, AddMetricsScraper(NewMetricsScraper("scraper", tsm.scrape, scraperOptions...))) + } + + for i := 0; i < test.resourceScrapers; i++ { + var scraperOptions []ScraperOption + if test.initialize { + initializeChs[test.scrapers+i] = make(chan bool, 1) + ti := &testInitialize{ch: initializeChs[test.scrapers+i], err: test.initializeErr} + scraperOptions = append(scraperOptions, WithStart(ti.start)) + } + if test.close { + closeChs[test.scrapers+i] = make(chan bool, 1) + tc := &testClose{ch: closeChs[test.scrapers+i], err: test.closeErr} + scraperOptions = append(scraperOptions, WithShutdown(tc.shutdown)) + } + + testScrapeResourceMetricsChs[i] = make(chan int) + tsrm := &testScrapeResourceMetrics{ch: testScrapeResourceMetricsChs[i], err: test.scrapeErr} + metricOptions = append(metricOptions, AddResourceMetricsScraper(NewResourceMetricsScraper("scraper", tsrm.scrape, scraperOptions...))) + } + + return metricOptions +} + +func getExpectedStartErr(test metricsTestCase) error { + return test.initializeErr +} + +func getExpectedShutdownErr(test metricsTestCase) error { + var errs []error + + if test.closeErr != nil { + for i := 0; i < test.scrapers; i++ { + errs = append(errs, test.closeErr) + } + } + + return componenterror.CombineErrors(errs) +} + +func assertChannelsCalled(t *testing.T, chs []chan bool, message string) { + for _, ic := range chs { + assertChannelCalled(t, ic, message) + } +} + +func assertChannelCalled(t *testing.T, ch chan bool, message string) { + select { + case <-ch: + default: + assert.Fail(t, message) + } +} + +func assertReceiverSpan(t *testing.T, spans []*trace.SpanData) { + receiverSpan := false + for _, span := range spans { + if span.Name == "receiver/receiver/MetricsReceived" { + receiverSpan = true + break + } + } + assert.True(t, receiverSpan) +} + +func assertReceiverViews(t *testing.T, sink *consumertest.MetricsSink) { + dataPointCount := 0 + for _, md := range sink.AllMetrics() { + _, dpc := md.MetricAndDataPointCount() + dataPointCount += dpc + } + obsreporttest.CheckReceiverMetricsViews(t, "receiver", "", int64(dataPointCount), 0) +} + +func assertScraperSpan(t *testing.T, expectedErr error, spans []*trace.SpanData) { + expectedScrapeTraceStatus := trace.Status{Code: trace.StatusCodeOK} + expectedScrapeTraceMessage := "" + if expectedErr != nil { + expectedScrapeTraceStatus = trace.Status{Code: trace.StatusCodeUnknown, Message: expectedErr.Error()} + expectedScrapeTraceMessage = expectedErr.Error() + } + + scraperSpan := false + for _, span := range spans { + if span.Name == "scraper/receiver/scraper/MetricsScraped" { + scraperSpan = true + assert.Equal(t, expectedScrapeTraceStatus, span.Status) + assert.Equal(t, expectedScrapeTraceMessage, span.Message) + break + } + } + assert.True(t, scraperSpan) +} + +func assertScraperViews(t *testing.T, expectedErr error, sink *consumertest.MetricsSink) { + expectedScraped := int64(sink.MetricsCount()) + expectedErrored := int64(0) + if expectedErr != nil { + if partialError, isPartial := expectedErr.(consumererror.PartialScrapeError); isPartial { + expectedErrored = int64(partialError.Failed) + } else { + expectedScraped = int64(0) + expectedErrored = int64(sink.MetricsCount()) + } + } + + obsreporttest.CheckScraperMetricsViews(t, 
"receiver", "scraper", expectedScraped, expectedErrored) +} + +func singleMetric() pdata.MetricSlice { + metrics := pdata.NewMetricSlice() + metrics.Resize(1) + metrics.At(0).SetDataType(pdata.MetricDataTypeIntGauge) + metrics.At(0).IntGauge().DataPoints().Resize(1) + return metrics +} + +func singleResourceMetric() pdata.ResourceMetricsSlice { + rms := pdata.NewResourceMetricsSlice() + rms.Resize(1) + rm := rms.At(0) + ilms := rm.InstrumentationLibraryMetrics() + ilms.Resize(1) + ilm := ilms.At(0) + singleMetric().MoveAndAppendTo(ilm.Metrics()) + return rms +} + +func TestSingleScrapePerTick(t *testing.T) { + scrapeMetricsCh := make(chan int, 10) + tsm := &testScrapeMetrics{ch: scrapeMetricsCh} + + scrapeResourceMetricsCh := make(chan int, 10) + tsrm := &testScrapeResourceMetrics{ch: scrapeResourceMetricsCh} + + defaultCfg := DefaultScraperControllerSettings("") + cfg := &defaultCfg + + tickerCh := make(chan time.Time) + + receiver, err := NewScraperControllerReceiver( + cfg, + zap.NewNop(), + new(consumertest.MetricsSink), + AddMetricsScraper(NewMetricsScraper("", tsm.scrape)), + AddResourceMetricsScraper(NewResourceMetricsScraper("", tsrm.scrape)), + WithTickerChannel(tickerCh), + ) + require.NoError(t, err) + + require.NoError(t, receiver.Start(context.Background(), componenttest.NewNopHost())) + + tickerCh <- time.Now() + + assert.Equal(t, 1, <-scrapeMetricsCh) + assert.Equal(t, 1, <-scrapeResourceMetricsCh) + + select { + case <-scrapeMetricsCh: + assert.Fail(t, "Scrape was called more than once") + case <-scrapeResourceMetricsCh: + assert.Fail(t, "Scrape was called more than once") + case <-time.After(100 * time.Millisecond): + return + } +} + +type spanStore struct { + sync.Mutex + spans []*trace.SpanData +} + +func (ss *spanStore) ExportSpan(sd *trace.SpanData) { + ss.Lock() + ss.spans = append(ss.spans, sd) + ss.Unlock() +} + +func (ss *spanStore) PullAllSpans() []*trace.SpanData { + ss.Lock() + capturedSpans := ss.spans + ss.spans = nil + ss.Unlock() + return capturedSpans +} diff --git a/internal/otel_collector/receiver/zipkinreceiver/README.md b/internal/otel_collector/receiver/zipkinreceiver/README.md new file mode 100644 index 00000000000..9c73ad6d53f --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/README.md @@ -0,0 +1,29 @@ +# Zipkin Receiver + +This receiver receives spans from [Zipkin](https://zipkin.io/) (V1 and V2). + +Supported pipeline types: traces + +## Getting Started + +All that is required to enable the Zipkin receiver is to include it in the +receiver definitions. + +```yaml +receivers: + zipkin: +``` + +The following settings are configurable: + +- `endpoint` (default = 0.0.0.0:9411): host:port to which the receiver is going + to receive data. The valid syntax is described at + https://github.com/grpc/grpc/blob/master/doc/naming.md. 
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configgrpc/README.md) including CORS
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/master/exporter/exporterhelper/README.md)
diff --git a/internal/otel_collector/receiver/zipkinreceiver/config.go b/internal/otel_collector/receiver/zipkinreceiver/config.go
new file mode 100644
index 00000000000..9df6c11df11
--- /dev/null
+++ b/internal/otel_collector/receiver/zipkinreceiver/config.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkinreceiver
+
+import (
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// Config defines configuration for the Zipkin receiver.
+type Config struct {
+	configmodels.ReceiverSettings `mapstructure:",squash"`
+
+	// Configures the receiver server protocol.
+	confighttp.HTTPServerSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// If enabled, the Zipkin receiver will attempt to parse string tags/binary annotations into int/bool/float.
+	// Disabled by default.
+	ParseStringTags bool `mapstructure:"parse_string_tags"`
+}
diff --git a/internal/otel_collector/receiver/zipkinreceiver/config_test.go b/internal/otel_collector/receiver/zipkinreceiver/config_test.go
new file mode 100644
index 00000000000..04d5d0eb6c5
--- /dev/null
+++ b/internal/otel_collector/receiver/zipkinreceiver/config_test.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package zipkinreceiver + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" +) + +func TestLoadConfig(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + factory := NewFactory() + factories.Receivers[typeStr] = factory + cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) + + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, len(cfg.Receivers), 3) + + r0 := cfg.Receivers["zipkin"] + assert.Equal(t, r0, factory.CreateDefaultConfig()) + + r1 := cfg.Receivers["zipkin/customname"].(*Config) + assert.Equal(t, r1, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "zipkin/customname", + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "localhost:8765", + }, + }) + + r2 := cfg.Receivers["zipkin/parse_strings"].(*Config) + assert.Equal(t, r2, + &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: "zipkin/parse_strings", + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "0.0.0.0:9411", + }, + ParseStringTags: true, + }) +} diff --git a/internal/otel_collector/receiver/zipkinreceiver/factory.go b/internal/otel_collector/receiver/zipkinreceiver/factory.go new file mode 100644 index 00000000000..8d10e5cbc10 --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/factory.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinreceiver + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +// This file implements factory for Zipkin receiver. + +const ( + // The value of "type" key in configuration. + typeStr = "zipkin" + + defaultBindEndpoint = "0.0.0.0:9411" +) + +// NewFactory creates a new Zipkin receiver factory +func NewFactory() component.ReceiverFactory { + return receiverhelper.NewFactory( + typeStr, + createDefaultConfig, + receiverhelper.WithTraces(createTraceReceiver), + ) +} + +// createDefaultConfig creates the default configuration for Zipkin receiver. +func createDefaultConfig() configmodels.Receiver { + return &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + TypeVal: typeStr, + NameVal: typeStr, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: defaultBindEndpoint, + }, + ParseStringTags: false, + } +} + +// createTraceReceiver creates a trace receiver based on provided config. 
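+// The nextConsumer must be non-nil: the underlying New constructor returns
+// componenterror.ErrNilNextConsumer for a nil consumer.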
+func createTraceReceiver( + _ context.Context, + _ component.ReceiverCreateParams, + cfg configmodels.Receiver, + nextConsumer consumer.TracesConsumer, +) (component.TracesReceiver, error) { + rCfg := cfg.(*Config) + return New(rCfg, nextConsumer) +} diff --git a/internal/otel_collector/receiver/zipkinreceiver/factory_test.go b/internal/otel_collector/receiver/zipkinreceiver/factory_test.go new file mode 100644 index 00000000000..db1d1fe85b4 --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/factory_test.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, configcheck.ValidateConfig(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + cfg := createDefaultConfig() + + tReceiver, err := createTraceReceiver( + context.Background(), + component.ReceiverCreateParams{Logger: zap.NewNop()}, + cfg, + consumertest.NewTracesNop()) + assert.NoError(t, err, "receiver creation failed") + assert.NotNil(t, tReceiver, "receiver creation failed") + + tReceiver, err = createTraceReceiver( + context.Background(), + component.ReceiverCreateParams{Logger: zap.NewNop()}, + cfg, + consumertest.NewTracesNop()) + assert.NoError(t, err, "receiver creation failed") + assert.NotNil(t, tReceiver, "receiver creation failed") +} diff --git a/internal/otel_collector/receiver/zipkinreceiver/proto_parse_test.go b/internal/otel_collector/receiver/zipkinreceiver/proto_parse_test.go new file mode 100644 index 00000000000..dfba6521b7e --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/proto_parse_test.go @@ -0,0 +1,305 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zipkinreceiver + +import ( + "net/http" + "testing" + "time" + + "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func TestConvertSpansToTraceSpans_protobuf(t *testing.T) { + // TODO: (@odeke-em) examine the entire codepath that time goes through + // in Zipkin-Go to ensure that all rounding matches. Otherwise + // for now we need to round Annotation times to seconds for comparison. + cmpTimestamp := func(t time.Time) time.Time { + return t.Round(time.Second) + } + + now := cmpTimestamp(time.Date(2018, 10, 31, 19, 43, 35, 789, time.UTC)) + minus10hr5ms := cmpTimestamp(now.Add(-(10*time.Hour + 5*time.Millisecond))) + + // 1. Generate some spans then serialize them with protobuf + payloadFromWild := &zipkin_proto3.ListOfSpans{ + Spans: []*zipkin_proto3.Span{ + { + TraceId: []byte{0x7F, 0x6F, 0x5F, 0x4F, 0x3F, 0x2F, 0x1F, 0x0F, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0}, + Id: []byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0}, + ParentId: []byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0}, + Name: "ProtoSpan1", + Kind: zipkin_proto3.Span_CONSUMER, + Timestamp: uint64(now.UnixNano() / 1e3), + Duration: 12e6, // 12 seconds + LocalEndpoint: &zipkin_proto3.Endpoint{ + ServiceName: "svc-1", + Ipv4: []byte{0xC0, 0xA8, 0x00, 0x01}, + Port: 8009, + }, + RemoteEndpoint: &zipkin_proto3.Endpoint{ + ServiceName: "memcached", + Ipv6: []byte{0xFE, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x53, 0xa7, 0x7c, 0xda, 0x4d, 0xd2, 0x1b}, + Port: 11211, + }, + }, + { + TraceId: []byte{0x7A, 0x6A, 0x5A, 0x4A, 0x3A, 0x2A, 0x1A, 0x0A, 0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0}, + Id: []byte{0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x61, 0x60}, + ParentId: []byte{0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10}, + Name: "CacheWarmUp", + Kind: zipkin_proto3.Span_PRODUCER, + Timestamp: uint64(minus10hr5ms.UnixNano() / 1e3), + Duration: 7e6, // 7 seconds + LocalEndpoint: &zipkin_proto3.Endpoint{ + ServiceName: "search", + Ipv4: []byte{0x0A, 0x00, 0x00, 0x0D}, + Port: 8009, + }, + RemoteEndpoint: &zipkin_proto3.Endpoint{ + ServiceName: "redis", + Ipv6: []byte{0xFE, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x53, 0xa7, 0x7c, 0xda, 0x4d, 0xd2, 0x1b}, + Port: 6379, + }, + Annotations: []*zipkin_proto3.Annotation{ + { + Timestamp: uint64(minus10hr5ms.UnixNano() / 1e3), + Value: "DB reset", + }, + { + Timestamp: uint64(minus10hr5ms.UnixNano() / 1e3), + Value: "GC Cycle 39", + }, + }, + }, + }, + } + + // 2. Serialize it + protoBlob, err := proto.Marshal(payloadFromWild) + require.NoError(t, err, "Failed to protobuf serialize payload: %v", err) + zi := new(ZipkinReceiver) + zi.config = createDefaultConfig().(*Config) + hdr := make(http.Header) + hdr.Set("Content-Type", "application/x-protobuf") + + // 3. Get that payload converted to OpenCensus proto spans. 
+ reqs, err := zi.v2ToTraceSpans(protoBlob, hdr) + require.NoError(t, err, "Failed to parse convert Zipkin spans in Protobuf to Trace spans: %v", err) + require.Equal(t, reqs.ResourceSpans().Len(), 2, "Expecting exactly 2 requests since spans have different node/localEndpoint: %v", reqs.ResourceSpans().Len()) + + want := pdata.TracesFromOtlp([]*otlptrace.ResourceSpans{ + { + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: conventions.AttributeServiceName, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "svc-1", + }, + }, + }, + }, + }, + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + { + TraceId: data.NewTraceID([16]byte{0x7F, 0x6F, 0x5F, 0x4F, 0x3F, 0x2F, 0x1F, 0x0F, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0}), + SpanId: data.NewSpanID([8]byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0}), + ParentSpanId: data.NewSpanID([8]byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0}), + Name: "ProtoSpan1", + StartTimeUnixNano: uint64(now.UnixNano()), + EndTimeUnixNano: uint64(now.Add(12 * time.Second).UnixNano()), + Attributes: []otlpcommon.KeyValue{ + { + Key: conventions.AttributeNetHostIP, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "192.168.0.1", + }, + }, + }, + { + Key: conventions.AttributeNetHostPort, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_IntValue{ + IntValue: 8009, + }, + }, + }, + { + Key: conventions.AttributeNetPeerName, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "memcached", + }, + }, + }, + { + Key: conventions.AttributeNetPeerIP, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "fe80::1453:a77c:da4d:d21b", + }, + }, + }, + { + Key: conventions.AttributeNetPeerPort, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_IntValue{ + IntValue: 11211, + }, + }, + }, + { + Key: tracetranslator.TagSpanKind, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: string(tracetranslator.OpenTracingSpanKindConsumer), + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: conventions.AttributeServiceName, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "search", + }, + }, + }, + }, + }, + InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ + { + Spans: []*otlptrace.Span{ + { + TraceId: data.NewTraceID([16]byte{0x7A, 0x6A, 0x5A, 0x4A, 0x3A, 0x2A, 0x1A, 0x0A, 0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0}), + SpanId: data.NewSpanID([8]byte{0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x61, 0x60}), + ParentSpanId: data.NewSpanID([8]byte{0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10}), + Name: "CacheWarmUp", + StartTimeUnixNano: uint64(now.Add(-10 * time.Hour).UnixNano()), + EndTimeUnixNano: uint64(now.Add(-10 * time.Hour).Add(7 * time.Second).UnixNano()), + Attributes: []otlpcommon.KeyValue{ + { + Key: conventions.AttributeNetHostIP, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "10.0.0.13", + }, + }, + }, + { + Key: conventions.AttributeNetHostPort, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_IntValue{ + IntValue: 8009, + }, + }, + }, + { + Key: conventions.AttributeNetPeerName, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "redis", + }, + }, + }, + 
{ + Key: conventions.AttributeNetPeerIP, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "fe80::1453:a77c:da4d:d21b", + }, + }, + }, + { + Key: conventions.AttributeNetPeerPort, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_IntValue{ + IntValue: 6379, + }, + }, + }, + { + Key: tracetranslator.TagSpanKind, + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: string(tracetranslator.OpenTracingSpanKindProducer), + }, + }, + }, + }, + Events: []*otlptrace.Span_Event{ + { + TimeUnixNano: uint64(now.Add(-10 * time.Hour).UnixNano()), + Name: "DB reset", + }, + { + TimeUnixNano: uint64(now.Add(-10 * time.Hour).UnixNano()), + Name: "GC Cycle 39", + }, + }, + }, + }, + }, + }, + }, + }) + + assert.Equal(t, want.SpanCount(), reqs.SpanCount()) + assert.Equal(t, want.ResourceSpans().Len(), reqs.ResourceSpans().Len()) + for i := 0; i < want.ResourceSpans().Len(); i++ { + wantRS := want.ResourceSpans().At(i) + wSvcName, ok := wantRS.Resource().Attributes().Get(conventions.AttributeServiceName) + assert.True(t, ok) + for j := 0; j < reqs.ResourceSpans().Len(); j++ { + reqsRS := reqs.ResourceSpans().At(j) + rSvcName, ok := reqsRS.Resource().Attributes().Get(conventions.AttributeServiceName) + assert.True(t, ok) + if rSvcName.StringVal() == wSvcName.StringVal() { + compareResourceSpans(t, wantRS, reqsRS) + } + } + } +} + +func compareResourceSpans(t *testing.T, wantRS pdata.ResourceSpans, reqsRS pdata.ResourceSpans) { + assert.Equal(t, wantRS.InstrumentationLibrarySpans().Len(), reqsRS.InstrumentationLibrarySpans().Len()) + wantIL := wantRS.InstrumentationLibrarySpans().At(0) + reqsIL := reqsRS.InstrumentationLibrarySpans().At(0) + assert.Equal(t, wantIL.Spans().Len(), reqsIL.Spans().Len()) +} diff --git a/internal/otel_collector/receiver/zipkinreceiver/testdata/config.yaml b/internal/otel_collector/receiver/zipkinreceiver/testdata/config.yaml new file mode 100644 index 00000000000..b2e4ccbcbdd --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/testdata/config.yaml @@ -0,0 +1,20 @@ +receivers: + zipkin: + zipkin/customname: + endpoint: "localhost:8765" + zipkin/parse_strings: + parse_string_tags: true + +processors: + exampleprocessor: + +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [zipkin] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/receiver/zipkinreceiver/testdata/sample1.json b/internal/otel_collector/receiver/zipkinreceiver/testdata/sample1.json new file mode 100644 index 00000000000..5339b471566 --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/testdata/sample1.json @@ -0,0 +1,288 @@ +[{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + 
"localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + 
"kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } +}] diff --git a/internal/otel_collector/receiver/zipkinreceiver/testdata/sample2.json b/internal/otel_collector/receiver/zipkinreceiver/testdata/sample2.json new file mode 100644 index 00000000000..5dcb5b013be --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/testdata/sample2.json @@ -0,0 +1,32 @@ +[ + { + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0" + } + } +] diff --git a/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go b/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go new file mode 100644 index 00000000000..dec39db0a96 --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go @@ -0,0 +1,309 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package zipkinreceiver
+
+import (
+	"compress/gzip"
+	"compress/zlib"
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/apache/thrift/lib/go/thrift"
+	"github.com/jaegertracing/jaeger/thrift-gen/zipkincore"
+	zipkinmodel "github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/proto/zipkin_proto3"
+
+	"go.opentelemetry.io/collector/client"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+	"go.opentelemetry.io/collector/translator/trace/zipkin"
+)
+
+const (
+	receiverTransportV1Thrift = "http_v1_thrift"
+	receiverTransportV1JSON   = "http_v1_json"
+	receiverTransportV2JSON   = "http_v2_json"
+	receiverTransportV2PROTO  = "http_v2_proto"
+)
+
+var errNextConsumerRespBody = []byte(`"Internal Server Error"`)
+
+// ZipkinReceiver type is used to handle spans received in the Zipkin format.
+type ZipkinReceiver struct {
+	// mu protects the fields of this struct
+	mu sync.Mutex
+
+	// host is the component host the receiver was started with.
+	host         component.Host
+	nextConsumer consumer.TracesConsumer
+	instanceName string
+
+	startOnce sync.Once
+	stopOnce  sync.Once
+	server    *http.Server
+	config    *Config
+}
+
+var _ http.Handler = (*ZipkinReceiver)(nil)
+
+// New creates a new zipkinreceiver.ZipkinReceiver reference.
+func New(config *Config, nextConsumer consumer.TracesConsumer) (*ZipkinReceiver, error) {
+	if nextConsumer == nil {
+		return nil, componenterror.ErrNilNextConsumer
+	}
+
+	zr := &ZipkinReceiver{
+		nextConsumer: nextConsumer,
+		instanceName: config.Name(),
+		config:       config,
+	}
+	return zr, nil
+}
+
+// Start spins up the receiver's HTTP server and makes the receiver start its processing.
+func (zr *ZipkinReceiver) Start(ctx context.Context, host component.Host) error {
+	if host == nil {
+		return errors.New("nil host")
+	}
+
+	zr.mu.Lock()
+	defer zr.mu.Unlock()
+
+	var err = componenterror.ErrAlreadyStarted
+
+	zr.startOnce.Do(func() {
+		err = nil
+		zr.host = host
+		zr.server = zr.config.HTTPServerSettings.ToServer(zr)
+		var listener net.Listener
+		listener, err = zr.config.HTTPServerSettings.ToListener()
+		if err != nil {
+			host.ReportFatalError(err)
+			return
+		}
+		go func() {
+			err = zr.server.Serve(listener)
+			if err != nil {
+				host.ReportFatalError(err)
+			}
+		}()
+	})
+
+	return err
+}
+
+// v1ToTraceSpans parses Zipkin v1 JSON or Thrift traces and converts them to
+// the collector's internal trace representation (pdata.Traces).
+func (zr *ZipkinReceiver) v1ToTraceSpans(blob []byte, hdr http.Header) (reqs pdata.Traces, err error) {
+	if hdr.Get("Content-Type") == "application/x-thrift" {
+		zSpans, err := deserializeThrift(blob)
+		if err != nil {
+			return pdata.NewTraces(), err
+		}
+
+		return zipkin.V1ThriftBatchToInternalTraces(zSpans)
+	}
+	return zipkin.V1JSONBatchToInternalTraces(blob, zr.config.ParseStringTags)
+}
+
+// deserializeThrift decodes Thrift bytes to a list of spans.
+// This code comes from jaegertracing/jaeger; ideally we would have imported
+// it, but that created many conflicts, so the code was copied here.
+// https://github.com/jaegertracing/jaeger/blob/6bc0c122bfca8e737a747826ae60a22a306d7019/model/converter/thrift/zipkin/deserialize.go#L36
+func deserializeThrift(b []byte) ([]*zipkincore.Span, error) {
+	buffer := thrift.NewTMemoryBuffer()
+	buffer.Write(b)
+
+	transport := thrift.NewTBinaryProtocolTransport(buffer)
+	_, size, err := transport.ReadListBegin() // Ignore the returned element type
+	if err != nil {
+		return nil, err
+	}
+
+	// We don't depend on the size returned by ReadListBegin to preallocate the array because it
+	// sometimes returns a nil error on bad input and provides an unreasonably large int for size
+	var spans []*zipkincore.Span
+	for i := 0; i < size; i++ {
+		zs := &zipkincore.Span{}
+		if err = zs.Read(transport); err != nil {
+			return nil, err
+		}
+		spans = append(spans, zs)
+	}
+
+	return spans, nil
+}
+
+// v2ToTraceSpans parses Zipkin v2 JSON or Protobuf traces and converts them to
+// the collector's internal trace representation (pdata.Traces).
+func (zr *ZipkinReceiver) v2ToTraceSpans(blob []byte, hdr http.Header) (reqs pdata.Traces, err error) {
+	// This flag's reference is from:
+	// https://github.com/openzipkin/zipkin-go/blob/3793c981d4f621c0e3eb1457acffa2c1cc591384/proto/v2/zipkin.proto#L154
+	debugWasSet := hdr.Get("X-B3-Flags") == "1"
+
+	var zipkinSpans []*zipkinmodel.SpanModel
+
+	// Zipkin can send protobuf via http
+	switch hdr.Get("Content-Type") {
+	// TODO: (@odeke-em) record the unique types of Content-Type uploads
+	case "application/x-protobuf":
+		zipkinSpans, err = zipkin_proto3.ParseSpans(blob, debugWasSet)
+
+	default: // By default, we'll assume using JSON
+		zipkinSpans, err = zr.deserializeFromJSON(blob)
+	}
+
+	if err != nil {
+		return pdata.Traces{}, err
+	}
+
+	return zipkin.V2SpansToInternalTraces(zipkinSpans, zr.config.ParseStringTags)
+}
+
+func (zr *ZipkinReceiver) deserializeFromJSON(jsonBlob []byte) (zs []*zipkinmodel.SpanModel, err error) {
+	if err = json.Unmarshal(jsonBlob, &zs); err != nil {
+		return nil, err
+	}
+	return zs, nil
+}
+
+// Shutdown tells the receiver that it should stop reception,
+// giving it a chance to perform any necessary clean-up before shutting down
+// its HTTP server.
+func (zr *ZipkinReceiver) Shutdown(context.Context) error {
+	var err = componenterror.ErrAlreadyStopped
+	zr.stopOnce.Do(func() {
+		err = zr.server.Close()
+	})
+	return err
+}
+
+// processBodyIfNecessary checks the "Content-Encoding" HTTP header; if a
+// compression such as "gzip", "deflate" or "zlib" is found, the body is
+// uncompressed accordingly, otherwise the body is returned untouched.
+// Clients such as Zipkin-Java rely on this behavior, e.g. sending gzipped
+// JSON content with "Content-Encoding: gzip".
+func processBodyIfNecessary(req *http.Request) io.Reader {
+	switch req.Header.Get("Content-Encoding") {
+	default:
+		return req.Body
+
+	case "gzip":
+		return gunzippedBodyIfPossible(req.Body)
+
+	case "deflate", "zlib":
+		return zlibUncompressedBody(req.Body)
+	}
+}
+
+func gunzippedBodyIfPossible(r io.Reader) io.Reader {
+	gzr, err := gzip.NewReader(r)
+	if err != nil {
+		// Just return the old body as was
+		return r
+	}
+	return gzr
+}
+
+func zlibUncompressedBody(r io.Reader) io.Reader {
+	zr, err := zlib.NewReader(r)
+	if err != nil {
+		// Just return the old body as was
+		return r
+	}
+	return zr
+}
+
+const (
+	zipkinV1TagValue = "zipkinV1"
+	zipkinV2TagValue = "zipkinV2"
+)
+
+// ServeHTTP receives spans from Zipkin clients (typically POSTed to /api/v2/spans
+// as JSON), unmarshals them and sends them along to the nextConsumer.
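+// Requests whose URL path contains "api/v1/spans" are parsed as Zipkin v1
+// (JSON or Thrift, depending on the Content-Type header); all other requests
+// are parsed as Zipkin v2 (JSON or protobuf).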
+func (zr *ZipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if c, ok := client.FromHTTP(r); ok { + ctx = client.NewContext(ctx, c) + } + + // Now deserialize and process the spans. + asZipkinv1 := r.URL != nil && strings.Contains(r.URL.Path, "api/v1/spans") + + transportTag := transportType(r) + ctx = obsreport.ReceiverContext(ctx, zr.instanceName, transportTag) + ctx = obsreport.StartTraceDataReceiveOp(ctx, zr.instanceName, transportTag) + + pr := processBodyIfNecessary(r) + slurp, _ := ioutil.ReadAll(pr) + if c, ok := pr.(io.Closer); ok { + _ = c.Close() + } + _ = r.Body.Close() + + var td pdata.Traces + var err error + if asZipkinv1 { + td, err = zr.v1ToTraceSpans(slurp, r.Header) + } else { + td, err = zr.v2ToTraceSpans(slurp, r.Header) + } + + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + consumerErr := zr.nextConsumer.ConsumeTraces(ctx, td) + + receiverTagValue := zipkinV2TagValue + if asZipkinv1 { + receiverTagValue = zipkinV1TagValue + } + obsreport.EndTraceDataReceiveOp(ctx, receiverTagValue, td.SpanCount(), consumerErr) + + if consumerErr != nil { + // Transient error, due to some internal condition. + w.WriteHeader(http.StatusInternalServerError) + w.Write(errNextConsumerRespBody) + return + } + + // Finally send back the response "Accepted" as + // required at https://zipkin.io/zipkin-api/#/default/post_spans + w.WriteHeader(http.StatusAccepted) +} + +func transportType(r *http.Request) string { + v1 := r.URL != nil && strings.Contains(r.URL.Path, "api/v1/spans") + if v1 { + if r.Header.Get("Content-Type") == "application/x-thrift" { + return receiverTransportV1Thrift + } + return receiverTransportV1JSON + } + if r.Header.Get("Content-Type") == "application/x-protobuf" { + return receiverTransportV2PROTO + } + return receiverTransportV2JSON +} diff --git a/internal/otel_collector/receiver/zipkinreceiver/trace_receiver_test.go b/internal/otel_collector/receiver/zipkinreceiver/trace_receiver_test.go new file mode 100644 index 00000000000..b466277f579 --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/trace_receiver_test.go @@ -0,0 +1,601 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zipkinreceiver + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + zipkin2 "github.com/jaegertracing/jaeger/model/converter/thrift/zipkin" + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/zipkinexporter" + "go.opentelemetry.io/collector/testutil" + "go.opentelemetry.io/collector/translator/conventions" +) + +const zipkinReceiverName = "zipkin_receiver_test" + +func TestNew(t *testing.T) { + type args struct { + address string + nextConsumer consumer.TracesConsumer + } + tests := []struct { + name string + args args + wantErr error + }{ + { + name: "nil nextConsumer", + args: args{}, + wantErr: componenterror.ErrNilNextConsumer, + }, + { + name: "happy path", + args: args{ + nextConsumer: consumertest.NewTracesNop(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: tt.args.address, + }, + } + got, err := New(cfg, tt.args.nextConsumer) + require.Equal(t, tt.wantErr, err) + if tt.wantErr == nil { + require.NotNil(t, got) + } else { + require.Nil(t, got) + } + }) + } +} + +func TestZipkinReceiverPortAlreadyInUse(t *testing.T) { + l, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "failed to open a port: %v", err) + defer l.Close() + _, portStr, err := net.SplitHostPort(l.Addr().String()) + require.NoError(t, err, "failed to split listener address: %v", err) + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "localhost:" + portStr, + }, + } + traceReceiver, err := New(cfg, consumertest.NewTracesNop()) + require.NoError(t, err, "Failed to create receiver: %v", err) + err = traceReceiver.Start(context.Background(), componenttest.NewNopHost()) + require.Error(t, err) +} + +func TestConvertSpansToTraceSpans_json(t *testing.T) { + // Using Adrian Cole's sample at https://gist.github.com/adriancole/e8823c19dfed64e2eb71 + blob, err := ioutil.ReadFile("./testdata/sample1.json") + require.NoError(t, err, "Failed to read sample JSON file: %v", err) + zi := new(ZipkinReceiver) + zi.config = createDefaultConfig().(*Config) + reqs, err := zi.v2ToTraceSpans(blob, nil) + require.NoError(t, err, "Failed to parse convert Zipkin spans in JSON to Trace spans: %v", err) + + require.Equal(t, 1, reqs.ResourceSpans().Len(), "Expecting only one request since all spans share same node/localEndpoint: %v", reqs.ResourceSpans().Len()) + + req := reqs.ResourceSpans().At(0) + sn, _ := req.Resource().Attributes().Get(conventions.AttributeServiceName) + assert.Equal(t, "frontend", sn.StringVal()) + + // Expecting 9 non-nil spans + require.Equal(t, 9, reqs.SpanCount(), "Incorrect non-nil spans 
count") +} + +func TestConversionRoundtrip(t *testing.T) { + // The goal is to convert from: + // 1. Original Zipkin JSON as that's the format that Zipkin receivers will receive + // 2. Into TraceProtoSpans + // 3. Into SpanData + // 4. Back into Zipkin JSON (in this case the Zipkin exporter has been configured) + receiverInputJSON := []byte(` +[{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::80:807f" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0", + "status.code": "STATUS_CODE_UNSET" + } +}, +{ + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91386", + "id": "4d1e00c0db9010db", + "kind": "SERVER", + "name": "put", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::80:807f" + }, + "remoteEndpoint": { + "serviceName": "frontend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "clnt/finagle.version": "6.45.0", + "status.code": "STATUS_CODE_UNSET" + } +}]`) + + zi := &ZipkinReceiver{nextConsumer: consumertest.NewTracesNop()} + zi.config = &Config{} + ereqs, err := zi.v2ToTraceSpans(receiverInputJSON, nil) + require.NoError(t, err) + + require.Equal(t, 2, ereqs.SpanCount()) + + // Now the last phase is to transmit them over the wire and then compare the JSONs + + buf := new(bytes.Buffer) + // This will act as the final Zipkin server. + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.Copy(buf, r.Body) + _ = r.Body.Close() + })) + defer backend.Close() + + factory := zipkinexporter.NewFactory() + config := factory.CreateDefaultConfig().(*zipkinexporter.Config) + config.Endpoint = backend.URL + params := component.ExporterCreateParams{Logger: zap.NewNop()} + ze, err := factory.CreateTracesExporter(context.Background(), params, config) + require.NoError(t, err) + require.NotNil(t, ze) + require.NoError(t, ze.Start(context.Background(), componenttest.NewNopHost())) + + require.NoError(t, ze.ConsumeTraces(context.Background(), ereqs)) + + // Shutdown the exporter so it can flush any remaining data. + assert.NoError(t, ze.Shutdown(context.Background())) + backend.Close() + + // The received JSON messages are inside arrays, so reading then directly will + // fail with error. Use a small hack to transform the multiple arrays into a + // single one. 
+ accumulatedJSONMsgs := strings.ReplaceAll(buf.String(), "][", ",") + gj := testutil.GenerateNormalizedJSON(t, accumulatedJSONMsgs) + wj := testutil.GenerateNormalizedJSON(t, string(receiverInputJSON)) + // translation to OTLP sorts spans so do a span-by-span comparison + gj = gj[1 : len(gj)-1] + wj = wj[1 : len(wj)-1] + gjSpans := strings.Split(gj, "{\"annotations\":") + wjSpans := strings.Split(wj, "{\"annotations\":") + assert.Equal(t, len(wjSpans), len(gjSpans)) + for _, wjspan := range wjSpans { + if len(wjspan) > 3 && wjspan[len(wjspan)-1:] == "," { + wjspan = wjspan[0 : len(wjspan)-1] + } + matchFound := false + for _, gjspan := range gjSpans { + if len(gjspan) > 3 && gjspan[len(gjspan)-1:] == "," { + gjspan = gjspan[0 : len(gjspan)-1] + } + if wjspan == gjspan { + matchFound = true + } + } + assert.True(t, matchFound, fmt.Sprintf("no match found for {\"annotations\":%s %v", wjspan, gjSpans)) + } +} + +func TestStartTraceReception(t *testing.T) { + tests := []struct { + name string + host component.Host + wantErr bool + }{ + { + name: "nil_host", + wantErr: true, + }, + { + name: "valid_host", + host: componenttest.NewNopHost(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sink := new(consumertest.TracesSink) + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "localhost:0", + }, + } + zr, err := New(cfg, sink) + require.Nil(t, err) + require.NotNil(t, zr) + + err = zr.Start(context.Background(), tt.host) + assert.Equal(t, tt.wantErr, err != nil) + if !tt.wantErr { + require.Nil(t, zr.Shutdown(context.Background())) + } + }) + } +} + +func TestReceiverContentTypes(t *testing.T) { + tests := []struct { + endpoint string + content string + encoding string + bodyFn func() ([]byte, error) + }{ + { + endpoint: "/api/v1/spans", + content: "application/json", + encoding: "gzip", + bodyFn: func() ([]byte, error) { + return ioutil.ReadFile("../../translator/trace/zipkin/testdata/zipkin_v1_single_batch.json") + }, + }, + + { + endpoint: "/api/v1/spans", + content: "application/x-thrift", + encoding: "gzip", + bodyFn: func() ([]byte, error) { + return thriftExample(), nil + }, + }, + + { + endpoint: "/api/v2/spans", + content: "application/json", + encoding: "gzip", + bodyFn: func() ([]byte, error) { + return ioutil.ReadFile("../../translator/trace/zipkin/testdata/zipkin_v2_single.json") + }, + }, + + { + endpoint: "/api/v2/spans", + content: "application/json", + encoding: "zlib", + bodyFn: func() ([]byte, error) { + return ioutil.ReadFile("../../translator/trace/zipkin/testdata/zipkin_v2_single.json") + }, + }, + + { + endpoint: "/api/v2/spans", + content: "application/json", + encoding: "", + bodyFn: func() ([]byte, error) { + return ioutil.ReadFile("../../translator/trace/zipkin/testdata/zipkin_v2_single.json") + }, + }, + } + + for _, test := range tests { + name := fmt.Sprintf("%v %v %v", test.endpoint, test.content, test.encoding) + t.Run(name, func(t *testing.T) { + body, err := test.bodyFn() + require.NoError(t, err, "Failed to generate test body: %v", err) + + var requestBody *bytes.Buffer + switch test.encoding { + case "": + requestBody = bytes.NewBuffer(body) + case "zlib": + requestBody, err = compressZlib(body) + case "gzip": + requestBody, err = compressGzip(body) + } + require.NoError(t, err) + + r := httptest.NewRequest("POST", test.endpoint, requestBody) + r.Header.Add("content-type", test.content) + 
r.Header.Add("content-encoding", test.encoding) + + next := &zipkinMockTraceConsumer{ + ch: make(chan pdata.Traces, 10), + } + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "", + }, + } + zr, err := New(cfg, next) + require.NoError(t, err) + + req := httptest.NewRecorder() + zr.ServeHTTP(req, r) + + select { + case td := <-next.ch: + require.NotNil(t, td) + require.Equal(t, 202, req.Code) + break + case <-time.After(time.Second * 2): + t.Error("next consumer did not receive the batch") + break + } + }) + } +} + +func TestReceiverInvalidContentType(t *testing.T) { + body := `{ invalid json ` + + r := httptest.NewRequest("POST", "/api/v2/spans", + bytes.NewBuffer([]byte(body))) + r.Header.Add("content-type", "application/json") + + next := &zipkinMockTraceConsumer{ + ch: make(chan pdata.Traces, 10), + } + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "", + }, + } + zr, err := New(cfg, next) + require.NoError(t, err) + + req := httptest.NewRecorder() + zr.ServeHTTP(req, r) + + require.Equal(t, 400, req.Code) + require.Equal(t, "invalid character 'i' looking for beginning of object key string\n", req.Body.String()) +} + +func TestReceiverConsumerError(t *testing.T) { + body, err := ioutil.ReadFile("../../translator/trace/zipkin/testdata/zipkin_v2_single.json") + require.NoError(t, err) + + r := httptest.NewRequest("POST", "/api/v2/spans", bytes.NewBuffer(body)) + r.Header.Add("content-type", "application/json") + + next := &zipkinMockTraceConsumer{ + ch: make(chan pdata.Traces, 10), + err: errors.New("consumer error"), + } + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "localhost:9411", + }, + } + zr, err := New(cfg, next) + require.NoError(t, err) + + req := httptest.NewRecorder() + zr.ServeHTTP(req, r) + + require.Equal(t, 500, req.Code) + require.Equal(t, "\"Internal Server Error\"", req.Body.String()) +} + +func thriftExample() []byte { + now := time.Now().Unix() + zSpans := []*zipkincore.Span{ + { + TraceID: 1, + Name: "test", + ID: 2, + BinaryAnnotations: []*zipkincore.BinaryAnnotation{ + { + Key: "http.path", + Value: []byte("/"), + }, + }, + Timestamp: &now, + }, + } + + return zipkin2.SerializeThrift(zSpans) +} + +func compressGzip(body []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + + _, err := zw.Write(body) + if err != nil { + return nil, err + } + + if err := zw.Close(); err != nil { + return nil, err + } + + return &buf, nil +} + +func compressZlib(body []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + zw := zlib.NewWriter(&buf) + + _, err := zw.Write(body) + if err != nil { + return nil, err + } + + if err := zw.Close(); err != nil { + return nil, err + } + + return &buf, nil +} + +type zipkinMockTraceConsumer struct { + ch chan pdata.Traces + err error +} + +func (m *zipkinMockTraceConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error { + m.ch <- td + return m.err +} + +func TestConvertSpansToTraceSpans_JSONWithoutSerivceName(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/sample2.json") + require.NoError(t, err, "Failed to read sample JSON file: %v", err) + zi := new(ZipkinReceiver) + zi.config = createDefaultConfig().(*Config) + reqs, err := 
zi.v2ToTraceSpans(blob, nil) + require.NoError(t, err, "Failed to parse convert Zipkin spans in JSON to Trace spans: %v", err) + + require.Equal(t, 1, reqs.ResourceSpans().Len(), "Expecting only one request since all spans share same node/localEndpoint: %v", reqs.ResourceSpans().Len()) + + // Expecting 1 non-nil spans + require.Equal(t, 1, reqs.SpanCount(), "Incorrect non-nil spans count") +} + +func TestReceiverConvertsStringsToTypes(t *testing.T) { + body, err := ioutil.ReadFile("../../translator/trace/zipkin/testdata/zipkin_v2_single.json") + require.NoError(t, err, "Failed to read sample JSON file: %v", err) + + r := httptest.NewRequest("POST", "/api/v2/spans", bytes.NewBuffer(body)) + r.Header.Add("content-type", "application/json") + + next := &zipkinMockTraceConsumer{ + ch: make(chan pdata.Traces, 10), + } + cfg := &Config{ + ReceiverSettings: configmodels.ReceiverSettings{ + NameVal: zipkinReceiverName, + }, + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "", + }, + ParseStringTags: true, + } + zr, err := New(cfg, next) + require.NoError(t, err) + + req := httptest.NewRecorder() + zr.ServeHTTP(req, r) + + select { + case td := <-next.ch: + require.NotNil(t, td) + require.Equal(t, 202, req.Code) + + span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + + expected := pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + "cache_hit": pdata.NewAttributeValueBool(true), + "ping_count": pdata.NewAttributeValueInt(25), + "timeout": pdata.NewAttributeValueDouble(12.3), + "clnt/finagle.version": pdata.NewAttributeValueString("6.45.0"), + "http.path": pdata.NewAttributeValueString("/api"), + "http.status_code": pdata.NewAttributeValueInt(500), + "net.host.ip": pdata.NewAttributeValueString("7::80:807f"), + "peer.service": pdata.NewAttributeValueString("backend"), + "net.peer.ip": pdata.NewAttributeValueString("192.168.99.101"), + "net.peer.port": pdata.NewAttributeValueInt(9000), + }).Sort() + + actual := span.Attributes().Sort() + + assert.EqualValues(t, expected, actual) + break + case <-time.After(time.Second * 2): + t.Error("next consumer did not receive the batch") + break + } +} diff --git a/internal/otel_collector/service/builder/builder.go b/internal/otel_collector/service/builder/builder.go new file mode 100644 index 00000000000..be65afd39fe --- /dev/null +++ b/internal/otel_collector/service/builder/builder.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package builder + +import ( + "flag" + "fmt" +) + +const ( + // flags + configCfg = "config" + memBallastFlag = "mem-ballast-size-mib" + + kindLogKey = "component_kind" + kindLogsReceiver = "receiver" + kindLogsProcessor = "processor" + kindLogsExporter = "exporter" + kindLogExtension = "extension" + typeLogKey = "component_type" + nameLogKey = "component_name" +) + +var ( + configFile *string + memBallastSize *uint +) + +// Flags adds flags related to basic building of the collector application to the given flagset. +func Flags(flags *flag.FlagSet) { + configFile = flags.String(configCfg, "", "Path to the config file") + memBallastSize = flags.Uint(memBallastFlag, 0, + fmt.Sprintf("Flag to specify size of memory (MiB) ballast to set. Ballast is not used when this is not specified. "+ + "default settings: 0")) +} + +// GetConfigFile gets the config file from the config file flag. +func GetConfigFile() string { + return *configFile +} + +// MemBallastSize returns the size of memory ballast to use in MBs +func MemBallastSize() int { + return int(*memBallastSize) +} diff --git a/internal/otel_collector/service/builder/doc.go b/internal/otel_collector/service/builder/doc.go new file mode 100644 index 00000000000..c5bd4259703 --- /dev/null +++ b/internal/otel_collector/service/builder/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package builder handles the options to build the OpenTelemetry collector +// pipeline. +package builder diff --git a/internal/otel_collector/service/builder/exporters_builder.go b/internal/otel_collector/service/builder/exporters_builder.go new file mode 100644 index 00000000000..2fcd9174b65 --- /dev/null +++ b/internal/otel_collector/service/builder/exporters_builder.go @@ -0,0 +1,320 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package builder + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config/configerror" + "go.opentelemetry.io/collector/config/configmodels" +) + +// builtExporter is an exporter that is built based on a config. It can have +// a trace and/or a metrics consumer and have a shutdown function. +type builtExporter struct { + logger *zap.Logger + expByDataType map[configmodels.DataType]component.Exporter +} + +// Start the exporter. 
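+// Each per-data-type exporter held by the builtExporter is started in turn,
+// and any errors are combined into a single error.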
+func (bexp *builtExporter) Start(ctx context.Context, host component.Host) error {
+	var errors []error
+	for _, exporter := range bexp.expByDataType {
+		err := exporter.Start(ctx, host)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errors)
+}
+
+// Shutdown shuts down all data type components of the exporter.
+func (bexp *builtExporter) Shutdown(ctx context.Context) error {
+	var errors []error
+	for _, exporter := range bexp.expByDataType {
+		err := exporter.Shutdown(ctx)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errors)
+}
+
+func (bexp *builtExporter) getTraceExporter() component.TracesExporter {
+	exp := bexp.expByDataType[configmodels.TracesDataType]
+	if exp == nil {
+		return nil
+	}
+	return exp.(component.TracesExporter)
+}
+
+func (bexp *builtExporter) getMetricExporter() component.MetricsExporter {
+	exp := bexp.expByDataType[configmodels.MetricsDataType]
+	if exp == nil {
+		return nil
+	}
+	return exp.(component.MetricsExporter)
+}
+
+func (bexp *builtExporter) getLogExporter() component.LogsExporter {
+	exp := bexp.expByDataType[configmodels.LogsDataType]
+	if exp == nil {
+		return nil
+	}
+	return exp.(component.LogsExporter)
+}
+
+// Exporters is a map of exporters created from exporter configs.
+type Exporters map[configmodels.Exporter]*builtExporter
+
+// StartAll starts all exporters.
+func (exps Exporters) StartAll(ctx context.Context, host component.Host) error {
+	for _, exp := range exps {
+		exp.logger.Info("Exporter is starting...")
+
+		if err := exp.Start(ctx, host); err != nil {
+			return err
+		}
+		exp.logger.Info("Exporter started.")
+	}
+	return nil
+}
+
+// ShutdownAll stops all exporters.
+func (exps Exporters) ShutdownAll(ctx context.Context) error {
+	var errs []error
+	for _, exp := range exps {
+		err := exp.Shutdown(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+func (exps Exporters) ToMapByDataType() map[configmodels.DataType]map[configmodels.Exporter]component.Exporter {
+	exportersMap := make(map[configmodels.DataType]map[configmodels.Exporter]component.Exporter)
+
+	exportersMap[configmodels.TracesDataType] = make(map[configmodels.Exporter]component.Exporter, len(exps))
+	exportersMap[configmodels.MetricsDataType] = make(map[configmodels.Exporter]component.Exporter, len(exps))
+	exportersMap[configmodels.LogsDataType] = make(map[configmodels.Exporter]component.Exporter, len(exps))
+
+	for cfg, bexp := range exps {
+		for t, exp := range bexp.expByDataType {
+			exportersMap[t][cfg] = exp
+		}
+	}
+
+	return exportersMap
+}
+
+type dataTypeRequirement struct {
+	// Pipeline that requires the data type.
+	requiredBy *configmodels.Pipeline
+}
+
+// Map of data type requirements.
+type dataTypeRequirements map[configmodels.DataType]dataTypeRequirement
+
+// Data type requirements for all exporters.
+type exportersRequiredDataTypes map[configmodels.Exporter]dataTypeRequirements
+
+// ExportersBuilder builds exporters from config.
+type ExportersBuilder struct {
+	logger    *zap.Logger
+	appInfo   component.ApplicationStartInfo
+	config    *configmodels.Config
+	factories map[configmodels.Type]component.ExporterFactory
+}
+
+// NewExportersBuilder creates a new ExportersBuilder. Call Build() on the returned value.
+func NewExportersBuilder(
+	logger *zap.Logger,
+	appInfo component.ApplicationStartInfo,
+	config *configmodels.Config,
+	factories map[configmodels.Type]component.ExporterFactory,
+) *ExportersBuilder {
+	return &ExportersBuilder{logger.With(zap.String(kindLogKey, kindLogsExporter)), appInfo, config, factories}
+}
+
+// Build builds exporters from the config.
+func (eb *ExportersBuilder) Build() (Exporters, error) {
+	exporters := make(Exporters)
+
+	// We need to calculate required input data types for each exporter so that we know
+	// which data type must be started for each exporter.
+	exporterInputDataTypes := eb.calcExportersRequiredDataTypes()
+
+	// Build exporters based on configuration and required input data types.
+	for _, cfg := range eb.config.Exporters {
+		componentLogger := eb.logger.With(zap.String(typeLogKey, string(cfg.Type())), zap.String(nameLogKey, cfg.Name()))
+		exp, err := eb.buildExporter(context.Background(), componentLogger, eb.appInfo, cfg, exporterInputDataTypes)
+		if err != nil {
+			return nil, err
+		}
+
+		exporters[cfg] = exp
+	}
+
+	return exporters, nil
+}
+
+func (eb *ExportersBuilder) calcExportersRequiredDataTypes() exportersRequiredDataTypes {
+	// Go over all pipelines. The data type of the pipeline defines what data type
+	// each exporter is expected to receive. Collect all required types for each
+	// exporter.
+	//
+	// We also remember the last pipeline that requested the particular data type.
+	// This is only needed for logging purposes in error cases when we need to
+	// print that a particular exporter does not support the data type required for
+	// a particular pipeline.
+
+	result := make(exportersRequiredDataTypes)
+
+	// Iterate over pipelines.
+	for _, pipeline := range eb.config.Service.Pipelines {
+		// Iterate over all exporters for this pipeline.
+		for _, expName := range pipeline.Exporters {
+			// Find the exporter config by name.
+			exporter := eb.config.Exporters[expName]
+
+			// Create the data type requirement for the exporter if it does not exist.
+			if result[exporter] == nil {
+				result[exporter] = make(dataTypeRequirements)
+			}
+
+			// Remember that this data type is required for the exporter and also which
+			// pipeline the requirement is coming from.
+			result[exporter][pipeline.InputType] = dataTypeRequirement{pipeline}
+		}
+	}
+	return result
+}
+
+func (eb *ExportersBuilder) buildExporter(
+	ctx context.Context,
+	logger *zap.Logger,
+	appInfo component.ApplicationStartInfo,
+	config configmodels.Exporter,
+	exportersInputDataTypes exportersRequiredDataTypes,
+) (*builtExporter, error) {
+	factory := eb.factories[config.Type()]
+	if factory == nil {
+		return nil, fmt.Errorf("exporter factory not found for type: %s", config.Type())
+	}
+
+	exporter := &builtExporter{
+		logger:        logger,
+		expByDataType: make(map[configmodels.DataType]component.Exporter, 3),
+	}
+
+	inputDataTypes := exportersInputDataTypes[config]
+	if inputDataTypes == nil {
+		eb.logger.Info("Ignoring exporter as it is not used by any pipeline")
+		return exporter, nil
+	}
+
+	creationParams := component.ExporterCreateParams{
+		Logger:               logger,
+		ApplicationStartInfo: appInfo,
+	}
+
+	for dataType, requirement := range inputDataTypes {
+		switch dataType {
+		case configmodels.TracesDataType:
+			// Traces data type is required. Create a trace exporter based on config.
+			te, err := factory.CreateTracesExporter(ctx, creationParams, config)
+			if err != nil {
+				if err == configerror.ErrDataTypeIsNotSupported {
+					// Could not create because this exporter does not support this data type.
+					return nil, exporterTypeMismatchErr(config, requirement.requiredBy, dataType)
+				}
+				return nil, fmt.Errorf("error creating %s exporter: %v", config.Name(), err)
+			}
+
+			// Check if the factory really created the exporter.
+			if te == nil {
+				return nil, fmt.Errorf("factory for %q produced a nil exporter", config.Name())
+			}
+
+			exporter.expByDataType[configmodels.TracesDataType] = te
+
+		case configmodels.MetricsDataType:
+			// Metrics data type is required. Create a metrics exporter based on config.
+			me, err := factory.CreateMetricsExporter(ctx, creationParams, config)
+			if err != nil {
+				if err == configerror.ErrDataTypeIsNotSupported {
+					// Could not create because this exporter does not support this data type.
+					return nil, exporterTypeMismatchErr(config, requirement.requiredBy, dataType)
+				}
+				return nil, fmt.Errorf("error creating %s exporter: %v", config.Name(), err)
+			}
+
+			// The factories can be implemented by third parties, check if they really
+			// created the exporter.
+			if me == nil {
+				return nil, fmt.Errorf("factory for %q produced a nil exporter", config.Name())
+			}
+
+			exporter.expByDataType[configmodels.MetricsDataType] = me
+
+		case configmodels.LogsDataType:
+			le, err := factory.CreateLogsExporter(ctx, creationParams, config)
+			if err != nil {
+				if err == configerror.ErrDataTypeIsNotSupported {
+					// Could not create because this exporter does not support this data type.
+					return nil, exporterTypeMismatchErr(config, requirement.requiredBy, dataType)
+				}
+				return nil, fmt.Errorf("error creating %s exporter: %v", config.Name(), err)
+			}
+
+			// Check if the factory really created the exporter.
+			if le == nil {
+				return nil, fmt.Errorf("factory for %q produced a nil exporter", config.Name())
+			}
+
+			exporter.expByDataType[configmodels.LogsDataType] = le
+
+		default:
+			// Could not create because this exporter does not support this data type.
+			return nil, exporterTypeMismatchErr(config, requirement.requiredBy, dataType)
+		}
+	}
+
+	eb.logger.Info("Exporter is enabled.", zap.String("exporter", config.Name()))
+
+	return exporter, nil
+}
+
+func exporterTypeMismatchErr(
+	config configmodels.Exporter,
+	requiredByPipeline *configmodels.Pipeline,
+	dataType configmodels.DataType,
+) error {
+	return fmt.Errorf("pipeline %q of data type %q has an exporter %q, which does not support that data type",
+		requiredByPipeline.Name, dataType,
+		config.Name(),
+	)
+}
diff --git a/internal/otel_collector/service/builder/exporters_builder_test.go b/internal/otel_collector/service/builder/exporters_builder_test.go
new file mode 100644
index 00000000000..68f92e2647f
--- /dev/null
+++ b/internal/otel_collector/service/builder/exporters_builder_test.go
@@ -0,0 +1,274 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package builder + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/opencensusexporter" +) + +func TestExportersBuilder_Build(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + oceFactory := opencensusexporter.NewFactory() + factories.Exporters[oceFactory.Type()] = oceFactory + cfg := &configmodels.Config{ + Exporters: map[string]configmodels.Exporter{ + "opencensus": &opencensusexporter.Config{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "opencensus", + TypeVal: "opencensus", + }, + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Endpoint: "0.0.0.0:12345", + }, + NumWorkers: 2, + }, + }, + + Service: configmodels.Service{ + Pipelines: map[string]*configmodels.Pipeline{ + "trace": { + Name: "trace", + InputType: configmodels.TracesDataType, + Exporters: []string{"opencensus"}, + }, + }, + }, + } + + exporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + + assert.NoError(t, err) + require.NotNil(t, exporters) + + e1 := exporters[cfg.Exporters["opencensus"]] + + // Ensure exporter has its fields correctly populated. + require.NotNil(t, e1) + assert.NotNil(t, e1.getTraceExporter()) + assert.Nil(t, e1.getMetricExporter()) + assert.Nil(t, e1.getLogExporter()) + + // Ensure it can be started. + assert.NoError(t, exporters.StartAll(context.Background(), componenttest.NewNopHost())) + + // Ensure it can be stopped. + if err = e1.Shutdown(context.Background()); err != nil { + // TODO Find a better way to handle this case + // Since the endpoint of opencensus exporter doesn't actually exist, e1 may + // already stop because it cannot connect. + // The test should stop running if this isn't the error cause. + require.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing") + } + + // Remove the pipeline so that the exporter is not attached to any pipeline. + // This should result in creating an exporter that has none of consumption + // functions set. + delete(cfg.Service.Pipelines, "trace") + exporters, err = NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + assert.NotNil(t, exporters) + assert.NoError(t, err) + + e1 = exporters[cfg.Exporters["opencensus"]] + + // Ensure exporter has its fields correctly populated, ie Trace Exporter and + // Metrics Exporter are nil. + require.NotNil(t, e1) + assert.Nil(t, e1.getTraceExporter()) + assert.Nil(t, e1.getMetricExporter()) + assert.Nil(t, e1.getLogExporter()) + + // TODO: once we have an exporter that supports metrics data type test it too. 
+} + +func TestExportersBuilder_BuildLogs(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.Nil(t, err) + + cfg := &configmodels.Config{ + Exporters: map[string]configmodels.Exporter{ + "exampleexporter": &componenttest.ExampleExporter{ + ExporterSettings: configmodels.ExporterSettings{ + NameVal: "exampleexporter", + TypeVal: "exampleexporter", + }, + }, + }, + + Service: configmodels.Service{ + Pipelines: map[string]*configmodels.Pipeline{ + "logs": { + Name: "logs", + InputType: "logs", + Exporters: []string{"exampleexporter"}, + }, + }, + }, + } + + exporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + + assert.NoError(t, err) + require.NotNil(t, exporters) + + e1 := exporters[cfg.Exporters["exampleexporter"]] + + // Ensure exporter has its fields correctly populated. + require.NotNil(t, e1) + assert.NotNil(t, e1.getLogExporter()) + assert.Nil(t, e1.getTraceExporter()) + assert.Nil(t, e1.getMetricExporter()) + + // Ensure it can be started. + err = exporters.StartAll(context.Background(), componenttest.NewNopHost()) + assert.NoError(t, err) + + // Ensure it can be stopped. + err = e1.Shutdown(context.Background()) + assert.NoError(t, err) + + // Remove the pipeline so that the exporter is not attached to any pipeline. + // This should result in creating an exporter that has none of consumption + // functions set. + delete(cfg.Service.Pipelines, "logs") + exporters, err = NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + assert.NotNil(t, exporters) + assert.Nil(t, err) + + e1 = exporters[cfg.Exporters["exampleexporter"]] + + // Ensure exporter has its fields correctly populated, ie Trace Exporter and + // Metrics Exporter are nil. 
+ require.NotNil(t, e1) + assert.Nil(t, e1.getTraceExporter()) + assert.Nil(t, e1.getMetricExporter()) + assert.Nil(t, e1.getLogExporter()) +} + +func TestExportersBuilder_StartAll(t *testing.T) { + exporters := make(Exporters) + expCfg := &configmodels.ExporterSettings{} + traceExporter := &componenttest.ExampleExporterConsumer{} + metricExporter := &componenttest.ExampleExporterConsumer{} + logsExporter := &componenttest.ExampleExporterConsumer{} + exporters[expCfg] = &builtExporter{ + logger: zap.NewNop(), + expByDataType: map[configmodels.DataType]component.Exporter{ + configmodels.TracesDataType: traceExporter, + configmodels.MetricsDataType: metricExporter, + configmodels.LogsDataType: logsExporter, + }, + } + assert.False(t, traceExporter.ExporterStarted) + assert.False(t, metricExporter.ExporterStarted) + assert.False(t, logsExporter.ExporterStarted) + + assert.NoError(t, exporters.StartAll(context.Background(), componenttest.NewNopHost())) + + assert.True(t, traceExporter.ExporterStarted) + assert.True(t, metricExporter.ExporterStarted) + assert.True(t, logsExporter.ExporterStarted) +} + +func TestExportersBuilder_StopAll(t *testing.T) { + exporters := make(Exporters) + expCfg := &configmodels.ExporterSettings{} + traceExporter := &componenttest.ExampleExporterConsumer{} + metricExporter := &componenttest.ExampleExporterConsumer{} + logsExporter := &componenttest.ExampleExporterConsumer{} + exporters[expCfg] = &builtExporter{ + logger: zap.NewNop(), + expByDataType: map[configmodels.DataType]component.Exporter{ + configmodels.TracesDataType: traceExporter, + configmodels.MetricsDataType: metricExporter, + configmodels.LogsDataType: logsExporter, + }, + } + assert.False(t, traceExporter.ExporterShutdown) + assert.False(t, metricExporter.ExporterShutdown) + assert.False(t, logsExporter.ExporterShutdown) + assert.NoError(t, exporters.ShutdownAll(context.Background())) + + assert.True(t, traceExporter.ExporterShutdown) + assert.True(t, metricExporter.ExporterShutdown) + assert.True(t, logsExporter.ExporterShutdown) +} + +func TestExportersBuilder_ErrorOnNilExporter(t *testing.T) { + bf := newBadExporterFactory() + fm := map[configmodels.Type]component.ExporterFactory{ + bf.Type(): bf, + } + + pipelines := []*configmodels.Pipeline{ + { + Name: "trace", + InputType: configmodels.TracesDataType, + Exporters: []string{string(bf.Type())}, + }, + { + Name: "metrics", + InputType: configmodels.MetricsDataType, + Exporters: []string{string(bf.Type())}, + }, + { + Name: "logs", + InputType: configmodels.LogsDataType, + Exporters: []string{string(bf.Type())}, + }, + } + + for _, pipeline := range pipelines { + t.Run(pipeline.Name, func(t *testing.T) { + + cfg := &configmodels.Config{ + Exporters: map[string]configmodels.Exporter{ + string(bf.Type()): &configmodels.ExporterSettings{ + TypeVal: bf.Type(), + }, + }, + + Service: configmodels.Service{ + Pipelines: map[string]*configmodels.Pipeline{ + pipeline.Name: pipeline, + }, + }, + } + + exporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, fm).Build() + assert.Error(t, err) + assert.Zero(t, len(exporters)) + }) + } +} + +func newBadExporterFactory() component.ExporterFactory { + return exporterhelper.NewFactory("bf", func() configmodels.Exporter { + return &configmodels.ExporterSettings{} + }) +} diff --git a/internal/otel_collector/service/builder/extensions_builder.go b/internal/otel_collector/service/builder/extensions_builder.go new file mode 100644 index 00000000000..8c10d9fb633 --- /dev/null 
+++ b/internal/otel_collector/service/builder/extensions_builder.go
@@ -0,0 +1,181 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+	"context"
+	"fmt"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config/configmodels"
+)
+
+// builtExtension is a service extension that is built based on a config.
+// It wraps the created component.ServiceExtension together with its logger.
+type builtExtension struct {
+	logger    *zap.Logger
+	extension component.ServiceExtension
+}
+
+// Start the extension.
+func (ext *builtExtension) Start(ctx context.Context, host component.Host) error {
+	return ext.extension.Start(ctx, host)
+}
+
+// Shutdown the extension.
+func (ext *builtExtension) Shutdown(ctx context.Context) error {
+	return ext.extension.Shutdown(ctx)
+}
+
+var _ component.ServiceExtension = (*builtExtension)(nil)
+
+// Extensions is a map of extensions created from extension configs.
+type Extensions map[configmodels.Extension]*builtExtension
+
+// StartAll starts all extensions.
+func (exts Extensions) StartAll(ctx context.Context, host component.Host) error {
+	for _, ext := range exts {
+		ext.logger.Info("Extension is starting...")
+
+		if err := ext.Start(ctx, host); err != nil {
+			return err
+		}
+
+		ext.logger.Info("Extension started.")
+	}
+	return nil
+}
+
+// ShutdownAll stops all extensions.
+func (exts Extensions) ShutdownAll(ctx context.Context) error {
+	var errs []error
+	for _, ext := range exts {
+		err := ext.Shutdown(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+// NotifyPipelineReady notifies all extensions that implement PipelineWatcher
+// that the pipeline is ready.
+func (exts Extensions) NotifyPipelineReady() error {
+	for _, ext := range exts {
+		if pw, ok := ext.extension.(component.PipelineWatcher); ok {
+			if err := pw.Ready(); err != nil {
+				ext.logger.Error("Error notifying extension that the pipeline was started.")
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// NotifyPipelineNotReady notifies all extensions that implement PipelineWatcher
+// that the pipeline is shutting down.
+func (exts Extensions) NotifyPipelineNotReady() error {
+	// Notify all watching extensions.
+	var errs []error
+	for _, ext := range exts {
+		if pw, ok := ext.extension.(component.PipelineWatcher); ok {
+			if err := pw.NotReady(); err != nil {
+				ext.logger.Error("Error notifying extension that the pipeline was shutdown.")
+				errs = append(errs, err)
+			}
+		}
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+// ToMap returns a map of extension configs to the created extensions.
+func (exts Extensions) ToMap() map[configmodels.Extension]component.ServiceExtension {
+	result := make(map[configmodels.Extension]component.ServiceExtension, len(exts))
+	for k, v := range exts {
+		result[k] = v.extension
+	}
+	return result
+}
+
+// ExtensionsBuilder builds extensions from config.
+type ExtensionsBuilder struct {
+	logger    *zap.Logger
+	appInfo   component.ApplicationStartInfo
+	config    *configmodels.Config
+	factories map[configmodels.Type]component.ExtensionFactory
+}
+
+// NewExtensionsBuilder creates a new ExtensionsBuilder.
Call Build() on the returned value.
+func NewExtensionsBuilder(
+	logger *zap.Logger,
+	appInfo component.ApplicationStartInfo,
+	config *configmodels.Config,
+	factories map[configmodels.Type]component.ExtensionFactory,
+) *ExtensionsBuilder {
+	return &ExtensionsBuilder{logger.With(zap.String(kindLogKey, kindLogExtension)), appInfo, config, factories}
+}
+
+// Build builds extensions from config.
+func (eb *ExtensionsBuilder) Build() (Extensions, error) {
+	extensions := make(Extensions)
+
+	for _, extName := range eb.config.Service.Extensions {
+		extCfg, exists := eb.config.Extensions[extName]
+		if !exists {
+			return nil, fmt.Errorf("extension %q is not configured", extName)
+		}
+
+		componentLogger := eb.logger.With(zap.String(typeLogKey, string(extCfg.Type())), zap.String(nameLogKey, extCfg.Name()))
+		ext, err := eb.buildExtension(componentLogger, eb.appInfo, extCfg)
+		if err != nil {
+			return nil, err
+		}
+
+		extensions[extCfg] = ext
+	}
+
+	return extensions, nil
+}
+
+func (eb *ExtensionsBuilder) buildExtension(logger *zap.Logger, appInfo component.ApplicationStartInfo, cfg configmodels.Extension) (*builtExtension, error) {
+	factory := eb.factories[cfg.Type()]
+	if factory == nil {
+		return nil, fmt.Errorf("extension factory for type %q is not configured", cfg.Type())
+	}
+
+	ext := &builtExtension{
+		logger: logger,
+	}
+
+	creationParams := component.ExtensionCreateParams{
+		Logger:               logger,
+		ApplicationStartInfo: appInfo,
+	}
+
+	ex, err := factory.CreateExtension(context.Background(), creationParams, cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create extension %q: %w", cfg.Name(), err)
+	}
+
+	// Check if the factory really created the extension.
+	if ex == nil {
+		return nil, fmt.Errorf("factory for %q produced a nil extension", cfg.Name())
+	}
+
+	ext.extension = ex
+
+	return ext, nil
+}
diff --git a/internal/otel_collector/service/builder/pipelines_builder.go b/internal/otel_collector/service/builder/pipelines_builder.go
new file mode 100644
index 00000000000..bf13b97563c
--- /dev/null
+++ b/internal/otel_collector/service/builder/pipelines_builder.go
@@ -0,0 +1,266 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+	"context"
+	"fmt"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/processor"
+)
+
+// builtPipeline is a pipeline that is built based on a config.
+// It can have a trace, metrics, and/or logs consumer (the consumer is either the first
+// processor in the pipeline or the exporter if the pipeline has no processors).
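+//
+// Conceptually, the assembled chain looks like this (an illustrative sketch,
+// not code in this change):
+//
+//	receiver(s) -> firstTC/firstMC/firstLC -> ...processors... -> fan-out -> exporter(s)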
+type builtPipeline struct {
+	logger  *zap.Logger
+	firstTC consumer.TracesConsumer
+	firstMC consumer.MetricsConsumer
+	firstLC consumer.LogsConsumer
+
+	// MutatesConsumedData is set to true if any processors in the pipeline
+	// can mutate the TraceData or MetricsData input argument.
+	MutatesConsumedData bool
+
+	processors []component.Processor
+}
+
+// BuiltPipelines is a map of built pipelines created from pipeline configs.
+type BuiltPipelines map[*configmodels.Pipeline]*builtPipeline
+
+func (bps BuiltPipelines) StartProcessors(ctx context.Context, host component.Host) error {
+	for _, bp := range bps {
+		bp.logger.Info("Pipeline is starting...")
+		// Start in reverse order, starting from the back of the processors pipeline.
+		// This is important so that processors that are earlier in the pipeline and
+		// reference processors that are later in the pipeline do not start sending
+		// data to later processors which are not yet started.
+		for i := len(bp.processors) - 1; i >= 0; i-- {
+			if err := bp.processors[i].Start(ctx, host); err != nil {
+				return err
+			}
+		}
+		bp.logger.Info("Pipeline is started.")
+	}
+	return nil
+}
+
+func (bps BuiltPipelines) ShutdownProcessors(ctx context.Context) error {
+	var errs []error
+	for _, bp := range bps {
+		bp.logger.Info("Pipeline is shutting down...")
+		for _, p := range bp.processors {
+			if err := p.Shutdown(ctx); err != nil {
+				errs = append(errs, err)
+			}
+		}
+		bp.logger.Info("Pipeline is shutdown.")
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+// PipelinesBuilder builds pipelines from config.
+type PipelinesBuilder struct {
+	logger    *zap.Logger
+	appInfo   component.ApplicationStartInfo
+	config    *configmodels.Config
+	exporters Exporters
+	factories map[configmodels.Type]component.ProcessorFactory
+}
+
+// NewPipelinesBuilder creates a new PipelinesBuilder. Requires exporters to be already
+// built via ExportersBuilder. Call Build() on the returned value.
+func NewPipelinesBuilder(
+	logger *zap.Logger,
+	appInfo component.ApplicationStartInfo,
+	config *configmodels.Config,
+	exporters Exporters,
+	factories map[configmodels.Type]component.ProcessorFactory,
+) *PipelinesBuilder {
+	return &PipelinesBuilder{logger, appInfo, config, exporters, factories}
+}
+
+// Build builds pipeline processors from config.
+func (pb *PipelinesBuilder) Build() (BuiltPipelines, error) {
+	pipelineProcessors := make(BuiltPipelines)
+
+	for _, pipeline := range pb.config.Service.Pipelines {
+		firstProcessor, err := pb.buildPipeline(context.Background(), pipeline)
+		if err != nil {
+			return nil, err
+		}
+		pipelineProcessors[pipeline] = firstProcessor
+	}
+
+	return pipelineProcessors, nil
+}
+
+// Builds a pipeline of processors. Returns the first processor in the pipeline.
+// The last processor in the pipeline will be plugged to fan out the data into exporters
+// that are configured for this pipeline.
+func (pb *PipelinesBuilder) buildPipeline(ctx context.Context, pipelineCfg *configmodels.Pipeline) (*builtPipeline, error) {
+
+	// Build the pipeline backwards.
+
+	// First create a consumer junction point that fans out the data to all exporters.
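+	// For example, a traces pipeline configured with two exporters gets a
+	// TracesFanOutConnector wrapping both exporters' trace consumers, and that
+	// connector becomes the consumer the last processor sends to.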
+	var tc consumer.TracesConsumer
+	var mc consumer.MetricsConsumer
+	var lc consumer.LogsConsumer
+
+	switch pipelineCfg.InputType {
+	case configmodels.TracesDataType:
+		tc = pb.buildFanoutExportersTraceConsumer(pipelineCfg.Exporters)
+	case configmodels.MetricsDataType:
+		mc = pb.buildFanoutExportersMetricsConsumer(pipelineCfg.Exporters)
+	case configmodels.LogsDataType:
+		lc = pb.buildFanoutExportersLogConsumer(pipelineCfg.Exporters)
+	}
+
+	mutatesConsumedData := false
+
+	processors := make([]component.Processor, len(pipelineCfg.Processors))
+
+	// Now build the processors backwards, starting from the last one.
+	// The last processor points to the consumer which fans out to exporters, then
+	// the processor itself becomes a consumer for the one that precedes it in
+	// the pipeline, and so on.
+	for i := len(pipelineCfg.Processors) - 1; i >= 0; i-- {
+		procName := pipelineCfg.Processors[i]
+		procCfg := pb.config.Processors[procName]
+
+		factory := pb.factories[procCfg.Type()]
+
+		// This processor must point to the next consumer and then
+		// it becomes the next for the previous one (previous in the pipeline,
+		// which we will build in the next loop iteration).
+		var err error
+		componentLogger := pb.logger.With(zap.String(kindLogKey, kindLogsProcessor), zap.String(typeLogKey, string(procCfg.Type())), zap.String(nameLogKey, procCfg.Name()))
+		creationParams := component.ProcessorCreateParams{
+			Logger:               componentLogger,
+			ApplicationStartInfo: pb.appInfo,
+		}
+
+		switch pipelineCfg.InputType {
+		case configmodels.TracesDataType:
+			var proc component.TracesProcessor
+			proc, err = factory.CreateTracesProcessor(ctx, creationParams, procCfg, tc)
+			if proc != nil {
+				mutatesConsumedData = mutatesConsumedData || proc.GetCapabilities().MutatesConsumedData
+			}
+			processors[i] = proc
+			tc = proc
+		case configmodels.MetricsDataType:
+			var proc component.MetricsProcessor
+			proc, err = factory.CreateMetricsProcessor(ctx, creationParams, procCfg, mc)
+			if proc != nil {
+				mutatesConsumedData = mutatesConsumedData || proc.GetCapabilities().MutatesConsumedData
+			}
+			processors[i] = proc
+			mc = proc
+
+		case configmodels.LogsDataType:
+			var proc component.LogsProcessor
+			proc, err = factory.CreateLogsProcessor(ctx, creationParams, procCfg, lc)
+			if proc != nil {
+				mutatesConsumedData = mutatesConsumedData || proc.GetCapabilities().MutatesConsumedData
+			}
+			processors[i] = proc
+			lc = proc
+
+		default:
+			return nil, fmt.Errorf("error creating processor %q in pipeline %q, data type %s is not supported",
+				procName, pipelineCfg.Name, pipelineCfg.InputType)
+		}
+
+		if err != nil {
+			return nil, fmt.Errorf("error creating processor %q in pipeline %q: %v",
+				procName, pipelineCfg.Name, err)
+		}
+
+		// Check if the factory really created the processor.
+		if tc == nil && mc == nil && lc == nil {
+			return nil, fmt.Errorf("factory for %q produced a nil processor", procCfg.Name())
+		}
+	}
+
+	pipelineLogger := pb.logger.With(zap.String("pipeline_name", pipelineCfg.Name),
+		zap.String("pipeline_datatype", string(pipelineCfg.InputType)))
+	pipelineLogger.Info("Pipeline is enabled.")
+
+	bp := &builtPipeline{
+		pipelineLogger,
+		tc,
+		mc,
+		lc,
+		mutatesConsumedData,
+		processors,
+	}
+
+	return bp, nil
+}
+
+// Converts the list of exporter names to a list of corresponding builtExporters.
+func (pb *PipelinesBuilder) getBuiltExportersByNames(exporterNames []string) []*builtExporter { + var result []*builtExporter + for _, name := range exporterNames { + exporter := pb.exporters[pb.config.Exporters[name]] + result = append(result, exporter) + } + + return result +} + +func (pb *PipelinesBuilder) buildFanoutExportersTraceConsumer(exporterNames []string) consumer.TracesConsumer { + builtExporters := pb.getBuiltExportersByNames(exporterNames) + + var exporters []consumer.TracesConsumer + for _, builtExp := range builtExporters { + exporters = append(exporters, builtExp.getTraceExporter()) + } + + // Create a junction point that fans out to all exporters. + return processor.NewTracesFanOutConnector(exporters) +} + +func (pb *PipelinesBuilder) buildFanoutExportersMetricsConsumer(exporterNames []string) consumer.MetricsConsumer { + builtExporters := pb.getBuiltExportersByNames(exporterNames) + + var exporters []consumer.MetricsConsumer + for _, builtExp := range builtExporters { + exporters = append(exporters, builtExp.getMetricExporter()) + } + + // Create a junction point that fans out to all exporters. + return processor.NewMetricsFanOutConnector(exporters) +} + +func (pb *PipelinesBuilder) buildFanoutExportersLogConsumer(exporterNames []string) consumer.LogsConsumer { + builtExporters := pb.getBuiltExportersByNames(exporterNames) + + exporters := make([]consumer.LogsConsumer, len(builtExporters)) + for i, builtExp := range builtExporters { + exporters[i] = builtExp.getLogExporter() + } + + // Create a junction point that fans out to all exporters. + return processor.NewLogsFanOutConnector(exporters) +} diff --git a/internal/otel_collector/service/builder/pipelines_builder_test.go b/internal/otel_collector/service/builder/pipelines_builder_test.go new file mode 100644 index 00000000000..c8adfb2da3a --- /dev/null +++ b/internal/otel_collector/service/builder/pipelines_builder_test.go @@ -0,0 +1,309 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package builder + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +func TestPipelinesBuilder_Build(t *testing.T) { + tests := []struct { + name string + pipelineName string + exporterNames []string + }{ + { + name: "one-exporter", + pipelineName: "traces", + exporterNames: []string{"exampleexporter"}, + }, + { + name: "multi-exporter", + pipelineName: "traces/2", + exporterNames: []string{"exampleexporter", "exampleexporter/2"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testPipeline(t, test.pipelineName, test.exporterNames) + }) + } +} + +func createExampleFactories() component.Factories { + exampleReceiverFactory := &componenttest.ExampleReceiverFactory{} + exampleProcessorFactory := &componenttest.ExampleProcessorFactory{} + exampleExporterFactory := &componenttest.ExampleExporterFactory{} + + factories := component.Factories{ + Receivers: map[configmodels.Type]component.ReceiverFactory{ + exampleReceiverFactory.Type(): exampleReceiverFactory, + }, + Processors: map[configmodels.Type]component.ProcessorFactory{ + exampleProcessorFactory.Type(): exampleProcessorFactory, + }, + Exporters: map[configmodels.Type]component.ExporterFactory{ + exampleExporterFactory.Type(): exampleExporterFactory, + }, + } + + return factories +} + +func createExampleConfig(dataType string) *configmodels.Config { + + exampleReceiverFactory := &componenttest.ExampleReceiverFactory{} + exampleProcessorFactory := &componenttest.ExampleProcessorFactory{} + exampleExporterFactory := &componenttest.ExampleExporterFactory{} + + cfg := &configmodels.Config{ + Receivers: map[string]configmodels.Receiver{ + string(exampleReceiverFactory.Type()): exampleReceiverFactory.CreateDefaultConfig(), + }, + Processors: map[string]configmodels.Processor{ + string(exampleProcessorFactory.Type()): exampleProcessorFactory.CreateDefaultConfig(), + }, + Exporters: map[string]configmodels.Exporter{ + string(exampleExporterFactory.Type()): exampleExporterFactory.CreateDefaultConfig(), + }, + Service: configmodels.Service{ + Pipelines: map[string]*configmodels.Pipeline{ + dataType: { + Name: dataType, + InputType: configmodels.DataType(dataType), + Receivers: []string{string(exampleReceiverFactory.Type())}, + Processors: []string{string(exampleProcessorFactory.Type())}, + Exporters: []string{string(exampleExporterFactory.Type())}, + }, + }, + }, + } + return cfg +} + +func TestPipelinesBuilder_BuildVarious(t *testing.T) { + + factories := createExampleFactories() + + tests := []struct { + dataType string + shouldFail bool + }{ + { + dataType: "logs", + shouldFail: false, + }, + { + dataType: "nosuchdatatype", + shouldFail: true, + }, + } + + for _, test := range tests { + t.Run(test.dataType, func(t *testing.T) { + dataType := test.dataType + + cfg := createExampleConfig(dataType) + + // BuildProcessors the pipeline + allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + if test.shouldFail { + assert.Error(t, err) + 
return
+			}
+
+			require.NoError(t, err)
+			require.EqualValues(t, 1, len(allExporters))
+			pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build()
+
+			assert.NoError(t, err)
+			require.NotNil(t, pipelineProcessors)
+
+			err = pipelineProcessors.StartProcessors(context.Background(), componenttest.NewNopHost())
+			assert.NoError(t, err)
+
+			pipelineName := dataType
+			processor := pipelineProcessors[cfg.Service.Pipelines[pipelineName]]
+
+			// Ensure pipeline has its fields correctly populated.
+			require.NotNil(t, processor)
+			assert.Nil(t, processor.firstTC)
+			assert.Nil(t, processor.firstMC)
+			assert.NotNil(t, processor.firstLC)
+
+			// Compose the list of created exporters.
+			exporterNames := []string{"exampleexporter"}
+			var exporters []*builtExporter
+			for _, name := range exporterNames {
+				// Ensure exporter is created.
+				exp := allExporters[cfg.Exporters[name]]
+				require.NotNil(t, exp)
+				exporters = append(exporters, exp)
+			}
+
+			// Send Logs via processor and verify that all exporters of the pipeline receive it.
+
+			// First check that there are no logs in the exporters yet.
+			var exporterConsumers []*componenttest.ExampleExporterConsumer
+			for _, exporter := range exporters {
+				consumer := exporter.getLogExporter().(*componenttest.ExampleExporterConsumer)
+				exporterConsumers = append(exporterConsumers, consumer)
+				require.Equal(t, len(consumer.Logs), 0)
+			}
+
+			// Send one log.
+			log := pdata.Logs{}
+			processor.firstLC.(consumer.LogsConsumer).ConsumeLogs(context.Background(), log)
+
+			// Now verify received data.
+			for _, consumer := range exporterConsumers {
+				// Check that the logs are received by the exporter.
+				require.Equal(t, 1, len(consumer.Logs))
+
+				// Verify that the logs are successfully delivered.
+				assert.EqualValues(t, log, consumer.Logs[0])
+			}
+
+			err = pipelineProcessors.ShutdownProcessors(context.Background())
+			assert.NoError(t, err)
+		})
+	}
+}
+
+func testPipeline(t *testing.T, pipelineName string, exporterNames []string) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	// Load the config.
+	cfg, err := configtest.LoadConfigFile(t, "testdata/pipelines_builder.yaml", factories)
+	require.Nil(t, err)
+
+	// Build the pipeline.
+	allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build()
+	assert.NoError(t, err)
+	pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build()
+
+	assert.NoError(t, err)
+	require.NotNil(t, pipelineProcessors)
+
+	assert.NoError(t, pipelineProcessors.StartProcessors(context.Background(), componenttest.NewNopHost()))
+
+	processor := pipelineProcessors[cfg.Service.Pipelines[pipelineName]]
+
+	// Ensure pipeline has its fields correctly populated.
+	require.NotNil(t, processor)
+	assert.NotNil(t, processor.firstTC)
+	assert.Nil(t, processor.firstMC)
+
+	// Compose the list of created exporters.
+	var exporters []*builtExporter
+	for _, name := range exporterNames {
+		// Ensure exporter is created.
+		exp := allExporters[cfg.Exporters[name]]
+		require.NotNil(t, exp)
+		exporters = append(exporters, exp)
+	}
+
+	// Send TraceData via processor and verify that all exporters of the pipeline receive it.
+
+	// First check that there are no traces in the exporters yet.
+ var exporterConsumers []*componenttest.ExampleExporterConsumer + for _, exporter := range exporters { + consumer := exporter.getTraceExporter().(*componenttest.ExampleExporterConsumer) + exporterConsumers = append(exporterConsumers, consumer) + require.Equal(t, len(consumer.Traces), 0) + } + + td := testdata.GenerateTraceDataOneSpan() + processor.firstTC.(consumer.TracesConsumer).ConsumeTraces(context.Background(), td) + + // Now verify received data. + for _, consumer := range exporterConsumers { + // Check that the trace is received by exporter. + require.Equal(t, 1, len(consumer.Traces)) + + // Verify that span is successfully delivered. + assert.EqualValues(t, td, consumer.Traces[0]) + } + + err = pipelineProcessors.ShutdownProcessors(context.Background()) + assert.NoError(t, err) +} + +func TestProcessorsBuilder_ErrorOnUnsupportedProcessor(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + bf := newBadProcessorFactory() + factories.Processors[bf.Type()] = bf + + cfg, err := configtest.LoadConfigFile(t, "testdata/bad_processor_factory.yaml", factories) + require.Nil(t, err) + + allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + assert.NoError(t, err) + + // First test only trace receivers by removing the metrics pipeline. + metricsPipeline := cfg.Service.Pipelines["metrics"] + logsPipeline := cfg.Service.Pipelines["logs"] + delete(cfg.Service.Pipelines, "metrics") + delete(cfg.Service.Pipelines, "logs") + require.Equal(t, 1, len(cfg.Service.Pipelines)) + + pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build() + assert.Error(t, err) + assert.Zero(t, len(pipelineProcessors)) + + // Now test the metric pipeline. + delete(cfg.Service.Pipelines, "traces") + cfg.Service.Pipelines["metrics"] = metricsPipeline + require.Equal(t, 1, len(cfg.Service.Pipelines)) + + pipelineProcessors, err = NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build() + assert.Error(t, err) + assert.Zero(t, len(pipelineProcessors)) + + // Now test the logs pipeline. + delete(cfg.Service.Pipelines, "metrics") + cfg.Service.Pipelines["logs"] = logsPipeline + require.Equal(t, 1, len(cfg.Service.Pipelines)) + + pipelineProcessors, err = NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build() + assert.Error(t, err) + assert.Zero(t, len(pipelineProcessors)) +} + +func newBadProcessorFactory() component.ProcessorFactory { + return processorhelper.NewFactory("bf", func() configmodels.Processor { + return &configmodels.ProcessorSettings{ + TypeVal: "bf", + NameVal: "bf", + } + }) +} diff --git a/internal/otel_collector/service/builder/receivers_builder.go b/internal/otel_collector/service/builder/receivers_builder.go new file mode 100644 index 00000000000..48724359f07 --- /dev/null +++ b/internal/otel_collector/service/builder/receivers_builder.go @@ -0,0 +1,352 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config/configerror"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/processor"
+)
+
+var errUnusedReceiver = errors.New("receiver defined but not used by any pipeline")
+
+// builtReceiver is a receiver that is built based on a config. It can have
+// a trace, metrics, and/or logs component.
+type builtReceiver struct {
+	logger   *zap.Logger
+	receiver component.Receiver
+}
+
+// Start the receiver.
+func (rcv *builtReceiver) Start(ctx context.Context, host component.Host) error {
+	return rcv.receiver.Start(ctx, host)
+}
+
+// Shutdown the receiver.
+func (rcv *builtReceiver) Shutdown(ctx context.Context) error {
+	return rcv.receiver.Shutdown(ctx)
+}
+
+// Receivers is a map of receivers created from receiver configs.
+type Receivers map[configmodels.Receiver]*builtReceiver
+
+// ShutdownAll stops all receivers.
+func (rcvs Receivers) ShutdownAll(ctx context.Context) error {
+	var errs []error
+	for _, rcv := range rcvs {
+		err := rcv.Shutdown(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return componenterror.CombineErrors(errs)
+}
+
+// StartAll starts all receivers.
+func (rcvs Receivers) StartAll(ctx context.Context, host component.Host) error {
+	for _, rcv := range rcvs {
+		rcv.logger.Info("Receiver is starting...")
+
+		if err := rcv.Start(ctx, host); err != nil {
+			return err
+		}
+		rcv.logger.Info("Receiver started.")
+	}
+	return nil
+}
+
+// ReceiversBuilder builds receivers from config.
+type ReceiversBuilder struct {
+	logger         *zap.Logger
+	appInfo        component.ApplicationStartInfo
+	config         *configmodels.Config
+	builtPipelines BuiltPipelines
+	factories      map[configmodels.Type]component.ReceiverFactory
+}
+
+// NewReceiversBuilder creates a new ReceiversBuilder. Call Build() on the returned value.
+func NewReceiversBuilder(
+	logger *zap.Logger,
+	appInfo component.ApplicationStartInfo,
+	config *configmodels.Config,
+	builtPipelines BuiltPipelines,
+	factories map[configmodels.Type]component.ReceiverFactory,
+) *ReceiversBuilder {
+	return &ReceiversBuilder{logger.With(zap.String(kindLogKey, kindLogsReceiver)), appInfo, config, builtPipelines, factories}
+}
+
+// Build builds receivers from config.
+func (rb *ReceiversBuilder) Build() (Receivers, error) {
+	receivers := make(Receivers)
+
+	// Build receivers based on configuration.
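+	// Note: callers build exporters first, then pipelines, then receivers (see
+	// the builder tests later in this diff), so that each receiver can be
+	// attached to pipeline entry points that already exist.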
+ for _, cfg := range rb.config.Receivers { + logger := rb.logger.With(zap.String(typeLogKey, string(cfg.Type())), zap.String(nameLogKey, cfg.Name())) + rcv, err := rb.buildReceiver(context.Background(), logger, rb.appInfo, cfg) + if err != nil { + if err == errUnusedReceiver { + logger.Info("Ignoring receiver as it is not used by any pipeline", zap.String("receiver", cfg.Name())) + continue + } + return nil, err + } + receivers[cfg] = rcv + } + + return receivers, nil +} + +// hasReceiver returns true if the pipeline is attached to specified receiver. +func hasReceiver(pipeline *configmodels.Pipeline, receiverName string) bool { + for _, name := range pipeline.Receivers { + if name == receiverName { + return true + } + } + return false +} + +type attachedPipelines map[configmodels.DataType][]*builtPipeline + +func (rb *ReceiversBuilder) findPipelinesToAttach(config configmodels.Receiver) (attachedPipelines, error) { + // A receiver may be attached to multiple pipelines. Pipelines may consume different + // data types. We need to compile the list of pipelines of each type that must be + // attached to this receiver according to configuration. + + pipelinesToAttach := make(attachedPipelines) + pipelinesToAttach[configmodels.TracesDataType] = make([]*builtPipeline, 0) + pipelinesToAttach[configmodels.MetricsDataType] = make([]*builtPipeline, 0) + + // Iterate over all pipelines. + for _, pipelineCfg := range rb.config.Service.Pipelines { + // Get the first processor of the pipeline. + pipelineProcessor := rb.builtPipelines[pipelineCfg] + if pipelineProcessor == nil { + return nil, fmt.Errorf("cannot find pipeline processor for pipeline %s", + pipelineCfg.Name) + } + + // Is this receiver attached to the pipeline? + if hasReceiver(pipelineCfg, config.Name()) { + if _, exists := pipelinesToAttach[pipelineCfg.InputType]; !exists { + pipelinesToAttach[pipelineCfg.InputType] = make([]*builtPipeline, 0) + } + + // Yes, add it to the list of pipelines of corresponding data type. + pipelinesToAttach[pipelineCfg.InputType] = + append(pipelinesToAttach[pipelineCfg.InputType], pipelineProcessor) + } + } + + return pipelinesToAttach, nil +} + +func (rb *ReceiversBuilder) attachReceiverToPipelines( + ctx context.Context, + logger *zap.Logger, + appInfo component.ApplicationStartInfo, + factory component.ReceiverFactory, + dataType configmodels.DataType, + config configmodels.Receiver, + rcv *builtReceiver, + builtPipelines []*builtPipeline, +) error { + // There are pipelines of the specified data type that must be attached to + // the receiver. Create the receiver of corresponding data type and make + // sure its output is fanned out to all attached pipelines. 
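+	// For example, a single receiver config used by both a traces pipeline and
+	// a metrics pipeline yields one component.Receiver whose trace output fans
+	// out to the traces pipelines and whose metrics output fans out to the
+	// metrics pipelines (see the shared-receiver check below).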
+ var err error + var createdReceiver component.Receiver + creationParams := component.ReceiverCreateParams{ + Logger: logger, + ApplicationStartInfo: appInfo, + } + + switch dataType { + case configmodels.TracesDataType: + junction := buildFanoutTraceConsumer(builtPipelines) + createdReceiver, err = factory.CreateTracesReceiver(ctx, creationParams, config, junction) + + case configmodels.MetricsDataType: + junction := buildFanoutMetricConsumer(builtPipelines) + createdReceiver, err = factory.CreateMetricsReceiver(ctx, creationParams, config, junction) + + case configmodels.LogsDataType: + junction := buildFanoutLogConsumer(builtPipelines) + createdReceiver, err = factory.CreateLogsReceiver(ctx, creationParams, config, junction) + + default: + err = configerror.ErrDataTypeIsNotSupported + } + + if err != nil { + if err == configerror.ErrDataTypeIsNotSupported { + return fmt.Errorf( + "receiver %s does not support %s but it was used in a "+ + "%s pipeline", + config.Name(), + dataType, + dataType) + } + return fmt.Errorf("cannot create receiver %s: %s", config.Name(), err.Error()) + } + + // Check if the factory really created the receiver. + if createdReceiver == nil { + return fmt.Errorf("factory for %q produced a nil receiver", config.Name()) + } + + if rcv.receiver != nil { + // The receiver was previously created for this config. This can happen if the + // same receiver type supports more than one data type. In that case we expect + // that CreateTracesReceiver and CreateMetricsReceiver return the same value. + if rcv.receiver != createdReceiver { + return fmt.Errorf( + "factory for %q is implemented incorrectly: "+ + "CreateTracesReceiver and CreateMetricsReceiver must return the same "+ + "receiver pointer when creating receivers of different data types", + config.Name(), + ) + } + } + rcv.receiver = createdReceiver + + logger.Info("Receiver is enabled.", zap.String("datatype", string(dataType))) + + return nil +} + +func (rb *ReceiversBuilder) buildReceiver(ctx context.Context, logger *zap.Logger, appInfo component.ApplicationStartInfo, config configmodels.Receiver) (*builtReceiver, error) { + + // First find pipelines that must be attached to this receiver. + pipelinesToAttach, err := rb.findPipelinesToAttach(config) + if err != nil { + return nil, err + } + + // Prepare to build the receiver. + factory := rb.factories[config.Type()] + if factory == nil { + return nil, fmt.Errorf("receiver factory not found for type: %s", config.Type()) + } + rcv := &builtReceiver{ + logger: logger, + } + + // Now we have list of pipelines broken down by data type. Iterate for each data type. + for dataType, pipelines := range pipelinesToAttach { + if len(pipelines) == 0 { + // No pipelines of this data type are attached to this receiver. + continue + } + + // Attach the corresponding part of the receiver to all pipelines that require + // this data type. + err := rb.attachReceiverToPipelines(ctx, logger, appInfo, factory, dataType, config, rcv, pipelines) + if err != nil { + return nil, err + } + } + + if rcv.receiver == nil { + return nil, errUnusedReceiver + } + + return rcv, nil +} + +func buildFanoutTraceConsumer(pipelines []*builtPipeline) consumer.TracesConsumer { + // Optimize for the case when there is only one processor, no need to create junction point. 
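+	// A single attached pipeline gets its first consumer used directly; no
+	// fan-out connector is created in that case.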
+ if len(pipelines) == 1 { + return pipelines[0].firstTC + } + + var pipelineConsumers []consumer.TracesConsumer + anyPipelineMutatesData := false + for _, pipeline := range pipelines { + pipelineConsumers = append(pipelineConsumers, pipeline.firstTC) + anyPipelineMutatesData = anyPipelineMutatesData || pipeline.MutatesConsumedData + } + + // Create a junction point that fans out to all pipelines. + if anyPipelineMutatesData { + // If any pipeline mutates data use a cloning fan out connector + // so that it is safe to modify fanned out data. + // TODO: if there are more than 2 pipelines only clone data for pipelines that + // declare the intent to mutate the data. Pipelines that do not mutate the data + // can consume shared data. + return processor.NewTracesCloningFanOutConnector(pipelineConsumers) + } + return processor.NewTracesFanOutConnector(pipelineConsumers) +} + +func buildFanoutMetricConsumer(pipelines []*builtPipeline) consumer.MetricsConsumer { + // Optimize for the case when there is only one processor, no need to create junction point. + if len(pipelines) == 1 { + return pipelines[0].firstMC + } + + var pipelineConsumers []consumer.MetricsConsumer + anyPipelineMutatesData := false + for _, pipeline := range pipelines { + pipelineConsumers = append(pipelineConsumers, pipeline.firstMC) + anyPipelineMutatesData = anyPipelineMutatesData || pipeline.MutatesConsumedData + } + + // Create a junction point that fans out to all pipelines. + if anyPipelineMutatesData { + // If any pipeline mutates data use a cloning fan out connector + // so that it is safe to modify fanned out data. + // TODO: if there are more than 2 pipelines only clone data for pipelines that + // declare the intent to mutate the data. Pipelines that do not mutate the data + // can consume shared data. + return processor.NewMetricsCloningFanOutConnector(pipelineConsumers) + } + return processor.NewMetricsFanOutConnector(pipelineConsumers) +} + +func buildFanoutLogConsumer(pipelines []*builtPipeline) consumer.LogsConsumer { + // Optimize for the case when there is only one processor, no need to create junction point. + if len(pipelines) == 1 { + return pipelines[0].firstLC + } + + var pipelineConsumers []consumer.LogsConsumer + anyPipelineMutatesData := false + for _, pipeline := range pipelines { + pipelineConsumers = append(pipelineConsumers, pipeline.firstLC) + anyPipelineMutatesData = anyPipelineMutatesData || pipeline.MutatesConsumedData + } + + // Create a junction point that fans out to all pipelines. + if anyPipelineMutatesData { + // If any pipeline mutates data use a cloning fan out connector + // so that it is safe to modify fanned out data. + // TODO: if there are more than 2 pipelines only clone data for pipelines that + // declare the intent to mutate the data. Pipelines that do not mutate the data + // can consume shared data. + return processor.NewLogsCloningFanOutConnector(pipelineConsumers) + } + return processor.NewLogsFanOutConnector(pipelineConsumers) +} diff --git a/internal/otel_collector/service/builder/receivers_builder_test.go b/internal/otel_collector/service/builder/receivers_builder_test.go new file mode 100644 index 00000000000..e23e0b47317 --- /dev/null +++ b/internal/otel_collector/service/builder/receivers_builder_test.go @@ -0,0 +1,396 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package builder + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/processor/attributesprocessor" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +type testCase struct { + name string + receiverName string + exporterNames []string + spanDuplicationByExporter map[string]int + hasTraces bool + hasMetrics bool +} + +func TestReceiversBuilder_Build(t *testing.T) { + tests := []testCase{ + { + name: "one-exporter", + receiverName: "examplereceiver", + exporterNames: []string{"exampleexporter"}, + hasTraces: true, + hasMetrics: true, + }, + { + name: "multi-exporter", + receiverName: "examplereceiver/2", + exporterNames: []string{"exampleexporter", "exampleexporter/2"}, + hasTraces: true, + }, + { + name: "multi-metrics-receiver", + receiverName: "examplereceiver/3", + exporterNames: []string{"exampleexporter", "exampleexporter/2"}, + hasTraces: false, + hasMetrics: true, + }, + { + name: "multi-receiver-multi-exporter", + receiverName: "examplereceiver/multi", + exporterNames: []string{"exampleexporter", "exampleexporter/2"}, + + // Check pipelines_builder.yaml to understand this case. + // We have 2 pipelines, one exporting to one exporter, the other + // exporting to both exporters, so we expect a duplication on + // one of the exporters, but not on the other. + spanDuplicationByExporter: map[string]int{ + "exampleexporter": 2, "exampleexporter/2": 1, + }, + hasTraces: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testReceivers(t, test) + }) + } +} + +func testReceivers( + t *testing.T, + test testCase, +) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + attrFactory := attributesprocessor.NewFactory() + factories.Processors[attrFactory.Type()] = attrFactory + cfg, err := configtest.LoadConfigFile(t, "testdata/pipelines_builder.yaml", factories) + require.Nil(t, err) + + // Build the pipeline + allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + assert.NoError(t, err) + pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build() + assert.NoError(t, err) + receivers, err := NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build() + + assert.NoError(t, err) + require.NotNil(t, receivers) + + receiver := receivers[cfg.Receivers[test.receiverName]] + + // Ensure receiver has its fields correctly populated. + require.NotNil(t, receiver) + + assert.NotNil(t, receiver.receiver) + + // Compose the list of created exporters. 
+ var exporters []*builtExporter + for _, name := range test.exporterNames { + // Ensure exporter is created. + exp := allExporters[cfg.Exporters[name]] + require.NotNil(t, exp) + exporters = append(exporters, exp) + } + + // Send TraceData via receiver and verify that all exporters of the pipeline receive it. + + // First check that there are no traces in the exporters yet. + for _, exporter := range exporters { + consumer := exporter.getTraceExporter().(*componenttest.ExampleExporterConsumer) + require.Equal(t, len(consumer.Traces), 0) + require.Equal(t, len(consumer.Metrics), 0) + } + + if test.hasTraces { + traceProducer := receiver.receiver.(*componenttest.ExampleReceiverProducer) + traceProducer.TraceConsumer.ConsumeTraces(context.Background(), testdata.GenerateTraceDataOneSpan()) + } + + metrics := testdata.GenerateMetricsOneMetric() + if test.hasMetrics { + metricsProducer := receiver.receiver.(*componenttest.ExampleReceiverProducer) + metricsProducer.MetricsConsumer.ConsumeMetrics(context.Background(), metrics) + } + + // Now verify received data. + for _, name := range test.exporterNames { + // Check that the data is received by exporter. + exporter := allExporters[cfg.Exporters[name]] + + // Validate traces. + if test.hasTraces { + var spanDuplicationCount int + if test.spanDuplicationByExporter != nil { + spanDuplicationCount = test.spanDuplicationByExporter[name] + } else { + spanDuplicationCount = 1 + } + + traceConsumer := exporter.getTraceExporter().(*componenttest.ExampleExporterConsumer) + require.Equal(t, spanDuplicationCount, len(traceConsumer.Traces)) + + for i := 0; i < spanDuplicationCount; i++ { + assert.EqualValues(t, testdata.GenerateTraceDataOneSpan(), traceConsumer.Traces[i]) + } + } + + // Validate metrics. + if test.hasMetrics { + metricsConsumer := exporter.getMetricExporter().(*componenttest.ExampleExporterConsumer) + require.Equal(t, 1, len(metricsConsumer.Metrics)) + assert.EqualValues(t, metrics, metricsConsumer.Metrics[0]) + } + } +} + +func TestReceiversBuilder_BuildCustom(t *testing.T) { + factories := createExampleFactories() + + tests := []struct { + dataType string + shouldFail bool + }{ + { + dataType: "logs", + shouldFail: false, + }, + { + dataType: "nosuchdatatype", + shouldFail: true, + }, + } + + for _, test := range tests { + t.Run(test.dataType, func(t *testing.T) { + dataType := test.dataType + + cfg := createExampleConfig(dataType) + + // Build the pipeline + allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + if test.shouldFail { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build() + assert.NoError(t, err) + receivers, err := NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build() + + assert.NoError(t, err) + require.NotNil(t, receivers) + + receiver := receivers[cfg.Receivers["examplereceiver"]] + + // Ensure receiver has its fields correctly populated. + require.NotNil(t, receiver) + + assert.NotNil(t, receiver.receiver) + + // Compose the list of created exporters. + exporterNames := []string{"exampleexporter"} + var exporters []*builtExporter + for _, name := range exporterNames { + // Ensure exporter is created. 
+				exp := allExporters[cfg.Exporters[name]]
+				require.NotNil(t, exp)
+				exporters = append(exporters, exp)
+			}
+
+			// Send data via the receiver and verify that all exporters of the pipeline receive it.
+
+			// First check that there are no logs in the exporters yet.
+			for _, exporter := range exporters {
+				consumer := exporter.getLogExporter().(*componenttest.ExampleExporterConsumer)
+				require.Equal(t, len(consumer.Logs), 0)
+			}
+
+			// Send one log record.
+			log := pdata.Logs{}
+			producer := receiver.receiver.(*componenttest.ExampleReceiverProducer)
+			producer.LogConsumer.ConsumeLogs(context.Background(), log)
+
+			// Now verify received data.
+			for _, name := range exporterNames {
+				// Check that the data is received by the exporter.
+				exporter := allExporters[cfg.Exporters[name]]
+
+				// Validate exported data.
+				consumer := exporter.getLogExporter().(*componenttest.ExampleExporterConsumer)
+				require.Equal(t, 1, len(consumer.Logs))
+				assert.EqualValues(t, log, consumer.Logs[0])
+			}
+		})
+	}
+}
+
+func TestReceiversBuilder_DataTypeError(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	attrFactory := attributesprocessor.NewFactory()
+	factories.Processors[attrFactory.Type()] = attrFactory
+	cfg, err := configtest.LoadConfigFile(t, "testdata/pipelines_builder.yaml", factories)
+	assert.NoError(t, err)
+
+	// Make examplereceiver fail creation of its trace receiver, i.e. not
+	// support the trace data type.
+	receiver := cfg.Receivers["examplereceiver"]
+	receiver.(*componenttest.ExampleReceiver).FailTraceCreation = true
+
+	// Build the pipeline
+	allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build()
+	assert.NoError(t, err)
+	pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build()
+	assert.NoError(t, err)
+	receivers, err := NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build()
+
+	// This should fail because "examplereceiver" is attached to the "traces"
+	// pipeline, which is a configuration error.
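+	// (FailTraceCreation makes the example factory refuse to create a trace
+	// receiver, so the builder must surface the unsupported data type as an
+	// error rather than ignore it.)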
+	assert.NotNil(t, err)
+	assert.Nil(t, receivers)
+}
+
+func TestReceiversBuilder_StartAll(t *testing.T) {
+	receivers := make(Receivers)
+	rcvCfg := &configmodels.ReceiverSettings{}
+
+	receiver := &componenttest.ExampleReceiverProducer{}
+
+	receivers[rcvCfg] = &builtReceiver{
+		logger:   zap.NewNop(),
+		receiver: receiver,
+	}
+
+	assert.False(t, receiver.Started)
+
+	err := receivers.StartAll(context.Background(), componenttest.NewNopHost())
+	assert.NoError(t, err)
+
+	assert.True(t, receiver.Started)
+}
+
+func TestReceiversBuilder_StopAll(t *testing.T) {
+	receivers := make(Receivers)
+	rcvCfg := &configmodels.ReceiverSettings{}
+
+	receiver := &componenttest.ExampleReceiverProducer{}
+
+	receivers[rcvCfg] = &builtReceiver{
+		logger:   zap.NewNop(),
+		receiver: receiver,
+	}
+
+	assert.False(t, receiver.Stopped)
+
+	assert.NoError(t, receivers.ShutdownAll(context.Background()))
+
+	assert.True(t, receiver.Stopped)
+}
+
+func TestReceiversBuilder_ErrorOnNilReceiver(t *testing.T) {
+	factories, err := componenttest.ExampleComponents()
+	assert.NoError(t, err)
+
+	bf := newBadReceiverFactory()
+	factories.Receivers[bf.Type()] = bf
+
+	cfg, err := configtest.LoadConfigFile(t, "testdata/bad_receiver_factory.yaml", factories)
+	require.Nil(t, err)
+
+	// Build the pipeline
+	allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build()
+	assert.NoError(t, err)
+	pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build()
+	assert.NoError(t, err)
+
+	// First test only trace receivers by removing the metrics and logs pipelines.
+	metricsPipeline := cfg.Service.Pipelines["metrics"]
+	logsPipeline := cfg.Service.Pipelines["logs"]
+	delete(cfg.Service.Pipelines, "metrics")
+	delete(cfg.Service.Pipelines, "logs")
+	require.Equal(t, 1, len(cfg.Service.Pipelines))
+
+	receivers, err := NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build()
+	assert.Error(t, err)
+	assert.Zero(t, len(receivers))
+
+	// Now test the metrics pipeline.
+	delete(cfg.Service.Pipelines, "traces")
+	cfg.Service.Pipelines["metrics"] = metricsPipeline
+	require.Equal(t, 1, len(cfg.Service.Pipelines))
+
+	receivers, err = NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build()
+	assert.Error(t, err)
+	assert.Zero(t, len(receivers))
+
+	// Now test the logs pipeline.
+ delete(cfg.Service.Pipelines, "metrics") + cfg.Service.Pipelines["logs"] = logsPipeline + require.Equal(t, 1, len(cfg.Service.Pipelines)) + + receivers, err = NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build() + assert.Error(t, err) + assert.Zero(t, len(receivers)) +} + +func TestReceiversBuilder_Unused(t *testing.T) { + factories, err := componenttest.ExampleComponents() + assert.NoError(t, err) + + cfg, err := configtest.LoadConfigFile(t, "testdata/unused_receiver.yaml", factories) + assert.NoError(t, err) + + // Build the pipeline + allExporters, err := NewExportersBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, factories.Exporters).Build() + assert.NoError(t, err) + pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, allExporters, factories.Processors).Build() + assert.NoError(t, err) + receivers, err := NewReceiversBuilder(zap.NewNop(), componenttest.TestApplicationStartInfo(), cfg, pipelineProcessors, factories.Receivers).Build() + assert.NoError(t, err) + assert.NotNil(t, receivers) + + assert.NoError(t, receivers.StartAll(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, receivers.ShutdownAll(context.Background())) +} + +func newBadReceiverFactory() component.ReceiverFactory { + return receiverhelper.NewFactory("bf", func() configmodels.Receiver { + return &configmodels.ReceiverSettings{ + TypeVal: "bf", + NameVal: "bf", + } + }) +} diff --git a/internal/otel_collector/service/builder/testdata/bad_processor_factory.yaml b/internal/otel_collector/service/builder/testdata/bad_processor_factory.yaml new file mode 100644 index 00000000000..0a1deac5c56 --- /dev/null +++ b/internal/otel_collector/service/builder/testdata/bad_processor_factory.yaml @@ -0,0 +1,23 @@ +receivers: + examplereceiver: +processors: + bf/traces: # this is the bad processor factory + bf/metrics: + bf/logs: +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [bf/traces] + exporters: [exampleexporter] + metrics: + receivers: [examplereceiver] + processors: [bf/metrics] + exporters: [exampleexporter] + logs: + receivers: [examplereceiver] + processors: [bf/logs] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/builder/testdata/bad_receiver_factory.yaml b/internal/otel_collector/service/builder/testdata/bad_receiver_factory.yaml new file mode 100644 index 00000000000..3a6c5cecbc2 --- /dev/null +++ b/internal/otel_collector/service/builder/testdata/bad_receiver_factory.yaml @@ -0,0 +1,16 @@ +receivers: + bf: # this is the bad receiver factory +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [bf] + exporters: [exampleexporter] + metrics: + receivers: [bf] + exporters: [exampleexporter] + logs: + receivers: [bf] + exporters: [exampleexporter] \ No newline at end of file diff --git a/internal/otel_collector/service/builder/testdata/pipelines_builder.yaml b/internal/otel_collector/service/builder/testdata/pipelines_builder.yaml new file mode 100644 index 00000000000..ef965adb5fa --- /dev/null +++ b/internal/otel_collector/service/builder/testdata/pipelines_builder.yaml @@ -0,0 +1,40 @@ +receivers: + examplereceiver: + examplereceiver/2: + examplereceiver/3: + examplereceiver/multi: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + exampleexporter/2: + +service: + pipelines: + traces: + receivers: [examplereceiver, 
examplereceiver/multi]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
+
+    traces/2:
+      receivers: [examplereceiver/2, examplereceiver/multi]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter, exampleexporter/2]
+
+    metrics:
+      receivers: [examplereceiver]
+      exporters: [exampleexporter]
+
+    metrics/2:
+      receivers: [examplereceiver/3]
+      exporters: [exampleexporter]
+
+    metrics/3:
+      receivers: [examplereceiver/3]
+      exporters: [exampleexporter/2]
+
+    logs:
+      receivers: [examplereceiver/3]
+      exporters: [exampleexporter/2]
diff --git a/internal/otel_collector/service/builder/testdata/unused_receiver.yaml b/internal/otel_collector/service/builder/testdata/unused_receiver.yaml
new file mode 100644
index 00000000000..4465c1e39c9
--- /dev/null
+++ b/internal/otel_collector/service/builder/testdata/unused_receiver.yaml
@@ -0,0 +1,12 @@
+receivers:
+  examplereceiver:
+  multireceiver:
+processors:
+exporters:
+  exampleexporter:
+
+service:
+  pipelines:
+    traces:
+      receivers: [examplereceiver]
+      exporters: [exampleexporter]
\ No newline at end of file
diff --git a/internal/otel_collector/service/defaultcomponents/defaults.go b/internal/otel_collector/service/defaultcomponents/defaults.go
new file mode 100644
index 00000000000..23dbabfbd49
--- /dev/null
+++ b/internal/otel_collector/service/defaultcomponents/defaults.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package defaultcomponents composes the default set of components used by the otel service.
+package defaultcomponents
+
+import (
+	"go.opentelemetry.io/collector/component"
+)
+
+// Components returns the default set of components used by the
+// OpenTelemetry collector.
+func Components() (component.Factories, error) {
+	return component.Factories{}, nil
+}
diff --git a/internal/otel_collector/service/defaultcomponents/defaults_test.go b/internal/otel_collector/service/defaultcomponents/defaults_test.go
new file mode 100644
index 00000000000..8284a07ad5f
--- /dev/null
+++ b/internal/otel_collector/service/defaultcomponents/defaults_test.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Tests that the default components are registered with the expected set of
+// extensions, receivers, processors, and exporters.
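+//
+// A minimal usage sketch of Components (illustrative only; error handling
+// elided, service.New appears later in service.go):
+//
+//	factories, err := defaultcomponents.Components()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	app, err := service.New(service.Parameters{Factories: factories})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = app.Run()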
+package defaultcomponents + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/config/configmodels" +) + +func TestDefaultComponents(t *testing.T) { + expectedExtensions := []configmodels.Type{ + "health_check", + "pprof", + "zpages", + "fluentbit", + } + expectedReceivers := []configmodels.Type{ + "jaeger", + "zipkin", + "prometheus", + "opencensus", + "otlp", + "hostmetrics", + "fluentforward", + "kafka", + } + expectedProcessors := []configmodels.Type{ + "attributes", + "resource", + "queued_retry", + "batch", + "memory_limiter", + "probabilistic_sampler", + "span", + "filter", + } + expectedExporters := []configmodels.Type{ + "opencensus", + "prometheus", + "prometheusremotewrite", + "logging", + "zipkin", + "jaeger", + "file", + "otlp", + "otlphttp", + "kafka", + } + + factories, err := Components() + assert.NoError(t, err) + + exts := factories.Extensions + assert.Equal(t, len(expectedExtensions), len(exts)) + for _, k := range expectedExtensions { + v, ok := exts[k] + assert.True(t, ok) + assert.Equal(t, k, v.Type()) + } + + recvs := factories.Receivers + assert.Equal(t, len(expectedReceivers), len(recvs)) + for _, k := range expectedReceivers { + v, ok := recvs[k] + require.True(t, ok) + assert.Equal(t, k, v.Type()) + assert.Equal(t, k, v.CreateDefaultConfig().Type()) + } + + procs := factories.Processors + assert.Equal(t, len(expectedProcessors), len(procs)) + for _, k := range expectedProcessors { + v, ok := procs[k] + require.True(t, ok) + assert.Equal(t, k, v.Type()) + assert.Equal(t, k, v.CreateDefaultConfig().Type()) + } + + exps := factories.Exporters + assert.Equal(t, len(expectedExporters), len(exps)) + for _, k := range expectedExporters { + v, ok := exps[k] + require.True(t, ok) + assert.Equal(t, k, v.Type()) + assert.Equal(t, k, v.CreateDefaultConfig().Type()) + } +} diff --git a/internal/otel_collector/service/defaultcomponents/docs_test.go b/internal/otel_collector/service/defaultcomponents/docs_test.go new file mode 100644 index 00000000000..79dfaf2dc48 --- /dev/null +++ b/internal/otel_collector/service/defaultcomponents/docs_test.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaultcomponents + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" +) + +const ( + relativeDefaultComponentsPath = "service/defaultcomponents/defaults.go" + projectGoModule = "go.opentelemetry.io/collector" +) + +// TestComponentDocs verifies existence of READMEs for components specified as +// default components in the collector. Looking for default components being enabled +// in the collector gives a reasonable measure of the components that need to be +// documented. 
documented. Note that for this test to work, the underlying assumption is
+// that the imports in "service/defaultcomponents/defaults.go" are indicative
+// of the components that require documentation.
+func TestComponentDocs(t *testing.T) {
+	wd, err := os.Getwd()
+	require.NoError(t, err, "failed to get working directory")
+
+	// Absolute path to the project root directory
+	projectPath := filepath.Join(wd, "../../")
+
+	err = componenttest.CheckDocs(
+		projectPath,
+		relativeDefaultComponentsPath,
+		projectGoModule,
+	)
+	require.NoError(t, err)
+}
diff --git a/internal/otel_collector/service/internal/gen.go b/internal/otel_collector/service/internal/gen.go
new file mode 100644
index 00000000000..cfba1fb0031
--- /dev/null
+++ b/internal/otel_collector/service/internal/gen.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+//go:generate esc -pkg internal -o resources.go -modtime "0" templates/
+//go:generate addlicense -y "" -c "The OpenTelemetry Authors" resources.go
diff --git a/internal/otel_collector/service/internal/resources.go b/internal/otel_collector/service/internal/resources.go
new file mode 100644
index 00000000000..1efaed7a73c
--- /dev/null
+++ b/internal/otel_collector/service/internal/resources.go
@@ -0,0 +1,323 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by "esc -pkg internal -o resources.go -modtime 0 templates/"; DO NOT EDIT.
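+
+// Each asset in the _escData table below is stored gzip-compressed and
+// base64-encoded; _escStaticFS.prepare decodes an entry lazily on first
+// access. A minimal sketch of reading one embedded template through the
+// exported helpers defined further down:
+//
+//	html, err := FSString(false, "/templates/header.html")
+//	if err != nil {
+//		// asset missing from the embedded table
+//	}
+//	_ = html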
+ +package internal + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "sync" + "time" +) + +type _escLocalFS struct{} + +var _escLocal _escLocalFS + +type _escStaticFS struct{} + +var _escStatic _escStaticFS + +type _escDirectory struct { + fs http.FileSystem + name string +} + +type _escFile struct { + compressed string + size int64 + modtime int64 + local string + isDir bool + + once sync.Once + data []byte + name string +} + +func (_escLocalFS) Open(name string) (http.File, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + return os.Open(f.local) +} + +func (_escStaticFS) prepare(name string) (*_escFile, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + var err error + f.once.Do(func() { + f.name = path.Base(name) + if f.size == 0 { + return + } + var gr *gzip.Reader + b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) + gr, err = gzip.NewReader(b64) + if err != nil { + return + } + f.data, err = ioutil.ReadAll(gr) + }) + if err != nil { + return nil, err + } + return f, nil +} + +func (fs _escStaticFS) Open(name string) (http.File, error) { + f, err := fs.prepare(name) + if err != nil { + return nil, err + } + return f.File() +} + +func (dir _escDirectory) Open(name string) (http.File, error) { + return dir.fs.Open(dir.name + name) +} + +func (f *_escFile) File() (http.File, error) { + type httpFile struct { + *bytes.Reader + *_escFile + } + return &httpFile{ + Reader: bytes.NewReader(f.data), + _escFile: f, + }, nil +} + +func (f *_escFile) Close() error { + return nil +} + +func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { + if !f.isDir { + return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name) + } + + fis, ok := _escDirs[f.local] + if !ok { + return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local) + } + limit := count + if count <= 0 || limit > len(fis) { + limit = len(fis) + } + + if len(fis) == 0 && count > 0 { + return nil, io.EOF + } + + return fis[0:limit], nil +} + +func (f *_escFile) Stat() (os.FileInfo, error) { + return f, nil +} + +func (f *_escFile) Name() string { + return f.name +} + +func (f *_escFile) Size() int64 { + return f.size +} + +func (f *_escFile) Mode() os.FileMode { + return 0 +} + +func (f *_escFile) ModTime() time.Time { + return time.Unix(f.modtime, 0) +} + +func (f *_escFile) IsDir() bool { + return f.isDir +} + +func (f *_escFile) Sys() interface{} { + return f +} + +// FS returns a http.Filesystem for the embedded assets. If useLocal is true, +// the filesystem's contents are instead used. +func FS(useLocal bool) http.FileSystem { + if useLocal { + return _escLocal + } + return _escStatic +} + +// Dir returns a http.Filesystem for the embedded assets on a given prefix dir. +// If useLocal is true, the filesystem's contents are instead used. +func Dir(useLocal bool, name string) http.FileSystem { + if useLocal { + return _escDirectory{fs: _escLocal, name: name} + } + return _escDirectory{fs: _escStatic, name: name} +} + +// FSByte returns the named file from the embedded assets. If useLocal is +// true, the filesystem's contents are instead used. 
+func FSByte(useLocal bool, name string) ([]byte, error) { + if useLocal { + f, err := _escLocal.Open(name) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + _ = f.Close() + return b, err + } + f, err := _escStatic.prepare(name) + if err != nil { + return nil, err + } + return f.data, nil +} + +// FSMustByte is the same as FSByte, but panics if name is not present. +func FSMustByte(useLocal bool, name string) []byte { + b, err := FSByte(useLocal, name) + if err != nil { + panic(err) + } + return b +} + +// FSString is the string version of FSByte. +func FSString(useLocal bool, name string) (string, error) { + b, err := FSByte(useLocal, name) + return string(b), err +} + +// FSMustString is the string version of FSMustByte. +func FSMustString(useLocal bool, name string) string { + return string(FSMustByte(useLocal, name)) +} + +var _escData = map[string]*_escFile{ + + "/templates/component_header.html": { + name: "component_header.html", + local: "templates/component_header.html", + size: 156, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/1SMsQqDMBRFd7/iIq7q5lBiltKt9B8CPklQX6R1e9x/L6ZQ2vXcc65ZE3AZ0V3ztmcV +PW467TnpQVZmzZp0Kfs96VJQizTjw1uyAgAXB+8C4lPmsT4fydqbdY+wCen64F0fB19iWV/yF/54X0en +U3kHAAD//zT+SdCcAAAA +`, + }, + + "/templates/extensions_table.html": { + name: "extensions_table.html", + local: "templates/extensions_table.html", + size: 353, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/2SQwU7DMBBE7/2KlemRNJwjxxwQHDnwB248DRbOOnK2tGD531HTQIvqk1fzZjU7Wuw2 +gCb5CmjVNiaHVE2j7Tz3DT0osyIiynltqWlp8xSHMTJYntmN0bOUsgDJcg9ap3jw7HC8n7+z5y0epgU7 +oxX5HeETfMGv9NPTkv4i2e6jT3HPrqE7AEui8yaECbdWkzPYUXWlaHFkg++5VR1YkJTRlt4Tdq06HVfK +4zeOAp58ZLYD2pw3L/sQXu2AUpT5N+raGl2Lu0TRtaTfqsCulJWu52bNTwAAAP//sz5qjmEBAAA= +`, + }, + + "/templates/footer.html": { + name: "footer.html", + local: "templates/footer.html", + size: 15, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/7LRT8pPqbTjstHPKMnNsQMEAAD//wEFevAPAAAA +`, + }, + + "/templates/header.html": { + name: "header.html", + local: "templates/header.html", + size: 467, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/5TRMU8sIRAH8P4+BY/25eC9szGGxUItLIwW11giO7uMB8wG5rxsLvfdDdnTxNhoBeFP +fpnM3/y5fbzZPj/dicAp2pVph4guj52ELK0J4Hq7EkIIk4Cd8MGVCtzJPQ/rS3mOGDmCPR7Vtl1OJ6OX +lyWNmHeiQOxkDVTY71mgpyxFKDB0UuvD4aBogswQIQGXWSHpwb21Xwo9Sf1d4jlCDQD8wQTmqV5pPVDm +qkaiMYKbsCpPSTfpenAJ49w9OIaCLv6995Sr/AXtqQc1Aqc+tgn/qwv1T6czpzD3ONJ6wrxTCbPy9ROv +vuDEoocBiqjF/5RszGuV1uhFsCujl0bMC/Vz62vzZe1hY98DAAD//7qRGmLTAQAA +`, + }, + + "/templates/pipelines_table.html": { + name: "pipelines_table.html", + local: "templates/pipelines_table.html", + size: 1946, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/7SVwXLTMBCG7zyFxnRyIjVcU1scSpnhAMN0eAFZ2gRNlZVmJbdujd+dsWyrTp0LtL5k +rOjX/tlv/8hFEJUB5sOjgTKrLCmgrXdCajzs2MeMv2OMsSLQ8DAsFJPWeCew/MSE0QcsDewDLyr+tTbm +hzhCkVe8yIM6OcU3WHl3NXz+mS8W0oWBBAxAvcU3dHX49ejW9PheBxHAX1v09RHUFxHEim63IEHfA/kV +PX6SleC9XdXkpnGWwqKRIp/i07YXgu1Kdnltj84iYLhB5azG0HWjgAQegF2QfdCooPkQH+OZW/vgR9kg +3TK9Z3AP+Cyf7Y+5TdEW8u5Atka1Y+8BIOOzSmA8LI/ytgVUbDvb6VG1bW93OUW962Kn/wZxKpLC/Koq +Z+L6X/XGgWbDRGeETkcDMo0GZD+a+CNSil+AjLUF+02wL7M+AF33+clpB0YjoDhCuQC6eZJTPpIA5Mn3 +dxpVSaNlxidFkQu+dK/oZSuAaj7VN0a1IUF0dZ6eIzvRc2QTvef/5yr4HNklPjd5Rn5Rcpbf2XbWJZhw +QeMmXNC4hCvdNKvQgsYtacFoGWFFxSvCNl+lu3HQFXl8JfO/AQAA//9We3KLmgcAAA== +`, + }, + + "/templates/properties_table.html": { + name: "properties_table.html", + local: "templates/properties_table.html", + size: 420, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/2SRwW7DIBBE7/6KVRr1VMc5u5gfqFT11Ds2U8sqWVuwqRoR/r1yTCpb4YAEO48ZDarV 
+MR7ezQkp1apqdaHEtA4U5OLQ7NrRW/gyTKYbuK/puNMFEVGMtB/Y4pfqho6UUr71hnvk0Qvt4XACyyw6 +fPhxgpcBIasXoqThi/ADztRqOC8l/j+L6b57P57Z1vQEIEdZnoELeER1jGBL5WqixJJxQ89NBxZ4favg +nvTaQ95wSWnuQlVi9RrUz9yG6XXZr+vDg3TrsTX4NO6M2WLDVOLv3YJtSoWqbl+h/wIAAP//aLmk3KQB +AAA= +`, + }, + + "/templates": { + name: "templates", + local: `templates/`, + isDir: true, + }, +} + +var _escDirs = map[string][]os.FileInfo{ + + "templates/": { + _escData["/templates/component_header.html"], + _escData["/templates/extensions_table.html"], + _escData["/templates/footer.html"], + _escData["/templates/header.html"], + _escData["/templates/pipelines_table.html"], + _escData["/templates/properties_table.html"], + }, +} diff --git a/internal/otel_collector/service/internal/telemetry/process_telemetry.go b/internal/otel_collector/service/internal/telemetry/process_telemetry.go new file mode 100644 index 00000000000..43c6ff1942a --- /dev/null +++ b/internal/otel_collector/service/internal/telemetry/process_telemetry.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package telemetry + +import ( + "context" + "os" + "runtime" + "time" + + "github.com/shirou/gopsutil/process" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +// ProcessMetricsViews is a struct that contains views related to process metrics (cpu, mem, etc) +type ProcessMetricsViews struct { + prevTimeUnixNano int64 + ballastSizeBytes uint64 + views []*view.View + done chan struct{} + proc *process.Process +} + +var mUptime = stats.Float64( + "process/uptime", + "Uptime of the process", + stats.UnitSeconds) +var viewProcessUptime = &view.View{ + Name: mUptime.Name(), + Description: mUptime.Description(), + Measure: mUptime, + Aggregation: view.Sum(), + TagKeys: nil, +} + +var mRuntimeAllocMem = stats.Int64( + "process/runtime/heap_alloc_bytes", + "Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc')", + stats.UnitBytes) +var viewAllocMem = &view.View{ + Name: mRuntimeAllocMem.Name(), + Description: mRuntimeAllocMem.Description(), + Measure: mRuntimeAllocMem, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mRuntimeTotalAllocMem = stats.Int64( + "process/runtime/total_alloc_bytes", + "Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc')", + stats.UnitBytes) +var viewTotalAllocMem = &view.View{ + Name: mRuntimeTotalAllocMem.Name(), + Description: mRuntimeTotalAllocMem.Description(), + Measure: mRuntimeTotalAllocMem, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mRuntimeSysMem = stats.Int64( + "process/runtime/total_sys_memory_bytes", + "Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys')", + stats.UnitBytes) +var viewSysMem = &view.View{ + Name: mRuntimeSysMem.Name(), + Description: mRuntimeSysMem.Description(), + Measure: mRuntimeSysMem, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mCPUSeconds = stats.Float64( + "process/cpu_seconds", + "Total CPU user and system time in 
seconds", + stats.UnitSeconds) +var viewCPUSeconds = &view.View{ + Name: mCPUSeconds.Name(), + Description: mCPUSeconds.Description(), + Measure: mCPUSeconds, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mRSSMemory = stats.Int64( + "process/memory/rss", + "Total physical memory (resident set size)", + stats.UnitBytes) +var viewRSSMemory = &view.View{ + Name: mRSSMemory.Name(), + Description: mRSSMemory.Description(), + Measure: mRSSMemory, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +// NewProcessMetricsViews creates a new set of ProcessMetrics (mem, cpu) that can be used to measure +// basic information about this process. +func NewProcessMetricsViews(ballastSizeBytes uint64) (*ProcessMetricsViews, error) { + pmv := &ProcessMetricsViews{ + prevTimeUnixNano: time.Now().UnixNano(), + ballastSizeBytes: ballastSizeBytes, + views: []*view.View{viewProcessUptime, viewAllocMem, viewTotalAllocMem, viewSysMem, viewCPUSeconds, viewRSSMemory}, + done: make(chan struct{}), + } + + pid := os.Getpid() + + var err error + pmv.proc, err = process.NewProcess(int32(pid)) + if err != nil { + return nil, err + } + + return pmv, nil +} + +// StartCollection starts a ticker'd goroutine that will update the PMV measurements every 5 seconds +func (pmv *ProcessMetricsViews) StartCollection() { + go func() { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + pmv.updateViews() + case <-pmv.done: + return + } + } + }() +} + +// Views returns the views internal to the PMV. +func (pmv *ProcessMetricsViews) Views() []*view.View { + return pmv.views +} + +// StopCollection stops the collection of the process metric information. +func (pmv *ProcessMetricsViews) StopCollection() { + close(pmv.done) +} + +func (pmv *ProcessMetricsViews) updateViews() { + now := time.Now().UnixNano() + stats.Record(context.Background(), mUptime.M(float64(now-pmv.prevTimeUnixNano)/1e9)) + pmv.prevTimeUnixNano = now + + ms := &runtime.MemStats{} + pmv.readMemStats(ms) + stats.Record(context.Background(), mRuntimeAllocMem.M(int64(ms.Alloc))) + stats.Record(context.Background(), mRuntimeTotalAllocMem.M(int64(ms.TotalAlloc))) + stats.Record(context.Background(), mRuntimeSysMem.M(int64(ms.Sys))) + + if pmv.proc != nil { + if times, err := pmv.proc.Times(); err == nil { + stats.Record(context.Background(), mCPUSeconds.M(times.Total())) + } + if mem, err := pmv.proc.MemoryInfo(); err == nil { + stats.Record(context.Background(), mRSSMemory.M(int64(mem.RSS))) + } + } +} + +func (pmv *ProcessMetricsViews) readMemStats(ms *runtime.MemStats) { + runtime.ReadMemStats(ms) + ms.Alloc -= pmv.ballastSizeBytes + ms.HeapAlloc -= pmv.ballastSizeBytes + ms.HeapSys -= pmv.ballastSizeBytes + ms.HeapInuse -= pmv.ballastSizeBytes +} diff --git a/internal/otel_collector/service/internal/telemetry/process_telemetry_test.go b/internal/otel_collector/service/internal/telemetry/process_telemetry_test.go new file mode 100644 index 00000000000..593e8296119 --- /dev/null +++ b/internal/otel_collector/service/internal/telemetry/process_telemetry_test.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package telemetry + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opencensus.io/stats/view" +) + +func TestProcessTelemetry(t *testing.T) { + const ballastSizeBytes uint64 = 0 + + pmv, err := NewProcessMetricsViews(ballastSizeBytes) + require.NoError(t, err) + assert.NotNil(t, pmv) + + expectedViews := []string{ + // Changing a metric name is a breaking change. + // Adding new metrics is ok as long it follows the conventions described at + // https://pkg.go.dev/go.opentelemetry.io/collector/obsreport?tab=doc#hdr-Naming_Convention_for_New_Metrics + "process/uptime", + "process/runtime/heap_alloc_bytes", + "process/runtime/total_alloc_bytes", + "process/runtime/total_sys_memory_bytes", + "process/cpu_seconds", + "process/memory/rss", + } + processViews := pmv.Views() + assert.Len(t, processViews, len(expectedViews)) + + require.NoError(t, view.Register(processViews...)) + defer view.Unregister(processViews...) + + // Check that the views are actually filled. + pmv.updateViews() + <-time.After(200 * time.Millisecond) + + for _, viewName := range expectedViews { + rows, err := view.RetrieveData(viewName) + require.NoError(t, err, viewName) + + require.Len(t, rows, 1, viewName) + row := rows[0] + assert.Len(t, row.Tags, 0) + + var value float64 + if viewName == "process/uptime" { + value = row.Data.(*view.SumData).Value + } else { + value = row.Data.(*view.LastValueData).Value + } + + if viewName == "process/uptime" || viewName == "process/cpu_seconds" { + // This likely will still be zero when running the test. + assert.True(t, value >= 0, viewName) + continue + } + + assert.True(t, value > 0, viewName) + } +} diff --git a/internal/otel_collector/service/internal/templates.go b/internal/otel_collector/service/internal/templates.go new file mode 100644 index 00000000000..0d165b7a76c --- /dev/null +++ b/internal/otel_collector/service/internal/templates.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
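+
+// Note: the package-level templates below are parsed from the esc-embedded
+// assets in resources.go at package initialization; parseTemplate panics if
+// an asset is missing or malformed, so a broken embed fails fast rather than
+// at request time.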
+
+package internal
+
+import (
+	"html/template"
+	"io"
+	"io/ioutil"
+	"log"
+)
+
+var (
+	fs                = FS(false)
+	templateFunctions = template.FuncMap{
+		"even":     even,
+		"getKey":   getKey,
+		"getValue": getValue,
+	}
+	componentHeaderTemplate = parseTemplate("component_header")
+	extensionsTableTemplate = parseTemplate("extensions_table")
+	headerTemplate          = parseTemplate("header")
+	footerTemplate          = parseTemplate("footer")
+	pipelinesTableTemplate  = parseTemplate("pipelines_table")
+	propertiesTableTemplate = parseTemplate("properties_table")
+)
+
+func parseTemplate(name string) *template.Template {
+	f, err := fs.Open("/templates/" + name + ".html")
+	if err != nil {
+		log.Panicf("%v: %v", name, err)
+	}
+	defer f.Close()
+	text, err := ioutil.ReadAll(f)
+	if err != nil {
+		log.Panicf("%v: %v", name, err)
+	}
+	return template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text)))
+}
+
+// HeaderData contains data for the header template.
+type HeaderData struct {
+	Title string
+}
+
+// WriteHTMLHeader writes the header.
+func WriteHTMLHeader(w io.Writer, hd HeaderData) {
+	if err := headerTemplate.Execute(w, hd); err != nil {
+		log.Printf("zpages: executing template: %v", err)
+	}
+}
+
+// SummaryExtensionsTableData contains data for the extensions summary table template.
+type SummaryExtensionsTableData struct {
+	ComponentEndpoint string
+	Rows              []SummaryExtensionsTableRowData
+}
+
+// SummaryExtensionsTableRowData contains data for one row in the extensions summary table template.
+type SummaryExtensionsTableRowData struct {
+	FullName string
+	Enabled  bool
+}
+
+// WriteHTMLExtensionsSummaryTable writes the extensions summary table.
+// It does not write the header or footer.
+func WriteHTMLExtensionsSummaryTable(w io.Writer, spd SummaryExtensionsTableData) {
+	if err := extensionsTableTemplate.Execute(w, spd); err != nil {
+		log.Printf("zpages: executing template: %v", err)
+	}
+}
+
+// SummaryPipelinesTableData contains data for the pipelines summary table template.
+type SummaryPipelinesTableData struct {
+	ComponentEndpoint string
+	Rows              []SummaryPipelinesTableRowData
+}
+
+// SummaryPipelinesTableRowData contains data for one row in the pipelines summary table template.
+type SummaryPipelinesTableRowData struct {
+	FullName            string
+	InputType           string
+	MutatesConsumedData bool
+	Receivers           []string
+	Processors          []string
+	Exporters           []string
+}
+
+// WriteHTMLPipelinesSummaryTable writes the pipelines summary table.
+// It does not write the header or footer.
+func WriteHTMLPipelinesSummaryTable(w io.Writer, spd SummaryPipelinesTableData) {
+	if err := pipelinesTableTemplate.Execute(w, spd); err != nil {
+		log.Printf("zpages: executing template: %v", err)
+	}
+}
+
+// ComponentHeaderData contains data for the component header template.
+type ComponentHeaderData struct {
+	Name              string
+	ComponentEndpoint string
+	Link              bool
+}
+
+// WriteHTMLComponentHeader writes the component header.
+func WriteHTMLComponentHeader(w io.Writer, chd ComponentHeaderData) {
+	if err := componentHeaderTemplate.Execute(w, chd); err != nil {
+		log.Printf("zpages: executing template: %v", err)
+	}
+}
+
+// PropertiesTableData contains data for the properties table template.
+type PropertiesTableData struct {
+	Name       string
+	Properties [][2]string
+}
+
+// WriteHTMLPropertiesTable writes the properties table.
+func WriteHTMLPropertiesTable(w io.Writer, chd PropertiesTableData) { + if err := propertiesTableTemplate.Execute(w, chd); err != nil { + log.Printf("zpages: executing template: %v", err) + } +} + +// WriteHTMLFooter writes the footer. +func WriteHTMLFooter(w io.Writer) { + if err := footerTemplate.Execute(w, nil); err != nil { + log.Printf("zpages: executing template: %v", err) + } +} + +func even(x int) bool { + return x%2 == 0 +} + +func getKey(row [2]string) string { + return row[0] +} + +func getValue(row [2]string) string { + return row[1] +} diff --git a/internal/otel_collector/service/internal/templates/component_header.html b/internal/otel_collector/service/internal/templates/component_header.html new file mode 100644 index 00000000000..463a15cc7e6 --- /dev/null +++ b/internal/otel_collector/service/internal/templates/component_header.html @@ -0,0 +1,7 @@ +{{$a := .ComponentEndpoint}} +{{$link := .Link}} +{{- if $link -}} +
{{.Name}}
+{{- else -}} +
{{.Name}}
+{{- end -}} \ No newline at end of file diff --git a/internal/otel_collector/service/internal/templates/extensions_table.html b/internal/otel_collector/service/internal/templates/extensions_table.html new file mode 100644 index 00000000000..0b39ee933e1 --- /dev/null +++ b/internal/otel_collector/service/internal/templates/extensions_table.html @@ -0,0 +1,11 @@ + + {{$a := .ComponentEndpoint}} + {{range $rowindex, $row := .Rows}} + {{- if even $rowindex}} + + {{else}} + {{end -}} + + + {{end}} +
{{.FullName}}
\ No newline at end of file diff --git a/internal/otel_collector/service/internal/templates/footer.html b/internal/otel_collector/service/internal/templates/footer.html new file mode 100644 index 00000000000..691287b6e35 --- /dev/null +++ b/internal/otel_collector/service/internal/templates/footer.html @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/internal/otel_collector/service/internal/templates/header.html b/internal/otel_collector/service/internal/templates/header.html new file mode 100644 index 00000000000..c381c0ff6c8 --- /dev/null +++ b/internal/otel_collector/service/internal/templates/header.html @@ -0,0 +1,11 @@ + + + + {{.Title}} + + + + + + +

{{.Title}}

\ No newline at end of file diff --git a/internal/otel_collector/service/internal/templates/pipelines_table.html b/internal/otel_collector/service/internal/templates/pipelines_table.html new file mode 100644 index 00000000000..8798f15862d --- /dev/null +++ b/internal/otel_collector/service/internal/templates/pipelines_table.html @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + {{$a := .ComponentEndpoint}} + {{range $rowindex, $row := .Rows}} + {{- if even $rowindex}} + + {{else}} + {{end -}} + + + + + + + + {{end}} +
FullName  |  InputType  |  MutatesConsumedData  |  Receivers  |  Processors  |  Exporters
{{$row.FullName}}  |  {{$row.InputType}}  |  {{$row.MutatesConsumedData}}  |   + {{range $recindex, $rec := $row.Receivers}} + {{$rec}} +
+ {{end}} +
  |   + → + {{range $proindex, $pro := $row.Processors}} + {{$pro}} + → + {{end}} +   |   + {{range $expindex, $exp := $row.Exporters}} + {{$exp}} +
+ {{end}} +
\ No newline at end of file diff --git a/internal/otel_collector/service/internal/templates/properties_table.html b/internal/otel_collector/service/internal/templates/properties_table.html new file mode 100644 index 00000000000..a43841ba770 --- /dev/null +++ b/internal/otel_collector/service/internal/templates/properties_table.html @@ -0,0 +1,14 @@ +{{.Name}}: + + {{ $index := 0 }} + {{range $index, $element := .Properties}} + {{- if even $index}} + + {{else}} + {{end -}} + + + + + {{end}} +
{{$element|getKey}}  |  {{$element|getValue}}
\ No newline at end of file diff --git a/internal/otel_collector/service/internal/templates_test.go b/internal/otel_collector/service/internal/templates_test.go new file mode 100644 index 00000000000..4e320a9e3d0 --- /dev/null +++ b/internal/otel_collector/service/internal/templates_test.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "bytes" + "html/template" + "testing" + + "github.com/stretchr/testify/assert" +) + +const tmplBody = ` +

{{.Index|even}}

+

{{.Element|getKey}}

+

{{.Element|getValue}}

+` + +const want = ` +

true

+

key

+

value

+` + +type testFuncsInput struct { + Index int + Element [2]string +} + +var tmpl = template.Must(template.New("countTest").Funcs(templateFunctions).Parse(tmplBody)) + +func TestTemplateFuncs(t *testing.T) { + buf := new(bytes.Buffer) + input := testFuncsInput{ + Index: 32, + Element: [2]string{"key", "value"}, + } + assert.NoError(t, tmpl.Execute(buf, input)) + assert.EqualValues(t, want, buf.String()) +} + +func TestNoCrash(t *testing.T) { + buf := new(bytes.Buffer) + assert.NotPanics(t, func() { WriteHTMLHeader(buf, HeaderData{Title: "Foo"}) }) + assert.NotPanics(t, func() { WriteHTMLComponentHeader(buf, ComponentHeaderData{Name: "Bar"}) }) + assert.NotPanics(t, func() { + WriteHTMLComponentHeader(buf, ComponentHeaderData{Name: "Bar", ComponentEndpoint: "pagez", Link: true}) + }) + assert.NotPanics(t, func() { + WriteHTMLPipelinesSummaryTable(buf, SummaryPipelinesTableData{ + ComponentEndpoint: "pagez", + Rows: []SummaryPipelinesTableRowData{{ + FullName: "test", + InputType: "metrics", + MutatesConsumedData: false, + Receivers: []string{"oc"}, + Processors: []string{"nop"}, + Exporters: []string{"oc"}, + }}, + }) + }) + assert.NotPanics(t, func() { + WriteHTMLExtensionsSummaryTable(buf, SummaryExtensionsTableData{ + ComponentEndpoint: "pagez", + Rows: []SummaryExtensionsTableRowData{{ + FullName: "test", + }}, + }) + }) + assert.NotPanics(t, func() { + WriteHTMLPropertiesTable(buf, PropertiesTableData{Name: "Bar", Properties: [][2]string{{"key", "value"}}}) + }) + assert.NotPanics(t, func() { WriteHTMLFooter(buf) }) + assert.NotPanics(t, func() { WriteHTMLFooter(buf) }) +} diff --git a/internal/otel_collector/service/logger.go b/internal/otel_collector/service/logger.go new file mode 100644 index 00000000000..f494972bd05 --- /dev/null +++ b/internal/otel_collector/service/logger.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "flag" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/collector/internal/version" +) + +const ( + logLevelCfg = "log-level" + logProfileCfg = "log-profile" + logFormatCfg = "log-format" +) + +var ( + // Command line pointer to logger level flag configuration. + loggerLevelPtr *string + loggerProfilePtr *string + loggerFormatPtr *string +) + +func loggerFlags(flags *flag.FlagSet) { + loggerLevelPtr = flags.String(logLevelCfg, "INFO", "Output level of logs (DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL)") + loggerProfilePtr = flags.String(logProfileCfg, "", "Logging profile to use (dev, prod)") + + // Note: we use "console" by default for more human-friendly mode of logging (tab delimited, formatted timestamps). 
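+	// ("json" is the other accepted encoding; zap then emits one structured
+	// JSON object per log line.)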
+ loggerFormatPtr = flags.String(logFormatCfg, "console", "Format of logs to use (json, console)") +} + +func newLogger(options []zap.Option) (*zap.Logger, error) { + var level zapcore.Level + err := (&level).UnmarshalText([]byte(*loggerLevelPtr)) + if err != nil { + return nil, err + } + + conf := zap.NewProductionConfig() + + // Use logger profile if set on command line before falling back + // to default based on build type. + switch *loggerProfilePtr { + case "dev": + conf = zap.NewDevelopmentConfig() + case "prod": + conf = zap.NewProductionConfig() + default: + if version.IsDevBuild() { + conf = zap.NewDevelopmentConfig() + } + } + + conf.Encoding = *loggerFormatPtr + if conf.Encoding == "console" { + // Human-readable timestamps for console format of logs. + conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + } + + conf.Level.SetLevel(level) + return conf.Build(options...) +} diff --git a/internal/otel_collector/service/service.go b/internal/otel_collector/service/service.go new file mode 100644 index 00000000000..d0ff423a199 --- /dev/null +++ b/internal/otel_collector/service/service.go @@ -0,0 +1,596 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package service handles the command-line, configuration, and runs the +// OpenTelemetry Collector. +package service + +import ( + "context" + "errors" + "flag" + "fmt" + "net/http" + "os" + "os/signal" + "path" + "runtime" + "sort" + "syscall" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configcheck" + "go.opentelemetry.io/collector/config/configmodels" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/internal/collector/telemetry" + "go.opentelemetry.io/collector/internal/version" + "go.opentelemetry.io/collector/service/builder" + "go.opentelemetry.io/collector/service/internal" +) + +const ( + servicezPath = "servicez" + pipelinezPath = "pipelinez" + extensionzPath = "extensionz" +) + +// State defines Application's state. +type State int + +const ( + Starting State = iota + Running + Closing + Closed +) + +// GetStateChannel returns state channel of the application. +func (app *Application) GetStateChannel() chan State { + return app.stateChannel +} + +// Application represents a collector application +type Application struct { + info component.ApplicationStartInfo + rootCmd *cobra.Command + v *viper.Viper + logger *zap.Logger + builtExporters builder.Exporters + builtReceivers builder.Receivers + builtPipelines builder.BuiltPipelines + builtExtensions builder.Extensions + stateChannel chan State + + factories component.Factories + config *configmodels.Config + + // stopTestChan is used to terminate the application in end to end tests. 
+ stopTestChan chan struct{} + + // signalsChannel is used to receive termination signals from the OS. + signalsChannel chan os.Signal + + // asyncErrorChannel is used to signal a fatal error from any component. + asyncErrorChannel chan error +} + +// Command returns Application's root command. +func (app *Application) Command() *cobra.Command { + return app.rootCmd +} + +// Parameters holds configuration for creating a new Application. +type Parameters struct { + // Factories component factories. + Factories component.Factories + // ApplicationStartInfo provides application start information. + ApplicationStartInfo component.ApplicationStartInfo + // ConfigFactory that creates the configuration. + // If it is not provided the default factory (FileLoaderConfigFactory) is used. + // The default factory loads the configuration file and overrides component's configuration + // properties supplied via --set command line flag. + ConfigFactory ConfigFactory + // LoggingOptions provides a way to change behavior of zap logging. + LoggingOptions []zap.Option +} + +// ConfigFactory creates config. +// The ConfigFactory implementation should call AddSetFlagProperties to enable configuration passed via `--set` flag. +// Viper and command instances are passed from the Application. +// The factories also belong to the Application and are equal to the factories passed via Parameters. +type ConfigFactory func(v *viper.Viper, cmd *cobra.Command, factories component.Factories) (*configmodels.Config, error) + +// FileLoaderConfigFactory implements ConfigFactory and it creates configuration from file +// and from --set command line flag (if the flag is present). +func FileLoaderConfigFactory(v *viper.Viper, cmd *cobra.Command, factories component.Factories) (*configmodels.Config, error) { + file := builder.GetConfigFile() + if file == "" { + return nil, errors.New("config file not specified") + } + // first load the config file + v.SetConfigFile(file) + err := v.ReadInConfig() + if err != nil { + return nil, fmt.Errorf("error loading config file %q: %v", file, err) + } + + // next overlay the config file with --set flags + if err := AddSetFlagProperties(v, cmd); err != nil { + return nil, fmt.Errorf("failed to process set flag: %v", err) + } + return config.Load(v, factories) +} + +// New creates and returns a new instance of Application. +func New(params Parameters) (*Application, error) { + app := &Application{ + info: params.ApplicationStartInfo, + v: config.NewViper(), + factories: params.Factories, + stateChannel: make(chan State, Closed+1), + } + + factory := params.ConfigFactory + if factory == nil { + // use default factory that loads the configuration file + factory = FileLoaderConfigFactory + } + + rootCmd := &cobra.Command{ + Use: params.ApplicationStartInfo.ExeName, + Long: params.ApplicationStartInfo.LongName, + RunE: func(cmd *cobra.Command, args []string) error { + err := app.init(params.LoggingOptions) + if err != nil { + return err + } + + err = app.execute(context.Background(), factory) + if err != nil { + return err + } + + return nil + }, + } + + // TODO: coalesce this code and expose this information to other components. 
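+	// Each subsystem registers its flags on one shared FlagSet
+	// (configtelemetry, telemetry, builder, logger), which is then attached
+	// to the cobra command, so every flag is defined exactly once.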
+ flagSet := new(flag.FlagSet) + addFlagsFns := []func(*flag.FlagSet){ + configtelemetry.Flags, + telemetry.Flags, + builder.Flags, + loggerFlags, + } + for _, addFlags := range addFlagsFns { + addFlags(flagSet) + } + rootCmd.Flags().AddGoFlagSet(flagSet) + addSetFlag(rootCmd.Flags()) + + app.rootCmd = rootCmd + + return app, nil +} + +// ReportFatalError is used to report to the host that the receiver encountered +// a fatal error (i.e.: an error that the instance can't recover from) after +// its start function has already returned. +func (app *Application) ReportFatalError(err error) { + app.asyncErrorChannel <- err +} + +// GetLogger returns logger used by the Application. +// The logger is initialized after application start. +func (app *Application) GetLogger() *zap.Logger { + return app.logger +} + +func (app *Application) GetFactory(kind component.Kind, componentType configmodels.Type) component.Factory { + switch kind { + case component.KindReceiver: + return app.factories.Receivers[componentType] + case component.KindProcessor: + return app.factories.Processors[componentType] + case component.KindExporter: + return app.factories.Exporters[componentType] + case component.KindExtension: + return app.factories.Extensions[componentType] + } + return nil +} + +func (app *Application) GetExtensions() map[configmodels.Extension]component.ServiceExtension { + return app.builtExtensions.ToMap() +} + +func (app *Application) GetExporters() map[configmodels.DataType]map[configmodels.Exporter]component.Exporter { + return app.builtExporters.ToMapByDataType() +} + +func (app *Application) RegisterZPages(mux *http.ServeMux, pathPrefix string) { + mux.HandleFunc(path.Join(pathPrefix, servicezPath), app.handleServicezRequest) + mux.HandleFunc(path.Join(pathPrefix, pipelinezPath), app.handlePipelinezRequest) + mux.HandleFunc(path.Join(pathPrefix, extensionzPath), app.handleExtensionzRequest) +} + +func (app *Application) Shutdown() { + // TODO: Implement a proper shutdown with graceful draining of the pipeline. + // See https://github.com/open-telemetry/opentelemetry-collector/issues/483. + defer func() { + if r := recover(); r != nil { + app.logger.Info("stopTestChan already closed") + } + }() + close(app.stopTestChan) +} + +func (app *Application) init(options []zap.Option) error { + l, err := newLogger(options) + if err != nil { + return fmt.Errorf("failed to get logger: %w", err) + } + app.logger = l + return nil +} + +func (app *Application) setupTelemetry(ballastSizeBytes uint64) error { + app.logger.Info("Setting up own telemetry...") + + err := applicationTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger) + if err != nil { + return fmt.Errorf("failed to initialize telemetry: %w", err) + } + + return nil +} + +// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen. +func (app *Application) runAndWaitForShutdownEvent() { + app.logger.Info("Everything is ready. Begin running and processing data.") + + // plug SIGTERM signal into a channel. + app.signalsChannel = make(chan os.Signal, 1) + signal.Notify(app.signalsChannel, os.Interrupt, syscall.SIGTERM) + + // set the channel to stop testing. 
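+	// (Shutdown closes stopTestChan, which unblocks the select below.)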
+ app.stopTestChan = make(chan struct{}) + app.stateChannel <- Running + select { + case err := <-app.asyncErrorChannel: + app.logger.Error("Asynchronous error received, terminating process", zap.Error(err)) + case s := <-app.signalsChannel: + app.logger.Info("Received signal from OS", zap.String("signal", s.String())) + case <-app.stopTestChan: + app.logger.Info("Received stop test request") + } + app.stateChannel <- Closing +} + +func (app *Application) setupConfigurationComponents(ctx context.Context, factory ConfigFactory) error { + if err := configcheck.ValidateConfigFromFactories(app.factories); err != nil { + return err + } + + app.logger.Info("Loading configuration...") + cfg, err := factory(app.v, app.rootCmd, app.factories) + if err != nil { + return fmt.Errorf("cannot load configuration: %w", err) + } + err = config.ValidateConfig(cfg, app.logger) + if err != nil { + return fmt.Errorf("cannot load configuration: %w", err) + } + + app.config = cfg + app.logger.Info("Applying configuration...") + + err = app.setupExtensions(ctx) + if err != nil { + return fmt.Errorf("cannot setup extensions: %w", err) + } + + err = app.setupPipelines(ctx) + if err != nil { + return fmt.Errorf("cannot setup pipelines: %w", err) + } + + return nil +} + +func (app *Application) setupExtensions(ctx context.Context) error { + var err error + app.builtExtensions, err = builder.NewExtensionsBuilder(app.logger, app.info, app.config, app.factories.Extensions).Build() + if err != nil { + return fmt.Errorf("cannot build builtExtensions: %w", err) + } + app.logger.Info("Starting extensions...") + return app.builtExtensions.StartAll(ctx, app) +} + +func (app *Application) setupPipelines(ctx context.Context) error { + // Pipeline is built backwards, starting from exporters, so that we create objects + // which are referenced before objects which reference them. + + // First create exporters. + var err error + app.builtExporters, err = builder.NewExportersBuilder(app.logger, app.info, app.config, app.factories.Exporters).Build() + if err != nil { + return fmt.Errorf("cannot build builtExporters: %w", err) + } + + app.logger.Info("Starting exporters...") + err = app.builtExporters.StartAll(ctx, app) + if err != nil { + return fmt.Errorf("cannot start builtExporters: %w", err) + } + + // Create pipelines and their processors and plug exporters to the + // end of the pipelines. + app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.info, app.config, app.builtExporters, app.factories.Processors).Build() + if err != nil { + return fmt.Errorf("cannot build pipelines: %w", err) + } + + app.logger.Info("Starting processors...") + err = app.builtPipelines.StartProcessors(ctx, app) + if err != nil { + return fmt.Errorf("cannot start processors: %w", err) + } + + // Create receivers and plug them into the start of the pipelines. + app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.info, app.config, app.builtPipelines, app.factories.Receivers).Build() + if err != nil { + return fmt.Errorf("cannot build receivers: %w", err) + } + + app.logger.Info("Starting receivers...") + err = app.builtReceivers.StartAll(ctx, app) + if err != nil { + return fmt.Errorf("cannot start receivers: %w", err) + } + + return nil +} + +func (app *Application) shutdownPipelines(ctx context.Context) error { + // Shutdown order is the reverse of building: first receivers, then flushing pipelines + // giving senders a chance to send all their data. 
This may take time, the allowed + // time should be part of configuration. + + var errs []error + + app.logger.Info("Stopping receivers...") + err := app.builtReceivers.ShutdownAll(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to stop receivers: %w", err)) + } + + app.logger.Info("Stopping processors...") + err = app.builtPipelines.ShutdownProcessors(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown processors: %w", err)) + } + + app.logger.Info("Stopping exporters...") + err = app.builtExporters.ShutdownAll(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown exporters: %w", err)) + } + + return componenterror.CombineErrors(errs) +} + +func (app *Application) shutdownExtensions(ctx context.Context) error { + app.logger.Info("Stopping extensions...") + err := app.builtExtensions.ShutdownAll(ctx) + if err != nil { + return fmt.Errorf("failed to shutdown extensions: %w", err) + } + return nil +} + +func (app *Application) execute(ctx context.Context, factory ConfigFactory) error { + app.logger.Info("Starting "+app.info.LongName+"...", + zap.String("Version", app.info.Version), + zap.String("GitHash", app.info.GitHash), + zap.Int("NumCPU", runtime.NumCPU()), + ) + app.stateChannel <- Starting + + // Set memory ballast + ballast, ballastSizeBytes := app.createMemoryBallast() + + app.asyncErrorChannel = make(chan error) + + // Setup everything. + err := app.setupTelemetry(ballastSizeBytes) + if err != nil { + return err + } + + err = app.setupConfigurationComponents(ctx, factory) + if err != nil { + return err + } + + err = app.builtExtensions.NotifyPipelineReady() + if err != nil { + return err + } + + // Everything is ready, now run until an event requiring shutdown happens. + app.runAndWaitForShutdownEvent() + + // Accumulate errors and proceed with shutting down remaining components. + var errs []error + + // Begin shutdown sequence. + runtime.KeepAlive(ballast) + app.logger.Info("Starting shutdown...") + + err = app.builtExtensions.NotifyPipelineNotReady() + if err != nil { + errs = append(errs, fmt.Errorf("failed to notify that pipeline is not ready: %w", err)) + } + + err = app.shutdownPipelines(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown pipelines: %w", err)) + } + + err = app.shutdownExtensions(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown extensions: %w", err)) + } + + err = applicationTelemetry.shutdown() + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown extensions: %w", err)) + } + + app.logger.Info("Shutdown complete.") + app.stateChannel <- Closed + close(app.stateChannel) + + return componenterror.CombineErrors(errs) +} + +// Run starts the collector according to the command and configuration +// given by the user, and waits for it to complete. +func (app *Application) Run() error { + // From this point on do not show usage in case of error. 
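+ // (Without SilenceUsage, cobra would print the full usage text whenever the
+ // command returns an error, burying the actual failure message.)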
+ app.rootCmd.SilenceUsage = true + + return app.rootCmd.Execute() +} + +const ( + zPipelineName = "zpipelinename" + zComponentName = "zcomponentname" + zComponentKind = "zcomponentkind" + zExtensionName = "zextensionname" +) + +func (app *Application) handleServicezRequest(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + w.Header().Set("Content-Type", "text/html; charset=utf-8") + internal.WriteHTMLHeader(w, internal.HeaderData{Title: "Service"}) + internal.WriteHTMLComponentHeader(w, internal.ComponentHeaderData{ + Name: "Pipelines", + ComponentEndpoint: pipelinezPath, + Link: true, + }) + internal.WriteHTMLComponentHeader(w, internal.ComponentHeaderData{ + Name: "Extensions", + ComponentEndpoint: extensionzPath, + Link: true, + }) + internal.WriteHTMLPropertiesTable(w, internal.PropertiesTableData{Name: "Build And Runtime", Properties: version.InfoVar}) + internal.WriteHTMLFooter(w) +} + +func (app *Application) handlePipelinezRequest(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + w.Header().Set("Content-Type", "text/html; charset=utf-8") + pipelineName := r.Form.Get(zPipelineName) + componentName := r.Form.Get(zComponentName) + componentKind := r.Form.Get(zComponentKind) + internal.WriteHTMLHeader(w, internal.HeaderData{Title: "Pipelines"}) + internal.WriteHTMLPipelinesSummaryTable(w, app.getPipelinesSummaryTableData()) + if pipelineName != "" && componentName != "" && componentKind != "" { + fullName := componentName + if componentKind == "processor" { + fullName = pipelineName + "/" + componentName + } + internal.WriteHTMLComponentHeader(w, internal.ComponentHeaderData{ + Name: componentKind + ": " + fullName, + }) + // TODO: Add config + status info. + } + internal.WriteHTMLFooter(w) +} + +func (app *Application) handleExtensionzRequest(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + w.Header().Set("Content-Type", "text/html; charset=utf-8") + extensionName := r.Form.Get(zExtensionName) + internal.WriteHTMLHeader(w, internal.HeaderData{Title: "Extensions"}) + internal.WriteHTMLExtensionsSummaryTable(w, app.getExtensionsSummaryTableData()) + if extensionName != "" { + internal.WriteHTMLComponentHeader(w, internal.ComponentHeaderData{ + Name: extensionName, + }) + // TODO: Add config + status info. 
+ }
+ internal.WriteHTMLFooter(w)
+}
+
+func (app *Application) getPipelinesSummaryTableData() internal.SummaryPipelinesTableData {
+ data := internal.SummaryPipelinesTableData{
+ ComponentEndpoint: pipelinezPath,
+ }
+
+ data.Rows = make([]internal.SummaryPipelinesTableRowData, 0, len(app.builtPipelines))
+ for c, p := range app.builtPipelines {
+ row := internal.SummaryPipelinesTableRowData{
+ FullName: c.Name,
+ InputType: string(c.InputType),
+ MutatesConsumedData: p.MutatesConsumedData,
+ Receivers: c.Receivers,
+ Processors: c.Processors,
+ Exporters: c.Exporters,
+ }
+ data.Rows = append(data.Rows, row)
+ }
+
+ sort.Slice(data.Rows, func(i, j int) bool {
+ return data.Rows[i].FullName < data.Rows[j].FullName
+ })
+ return data
+}
+
+func (app *Application) getExtensionsSummaryTableData() internal.SummaryExtensionsTableData {
+ data := internal.SummaryExtensionsTableData{
+ ComponentEndpoint: extensionzPath,
+ }
+
+ data.Rows = make([]internal.SummaryExtensionsTableRowData, 0, len(app.builtExtensions))
+ for c := range app.builtExtensions {
+ row := internal.SummaryExtensionsTableRowData{FullName: c.Name()}
+ data.Rows = append(data.Rows, row)
+ }
+
+ sort.Slice(data.Rows, func(i, j int) bool {
+ return data.Rows[i].FullName < data.Rows[j].FullName
+ })
+ return data
+}
+
+func (app *Application) createMemoryBallast() ([]byte, uint64) {
+ ballastSizeMiB := builder.MemBallastSize()
+ if ballastSizeMiB > 0 {
+ ballastSizeBytes := uint64(ballastSizeMiB) * 1024 * 1024
+ ballast := make([]byte, ballastSizeBytes)
+ app.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB))
+ return ballast, ballastSizeBytes
+ }
+ return nil, 0
+}
diff --git a/internal/otel_collector/service/service_test.go b/internal/otel_collector/service/service_test.go
new file mode 100644
index 00000000000..2d1125d005c
--- /dev/null
+++ b/internal/otel_collector/service/service_test.go
@@ -0,0 +1,649 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package service handles the command-line and configuration, and runs the OpenTelemetry Collector.
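+// The tests below exercise the Application lifecycle end to end: startup,
+// state transitions, configuration handling, and shutdown.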
+package service
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/component/componenttest"
+ "go.opentelemetry.io/collector/config"
+ "go.opentelemetry.io/collector/config/configmodels"
+ "go.opentelemetry.io/collector/processor/attributesprocessor"
+ "go.opentelemetry.io/collector/processor/batchprocessor"
+ "go.opentelemetry.io/collector/receiver/jaegerreceiver"
+ "go.opentelemetry.io/collector/service/builder"
+ "go.opentelemetry.io/collector/service/defaultcomponents"
+ "go.opentelemetry.io/collector/testutil"
+)
+
+func TestApplication_Start(t *testing.T) {
+ factories, err := defaultcomponents.Components()
+ require.NoError(t, err)
+
+ loggingHookCalled := false
+ hook := func(entry zapcore.Entry) error {
+ loggingHookCalled = true
+ return nil
+ }
+
+ app, err := New(Parameters{Factories: factories, ApplicationStartInfo: componenttest.TestApplicationStartInfo(), LoggingOptions: []zap.Option{zap.Hooks(hook)}})
+ require.NoError(t, err)
+ assert.Equal(t, app.rootCmd, app.Command())
+
+ const testPrefix = "a_test"
+ metricsPort := testutil.GetAvailablePort(t)
+ app.rootCmd.SetArgs([]string{
+ "--config=testdata/otelcol-config.yaml",
+ "--metrics-addr=localhost:" + strconv.FormatUint(uint64(metricsPort), 10),
+ "--metrics-prefix=" + testPrefix,
+ })
+
+ appDone := make(chan struct{})
+ go func() {
+ defer close(appDone)
+ assert.NoError(t, app.Run())
+ }()
+
+ assert.Equal(t, Starting, <-app.GetStateChannel())
+ assert.Equal(t, Running, <-app.GetStateChannel())
+ require.True(t, isAppAvailable(t, "http://localhost:13133"))
+ assert.Equal(t, app.logger, app.GetLogger())
+ assert.True(t, loggingHookCalled)
+
+ // All labels added to all collector metrics by default are listed below.
+ // These labels are hard-coded here in order to avoid inadvertent changes:
+ // at this point changing labels should be treated as a breaking change
+ // and requires a good justification. The reason is that changes to metric
+ // names or labels can break alerting, dashboards, etc. that are used to
+ // monitor the Collector in production deployments.
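+ // The service_instance_id label is attached by appTelemetry.init (see
+ // telemetry.go), which registers it as a Prometheus const label derived
+ // from conventions.AttributeServiceInstance.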
+ mandatoryLabels := []string{ + "service_instance_id", + } + assertMetrics(t, testPrefix, metricsPort, mandatoryLabels) + + app.signalsChannel <- syscall.SIGTERM + <-appDone + assert.Equal(t, Closing, <-app.GetStateChannel()) + assert.Equal(t, Closed, <-app.GetStateChannel()) +} + +type mockAppTelemetry struct{} + +func (tel *mockAppTelemetry) init(chan<- error, uint64, *zap.Logger) error { + return nil +} + +func (tel *mockAppTelemetry) shutdown() error { + return errors.New("err1") +} + +func TestApplication_ReportError(t *testing.T) { + // use a mock AppTelemetry struct to return an error on shutdown + preservedAppTelemetry := applicationTelemetry + applicationTelemetry = &mockAppTelemetry{} + defer func() { applicationTelemetry = preservedAppTelemetry }() + + factories, err := defaultcomponents.Components() + require.NoError(t, err) + + app, err := New(Parameters{Factories: factories, ApplicationStartInfo: componenttest.TestApplicationStartInfo()}) + require.NoError(t, err) + + app.rootCmd.SetArgs([]string{"--config=testdata/otelcol-config-minimal.yaml"}) + + appDone := make(chan struct{}) + go func() { + defer close(appDone) + assert.EqualError(t, app.Run(), "failed to shutdown extensions: err1") + }() + + assert.Equal(t, Starting, <-app.GetStateChannel()) + assert.Equal(t, Running, <-app.GetStateChannel()) + app.ReportFatalError(errors.New("err2")) + <-appDone + assert.Equal(t, Closing, <-app.GetStateChannel()) + assert.Equal(t, Closed, <-app.GetStateChannel()) +} + +func TestApplication_StartAsGoRoutine(t *testing.T) { + factories, err := defaultcomponents.Components() + require.NoError(t, err) + + params := Parameters{ + ApplicationStartInfo: componenttest.TestApplicationStartInfo(), + ConfigFactory: func(_ *viper.Viper, _ *cobra.Command, factories component.Factories) (*configmodels.Config, error) { + return constructMimumalOpConfig(t, factories), nil + }, + Factories: factories, + } + app, err := New(params) + require.NoError(t, err) + app.Command().SetArgs([]string{ + "--metrics-level=NONE", + }) + + appDone := make(chan struct{}) + go func() { + defer close(appDone) + appErr := app.Run() + if appErr != nil { + err = appErr + } + }() + + assert.Equal(t, Starting, <-app.GetStateChannel()) + assert.Equal(t, Running, <-app.GetStateChannel()) + + app.Shutdown() + app.Shutdown() + <-appDone + assert.Equal(t, Closing, <-app.GetStateChannel()) + assert.Equal(t, Closed, <-app.GetStateChannel()) +} + +// isAppAvailable checks if the healthcheck server at the given endpoint is +// returning `available`. +func isAppAvailable(t *testing.T, healthCheckEndPoint string) bool { + client := &http.Client{} + resp, err := client.Get(healthCheckEndPoint) + require.NoError(t, err) + + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func assertMetrics(t *testing.T, prefix string, metricsPort uint16, mandatoryLabels []string) { + client := &http.Client{} + resp, err := client.Get(fmt.Sprintf("http://localhost:%d/metrics", metricsPort)) + require.NoError(t, err) + + defer resp.Body.Close() + reader := bufio.NewReader(resp.Body) + + var parser expfmt.TextParser + parsed, err := parser.TextToMetricFamilies(reader) + require.NoError(t, err) + + for metricName, metricFamily := range parsed { + // require is used here so test fails with a single message. 
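+ // (assert, by contrast, would record a failure for every non-matching
+ // metric family before the test ends.)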
+ require.True( + t, + strings.HasPrefix(metricName, prefix), + "expected prefix %q but string starts with %q", + prefix, + metricName[:len(prefix)+1]+"...") + + for _, metric := range metricFamily.Metric { + var labelNames []string + for _, labelPair := range metric.Label { + labelNames = append(labelNames, *labelPair.Name) + } + + for _, mandatoryLabel := range mandatoryLabels { + // require is used here so test fails with a single message. + require.Contains(t, labelNames, mandatoryLabel, "mandatory label %q not present", mandatoryLabel) + } + } + } +} + +func TestApplication_setupExtensions(t *testing.T) { + exampleExtensionFactory := &componenttest.ExampleExtensionFactory{FailCreation: true} + exampleExtensionConfig := &componenttest.ExampleExtensionCfg{ + ExtensionSettings: configmodels.ExtensionSettings{ + TypeVal: exampleExtensionFactory.Type(), + NameVal: string(exampleExtensionFactory.Type()), + }, + } + + badExtensionFactory := &badExtensionFactory{} + badExtensionFactoryConfig := &configmodels.ExtensionSettings{ + TypeVal: "bf", + NameVal: "bf", + } + + tests := []struct { + name string + factories component.Factories + config *configmodels.Config + wantErrMsg string + }{ + { + name: "extension_not_configured", + config: &configmodels.Config{ + Service: configmodels.Service{ + Extensions: []string{ + "myextension", + }, + }, + }, + wantErrMsg: "cannot build builtExtensions: extension \"myextension\" is not configured", + }, + { + name: "missing_extension_factory", + config: &configmodels.Config{ + Extensions: map[string]configmodels.Extension{ + string(exampleExtensionFactory.Type()): exampleExtensionConfig, + }, + Service: configmodels.Service{ + Extensions: []string{ + string(exampleExtensionFactory.Type()), + }, + }, + }, + wantErrMsg: "cannot build builtExtensions: extension factory for type \"exampleextension\" is not configured", + }, + { + name: "error_on_create_extension", + factories: component.Factories{ + Extensions: map[configmodels.Type]component.ExtensionFactory{ + exampleExtensionFactory.Type(): exampleExtensionFactory, + }, + }, + config: &configmodels.Config{ + Extensions: map[string]configmodels.Extension{ + string(exampleExtensionFactory.Type()): exampleExtensionConfig, + }, + Service: configmodels.Service{ + Extensions: []string{ + string(exampleExtensionFactory.Type()), + }, + }, + }, + wantErrMsg: "cannot build builtExtensions: failed to create extension \"exampleextension\": cannot create \"exampleextension\" extension type", + }, + { + name: "bad_factory", + factories: component.Factories{ + Extensions: map[configmodels.Type]component.ExtensionFactory{ + badExtensionFactory.Type(): badExtensionFactory, + }, + }, + config: &configmodels.Config{ + Extensions: map[string]configmodels.Extension{ + string(badExtensionFactory.Type()): badExtensionFactoryConfig, + }, + Service: configmodels.Service{ + Extensions: []string{ + string(badExtensionFactory.Type()), + }, + }, + }, + wantErrMsg: "cannot build builtExtensions: factory for \"bf\" produced a nil extension", + }, + } + + nopLogger := zap.NewNop() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + app := &Application{ + logger: nopLogger, + factories: tt.factories, + config: tt.config, + } + + err := app.setupExtensions(context.Background()) + + if tt.wantErrMsg == "" { + assert.NoError(t, err) + assert.Equal(t, 1, len(app.builtExtensions)) + for _, ext := range app.builtExtensions { + assert.NotNil(t, ext) + } + } else { + assert.Error(t, err) + assert.Equal(t, tt.wantErrMsg, 
err.Error()) + assert.Equal(t, 0, len(app.builtExtensions)) + } + }) + } +} + +// badExtensionFactory is a factory that returns no error but returns a nil object. +type badExtensionFactory struct{} + +func (b badExtensionFactory) Type() configmodels.Type { + return "bf" +} + +func (b badExtensionFactory) CreateDefaultConfig() configmodels.Extension { + return &configmodels.ExtensionSettings{} +} + +func (b badExtensionFactory) CreateExtension(_ context.Context, _ component.ExtensionCreateParams, _ configmodels.Extension) (component.ServiceExtension, error) { + return nil, nil +} + +func TestApplication_GetFactory(t *testing.T) { + // Create some factories. + exampleReceiverFactory := &componenttest.ExampleReceiverFactory{} + exampleProcessorFactory := &componenttest.ExampleProcessorFactory{} + exampleExporterFactory := &componenttest.ExampleExporterFactory{} + exampleExtensionFactory := &componenttest.ExampleExtensionFactory{} + + factories := component.Factories{ + Receivers: map[configmodels.Type]component.ReceiverFactory{ + exampleReceiverFactory.Type(): exampleReceiverFactory, + }, + Processors: map[configmodels.Type]component.ProcessorFactory{ + exampleProcessorFactory.Type(): exampleProcessorFactory, + }, + Exporters: map[configmodels.Type]component.ExporterFactory{ + exampleExporterFactory.Type(): exampleExporterFactory, + }, + Extensions: map[configmodels.Type]component.ExtensionFactory{ + exampleExtensionFactory.Type(): exampleExtensionFactory, + }, + } + + // Create an App with factories. + app, err := New(Parameters{Factories: factories}) + require.NoError(t, err) + + // Verify GetFactory call for all component kinds. + + factory := app.GetFactory(component.KindReceiver, exampleReceiverFactory.Type()) + assert.EqualValues(t, exampleReceiverFactory, factory) + factory = app.GetFactory(component.KindReceiver, "wrongtype") + assert.EqualValues(t, nil, factory) + + factory = app.GetFactory(component.KindProcessor, exampleProcessorFactory.Type()) + assert.EqualValues(t, exampleProcessorFactory, factory) + factory = app.GetFactory(component.KindProcessor, "wrongtype") + assert.EqualValues(t, nil, factory) + + factory = app.GetFactory(component.KindExporter, exampleExporterFactory.Type()) + assert.EqualValues(t, exampleExporterFactory, factory) + factory = app.GetFactory(component.KindExporter, "wrongtype") + assert.EqualValues(t, nil, factory) + + factory = app.GetFactory(component.KindExtension, exampleExtensionFactory.Type()) + assert.EqualValues(t, exampleExtensionFactory, factory) + factory = app.GetFactory(component.KindExtension, "wrongtype") + assert.EqualValues(t, nil, factory) +} + +func createExampleApplication(t *testing.T) *Application { + // Create some factories. 
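+ // One example receiver, exporter, and extension are registered and wired
+ // into a single traces pipeline; the GetExtensions/GetExporters tests below
+ // rely on exactly this shape.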
+ exampleReceiverFactory := &componenttest.ExampleReceiverFactory{}
+ exampleProcessorFactory := &componenttest.ExampleProcessorFactory{}
+ exampleExporterFactory := &componenttest.ExampleExporterFactory{}
+ exampleExtensionFactory := &componenttest.ExampleExtensionFactory{}
+ factories := component.Factories{
+ Receivers: map[configmodels.Type]component.ReceiverFactory{
+ exampleReceiverFactory.Type(): exampleReceiverFactory,
+ },
+ Processors: map[configmodels.Type]component.ProcessorFactory{
+ exampleProcessorFactory.Type(): exampleProcessorFactory,
+ },
+ Exporters: map[configmodels.Type]component.ExporterFactory{
+ exampleExporterFactory.Type(): exampleExporterFactory,
+ },
+ Extensions: map[configmodels.Type]component.ExtensionFactory{
+ exampleExtensionFactory.Type(): exampleExtensionFactory,
+ },
+ }
+
+ app, err := New(Parameters{
+ Factories: factories,
+ ConfigFactory: func(_ *viper.Viper, _ *cobra.Command, factories component.Factories) (c *configmodels.Config, err error) {
+ config := &configmodels.Config{
+ Receivers: map[string]configmodels.Receiver{
+ string(exampleReceiverFactory.Type()): exampleReceiverFactory.CreateDefaultConfig(),
+ },
+ Exporters: map[string]configmodels.Exporter{
+ string(exampleExporterFactory.Type()): exampleExporterFactory.CreateDefaultConfig(),
+ },
+ Extensions: map[string]configmodels.Extension{
+ string(exampleExtensionFactory.Type()): exampleExtensionFactory.CreateDefaultConfig(),
+ },
+ Service: configmodels.Service{
+ Extensions: []string{string(exampleExtensionFactory.Type())},
+ Pipelines: map[string]*configmodels.Pipeline{
+ "trace": {
+ Name: "traces",
+ InputType: configmodels.TracesDataType,
+ Receivers: []string{string(exampleReceiverFactory.Type())},
+ Processors: []string{},
+ Exporters: []string{string(exampleExporterFactory.Type())},
+ },
+ },
+ },
+ }
+ return config, nil
+ },
+ })
+ require.NoError(t, err)
+ return app
+}
+
+func TestApplication_GetExtensions(t *testing.T) {
+ app := createExampleApplication(t)
+
+ appDone := make(chan struct{})
+ go func() {
+ defer close(appDone)
+ assert.NoError(t, app.Run())
+ }()
+
+ assert.Equal(t, Starting, <-app.GetStateChannel())
+ assert.Equal(t, Running, <-app.GetStateChannel())
+
+ // Verify GetExtensions(). The results must match the extensions configured in createExampleApplication above.
+
+ extMap := app.GetExtensions()
+ var extTypes []string
+ for cfg, ext := range extMap {
+ assert.NotNil(t, ext)
+ extTypes = append(extTypes, string(cfg.Type()))
+ }
+ sort.Strings(extTypes)
+
+ assert.Equal(t, []string{"exampleextension"}, extTypes)
+
+ // Stop the Application.
+ close(app.stopTestChan)
+ <-appDone
+}
+
+func TestApplication_GetExporters(t *testing.T) {
+ app := createExampleApplication(t)
+
+ appDone := make(chan struct{})
+ go func() {
+ defer close(appDone)
+ assert.NoError(t, app.Run())
+ }()
+
+ assert.Equal(t, Starting, <-app.GetStateChannel())
+ assert.Equal(t, Running, <-app.GetStateChannel())
+
+ // Verify GetExporters().
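+ // GetExporters returns a nested map keyed first by data type and then by
+ // exporter config; only the traces entry should be populated for this app.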
+ + expMap := app.GetExporters() + var expTypes []string + var expCfg configmodels.Exporter + for _, m := range expMap { + for cfg, exp := range m { + if exp != nil { + expTypes = append(expTypes, string(cfg.Type())) + assert.Nil(t, expCfg) + expCfg = cfg + } + } + } + sort.Strings(expTypes) + + assert.Equal(t, []string{"exampleexporter"}, expTypes) + + assert.EqualValues(t, 0, len(expMap[configmodels.MetricsDataType])) + assert.NotNil(t, expMap[configmodels.TracesDataType][expCfg]) + assert.EqualValues(t, "exampleexporter", expCfg.Type()) + + // Stop the Application. + close(app.stopTestChan) + <-appDone +} + +func TestSetFlag(t *testing.T) { + factories, err := defaultcomponents.Components() + require.NoError(t, err) + params := Parameters{ + Factories: factories, + } + t.Run("unknown_component", func(t *testing.T) { + app, err := New(params) + require.NoError(t, err) + err = app.rootCmd.ParseFlags([]string{ + "--config=testdata/otelcol-config.yaml", + "--set=processors.doesnotexist.timeout=2s", + }) + require.NoError(t, err) + cfg, err := FileLoaderConfigFactory(app.v, app.rootCmd, factories) + require.Error(t, err) + require.Nil(t, cfg) + + }) + t.Run("component_not_added_to_pipeline", func(t *testing.T) { + app, err := New(params) + require.NoError(t, err) + err = app.rootCmd.ParseFlags([]string{ + "--config=testdata/otelcol-config.yaml", + "--set=processors.batch/foo.timeout=2s", + }) + require.NoError(t, err) + cfg, err := FileLoaderConfigFactory(app.v, app.rootCmd, factories) + require.NoError(t, err) + assert.NotNil(t, cfg) + err = config.ValidateConfig(cfg, zap.NewNop()) + require.NoError(t, err) + + var processors []string + for k := range cfg.Processors { + processors = append(processors, k) + } + sort.Strings(processors) + // batch/foo is not added to the pipeline + assert.Equal(t, []string{"attributes", "batch", "batch/foo", "queued_retry"}, processors) + assert.Equal(t, []string{"attributes", "batch", "queued_retry"}, cfg.Service.Pipelines["traces"].Processors) + }) + t.Run("ok", func(t *testing.T) { + app, err := New(params) + require.NoError(t, err) + + err = app.rootCmd.ParseFlags([]string{ + "--config=testdata/otelcol-config.yaml", + "--set=processors.batch.timeout=2s", + // Arrays are overridden and object arrays cannot be indexed + // this creates actions array of size 1 + "--set=processors.attributes.actions.key=foo", + "--set=processors.attributes.actions.value=bar", + "--set=receivers.jaeger.protocols.grpc.endpoint=localhost:12345", + "--set=extensions.health_check.port=8080", + }) + require.NoError(t, err) + cfg, err := FileLoaderConfigFactory(app.v, app.rootCmd, factories) + require.NoError(t, err) + require.NotNil(t, cfg) + err = config.ValidateConfig(cfg, zap.NewNop()) + require.NoError(t, err) + + assert.Equal(t, 3, len(cfg.Processors)) + batch := cfg.Processors["batch"].(*batchprocessor.Config) + assert.Equal(t, time.Second*2, batch.Timeout) + jaeger := cfg.Receivers["jaeger"].(*jaegerreceiver.Config) + assert.Equal(t, "localhost:12345", jaeger.GRPC.NetAddr.Endpoint) + attributes := cfg.Processors["attributes"].(*attributesprocessor.Config) + require.Equal(t, 1, len(attributes.Actions)) + assert.Equal(t, "foo", attributes.Actions[0].Key) + assert.Equal(t, "bar", attributes.Actions[0].Value) + }) +} + +func TestSetFlag_component_does_not_exist(t *testing.T) { + factories, err := defaultcomponents.Components() + require.NoError(t, err) + + v := config.NewViper() + cmd := &cobra.Command{} + addSetFlag(cmd.Flags()) + fs := &flag.FlagSet{} + builder.Flags(fs) + 
cmd.Flags().AddGoFlagSet(fs) + cmd.ParseFlags([]string{ + "--config=testdata/otelcol-config.yaml", + "--set=processors.batch.timeout=2s", + // Arrays are overridden and object arrays cannot be indexed + // this creates actions array of size 1 + "--set=processors.attributes.actions.key=foo", + "--set=processors.attributes.actions.value=bar", + "--set=receivers.jaeger.protocols.grpc.endpoint=localhost:12345", + }) + cfg, err := FileLoaderConfigFactory(v, cmd, factories) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +func constructMimumalOpConfig(t *testing.T, factories component.Factories) *configmodels.Config { + configStr := ` +receivers: + otlp: + protocols: + grpc: +exporters: + logging: +processors: + batch: + +extensions: + +service: + extensions: + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [logging] +` + v := config.NewViper() + v.SetConfigType("yaml") + v.ReadConfig(strings.NewReader(configStr)) + cfg, err := config.Load(v, factories) + assert.NoError(t, err) + err = config.ValidateConfig(cfg, zap.NewNop()) + assert.NoError(t, err) + return cfg +} diff --git a/internal/otel_collector/service/service_windows.go b/internal/otel_collector/service/service_windows.go new file mode 100644 index 00000000000..df5018c65db --- /dev/null +++ b/internal/otel_collector/service/service_windows.go @@ -0,0 +1,147 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package service + +import ( + "fmt" + "syscall" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/eventlog" +) + +type WindowsService struct { + params Parameters + app *Application +} + +func NewWindowsService(params Parameters) *WindowsService { + return &WindowsService{params: params} +} + +// Execute implements https://godoc.org/golang.org/x/sys/windows/svc#Handler +func (s *WindowsService) Execute(args []string, requests <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) { + // The first argument supplied to service.Execute is the service name. 
If this is + // not provided for some reason, raise a relevant error to the system event log + if len(args) == 0 { + return false, 1213 // 1213: ERROR_INVALID_SERVICENAME + } + + elog, err := openEventLog(args[0]) + if err != nil { + return false, 1501 // 1501: ERROR_EVENTLOG_CANT_START + } + + appErrorChannel := make(chan error, 1) + + changes <- svc.Status{State: svc.StartPending} + if err = s.start(elog, appErrorChannel); err != nil { + elog.Error(3, fmt.Sprintf("failed to start service: %v", err)) + return false, 1064 // 1064: ERROR_EXCEPTION_IN_SERVICE + } + changes <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown} + + for req := range requests { + switch req.Cmd { + case svc.Interrogate: + changes <- req.CurrentStatus + + case svc.Stop, svc.Shutdown: + changes <- svc.Status{State: svc.StopPending} + if err := s.stop(appErrorChannel); err != nil { + elog.Error(3, fmt.Sprintf("errors occurred while shutting down the service: %v", err)) + } + changes <- svc.Status{State: svc.Stopped} + return false, 0 + + default: + elog.Error(3, fmt.Sprintf("unexpected service control request #%d", req.Cmd)) + return false, 1052 // 1052: ERROR_INVALID_SERVICE_CONTROL + } + } + + return false, 0 +} + +func (s *WindowsService) start(elog *eventlog.Log, appErrorChannel chan error) error { + var err error + s.app, err = newWithEventViewerLoggingHook(s.params, elog) + if err != nil { + return err + } + + // app.Start blocks until receiving a SIGTERM signal, so needs to be started + // asynchronously, but it will exit early if an error occurs on startup + go func() { appErrorChannel <- s.app.Run() }() + + // wait until the app is in the Running state + go func() { + for state := range s.app.GetStateChannel() { + if state == Running { + appErrorChannel <- nil + break + } + } + }() + + // wait until the app is in the Running state, or an error was returned + return <-appErrorChannel +} + +func (s *WindowsService) stop(appErrorChannel chan error) error { + // simulate a SIGTERM signal to terminate the application + s.app.signalsChannel <- syscall.SIGTERM + // return the response of app.Start + return <-appErrorChannel +} + +func openEventLog(serviceName string) (*eventlog.Log, error) { + elog, err := eventlog.Open(serviceName) + if err != nil { + return nil, fmt.Errorf("service failed to open event log: %w", err) + } + + return elog, nil +} + +func newWithEventViewerLoggingHook(params Parameters, elog *eventlog.Log) (*Application, error) { + params.LoggingOptions = append( + params.LoggingOptions, + zap.Hooks(func(entry zapcore.Entry) error { + msg := fmt.Sprintf("%v\r\n\r\nStack Trace:\r\n%v", entry.Message, entry.Stack) + + switch entry.Level { + case zapcore.FatalLevel, zapcore.PanicLevel, zapcore.DPanicLevel: + // golang.org/x/sys/windows/svc/eventlog does not support Critical level event logs + return elog.Error(3, msg) + case zapcore.ErrorLevel: + return elog.Error(3, msg) + case zapcore.WarnLevel: + return elog.Warning(2, msg) + case zapcore.InfoLevel: + return elog.Info(1, msg) + } + + // ignore Debug level logs + return nil + }), + ) + + return New(params) +} diff --git a/internal/otel_collector/service/service_windows_test.go b/internal/otel_collector/service/service_windows_test.go new file mode 100644 index 00000000000..4a3024ba2d9 --- /dev/null +++ b/internal/otel_collector/service/service_windows_test.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package service + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/svc" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/service/defaultcomponents" +) + +func TestWindowsService_Execute(t *testing.T) { + os.Args = []string{"otelcol", "--config", "testdata/otelcol-config-minimal.yaml"} + + factories, err := defaultcomponents.Components() + require.NoError(t, err) + + s := NewWindowsService(Parameters{Factories: factories, ApplicationStartInfo: componenttest.TestApplicationStartInfo()}) + + appDone := make(chan struct{}) + requests := make(chan svc.ChangeRequest) + changes := make(chan svc.Status) + go func() { + defer close(appDone) + ssec, errno := s.Execute([]string{"svc name"}, requests, changes) + assert.Equal(t, uint32(0), errno) + assert.False(t, ssec) + }() + + assert.Equal(t, svc.StartPending, (<-changes).State) + assert.Equal(t, svc.Running, (<-changes).State) + requests <- svc.ChangeRequest{Cmd: svc.Interrogate, CurrentStatus: svc.Status{State: svc.Running}} + assert.Equal(t, svc.Running, (<-changes).State) + requests <- svc.ChangeRequest{Cmd: svc.Stop} + assert.Equal(t, svc.StopPending, (<-changes).State) + assert.Equal(t, svc.Stopped, (<-changes).State) + <-appDone +} diff --git a/internal/otel_collector/service/set_flag.go b/internal/otel_collector/service/set_flag.go new file mode 100644 index 00000000000..61c155e3efa --- /dev/null +++ b/internal/otel_collector/service/set_flag.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "bytes" + "fmt" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "go.opentelemetry.io/collector/config" +) + +const ( + setFlagName = "set" + setFlagFileType = "properties" +) + +func addSetFlag(flagSet *pflag.FlagSet) { + flagSet.StringArray(setFlagName, []string{}, "Set arbitrary component config property. The component has to be defined in the config file and the flag has a higher precedence. Array config properties are overridden and maps are joined, note that only a single (first) array property can be set e.g. -set=processors.attributes.actions.key=some_key. Example --set=processors.batch.timeout=2s") +} + +// AddSetFlagProperties overrides properties from set flag(s) in supplied viper instance. 
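+// For example, --set=processors.batch.timeout=2s ends up stored under the viper
+// key processors::batch::timeout (see set_flag_test.go).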
+// The implementation reads set flag(s) from the cmd and passes the content to a new viper instance as a .properties file.
+// The properties from the new viper instance are then read and set on the supplied viper.
+func AddSetFlagProperties(v *viper.Viper, cmd *cobra.Command) error {
+ flagProperties, err := cmd.Flags().GetStringArray(setFlagName)
+ if err != nil {
+ return err
+ }
+ if len(flagProperties) == 0 {
+ return nil
+ }
+ b := &bytes.Buffer{}
+ for _, property := range flagProperties {
+ property = strings.TrimSpace(property)
+ if _, err := fmt.Fprintf(b, "%s\n", property); err != nil {
+ return err
+ }
+ }
+ viperFlags := config.NewViper()
+ viperFlags.SetConfigType(setFlagFileType)
+ if err := viperFlags.ReadConfig(b); err != nil {
+ return fmt.Errorf("failed to read set flag config: %v", err)
+ }
+
+ // The Viper implementation of v.MergeConfig(io.Reader) or v.MergeConfigMap(map[string]interface)
+ // does not work properly. This is because, if it attempts to merge into a nil object, it will fail here:
+ // https://github.com/spf13/viper/blob/3826be313591f83193f048520482a7b3cf17d506/viper.go#L1709
+
+ // The workaround is to call v.Set(string, interface) on all root properties from the config file;
+ // this correctly preserves the original config and sets it up for viper to overlay it with
+ // the --set params. Note that setting the root keys is important. This is
+ // because the viper .AllKeys() method does not return empty objects.
+ // For instance, with the following yaml structure:
+ // a:
+ //   b:
+ //   c: {}
+ //
+ // viper.AllKeys() would only return a.b, but not a.c. However, otel expects {} to behave
+ // the same as a nil object in its config file. Therefore we extract and set the root keys only,
+ // to catch both a.b and a.c.
+
+ rootKeys := map[string]struct{}{}
+ for _, k := range viperFlags.AllKeys() {
+ keys := strings.Split(k, config.ViperDelimiter)
+ if len(keys) > 0 {
+ rootKeys[keys[0]] = struct{}{}
+ }
+ }
+
+ for k := range rootKeys {
+ v.Set(k, v.Get(k))
+ }
+
+ // Now that we've copied the config into the viper "overrides", copy the --set flags
+ // as well.
+ for _, k := range viperFlags.AllKeys() {
+ v.Set(k, viperFlags.Get(k))
+ }
+
+ return nil
+}
diff --git a/internal/otel_collector/service/set_flag_test.go b/internal/otel_collector/service/set_flag_test.go
new file mode 100644
index 00000000000..f6f0c63ffcc
--- /dev/null
+++ b/internal/otel_collector/service/set_flag_test.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package service + +import ( + "testing" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSetFlags(t *testing.T) { + cmd := &cobra.Command{} + addSetFlag(cmd.Flags()) + + err := cmd.ParseFlags([]string{ + "--set=processors.batch.timeout=2s", + "--set=processors.batch/foo.timeout=3s", + "--set=receivers.otlp.protocols.grpc.endpoint=localhost:1818", + "--set=exporters.kafka.brokers=foo:9200,foo2:9200", + }) + require.NoError(t, err) + + v := viper.New() + err = AddSetFlagProperties(v, cmd) + require.NoError(t, err) + + settings := v.AllSettings() + assert.Equal(t, 4, len(settings)) + assert.Equal(t, "2s", v.Get("processors::batch::timeout")) + assert.Equal(t, "3s", v.Get("processors::batch/foo::timeout")) + assert.Equal(t, "foo:9200,foo2:9200", v.Get("exporters::kafka::brokers")) + assert.Equal(t, "localhost:1818", v.Get("receivers::otlp::protocols::grpc::endpoint")) +} + +func TestSetFlags_err_set_flag(t *testing.T) { + cmd := &cobra.Command{} + v := viper.New() + err := AddSetFlagProperties(v, cmd) + require.Error(t, err) +} + +func TestSetFlags_empty(t *testing.T) { + cmd := &cobra.Command{} + addSetFlag(cmd.Flags()) + v := viper.New() + err := AddSetFlagProperties(v, cmd) + require.NoError(t, err) + assert.Equal(t, 0, len(v.AllSettings())) +} diff --git a/internal/otel_collector/service/telemetry.go b/internal/otel_collector/service/telemetry.go new file mode 100644 index 00000000000..a6ce4c18253 --- /dev/null +++ b/internal/otel_collector/service/telemetry.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "net/http" + "strings" + "unicode" + + "contrib.go.opencensus.io/exporter/prometheus" + "github.com/google/uuid" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/internal/collector/telemetry" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/processor/queuedprocessor" + fluentobserv "go.opentelemetry.io/collector/receiver/fluentforwardreceiver/observ" + "go.opentelemetry.io/collector/receiver/kafkareceiver" + telemetry2 "go.opentelemetry.io/collector/service/internal/telemetry" + "go.opentelemetry.io/collector/translator/conventions" +) + +// applicationTelemetry is application's own telemetry. 
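+// It is a package-level variable rather than a field on Application so that
+// tests can substitute a mock implementation (see mockAppTelemetry in service_test.go).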
+var applicationTelemetry appTelemetryExporter = &appTelemetry{} + +type appTelemetryExporter interface { + init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error + shutdown() error +} + +type appTelemetry struct { + views []*view.View + server *http.Server +} + +func (tel *appTelemetry) init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error { + level := configtelemetry.GetMetricsLevelFlagValue() + metricsAddr := telemetry.GetMetricsAddr() + + if level == configtelemetry.LevelNone || metricsAddr == "" { + return nil + } + + processMetricsViews, err := telemetry2.NewProcessMetricsViews(ballastSizeBytes) + if err != nil { + return err + } + + var views []*view.View + views = append(views, obsreport.Configure(level)...) + views = append(views, processor.MetricViews()...) + views = append(views, queuedprocessor.MetricViews()...) + views = append(views, batchprocessor.MetricViews()...) + views = append(views, kafkareceiver.MetricViews()...) + views = append(views, processMetricsViews.Views()...) + views = append(views, fluentobserv.MetricViews()...) + tel.views = views + if err = view.Register(views...); err != nil { + return err + } + + processMetricsViews.StartCollection() + + // Until we can use a generic metrics exporter, default to Prometheus. + opts := prometheus.Options{ + Namespace: telemetry.GetMetricsPrefix(), + } + + var instanceID string + if telemetry.GetAddInstanceID() { + instanceUUID, _ := uuid.NewRandom() + instanceID = instanceUUID.String() + opts.ConstLabels = map[string]string{ + sanitizePrometheusKey(conventions.AttributeServiceInstance): instanceID, + } + } + + pe, err := prometheus.NewExporter(opts) + if err != nil { + return err + } + + view.RegisterExporter(pe) + + logger.Info( + "Serving Prometheus metrics", + zap.String("address", metricsAddr), + zap.Int8("level", int8(level)), // TODO: make it human friendly + zap.String(conventions.AttributeServiceInstance, instanceID), + ) + + mux := http.NewServeMux() + mux.Handle("/metrics", pe) + + tel.server = &http.Server{ + Addr: metricsAddr, + Handler: mux, + } + + go func() { + serveErr := tel.server.ListenAndServe() + if serveErr != nil && serveErr != http.ErrServerClosed { + asyncErrorChannel <- serveErr + } + }() + + return nil +} + +func (tel *appTelemetry) shutdown() error { + view.Unregister(tel.views...) 
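+ // Unregister the views first so that no new measurements are recorded
+ // while the Prometheus server (if any) is closed below.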
+
+ if tel.server != nil {
+ return tel.server.Close()
+ }
+
+ return nil
+}
+
+func sanitizePrometheusKey(str string) string {
+ runeFilterMap := func(r rune) rune {
+ if unicode.IsDigit(r) || unicode.IsLetter(r) || r == '_' {
+ return r
+ }
+ return '_'
+ }
+ return strings.Map(runeFilterMap, str)
+}
diff --git a/internal/otel_collector/service/testdata/otelcol-config-minimal.yaml b/internal/otel_collector/service/testdata/otelcol-config-minimal.yaml
new file mode 100644
index 00000000000..372bd08d5cb
--- /dev/null
+++ b/internal/otel_collector/service/testdata/otelcol-config-minimal.yaml
@@ -0,0 +1,15 @@
+receivers:
+  otlp:
+    protocols:
+      grpc:
+
+exporters:
+  otlp:
+    endpoint: "localhost:14250"
+
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      exporters: [otlp]
+
diff --git a/internal/otel_collector/service/testdata/otelcol-config.yaml b/internal/otel_collector/service/testdata/otelcol-config.yaml
new file mode 100644
index 00000000000..ac3bd1fdcb1
--- /dev/null
+++ b/internal/otel_collector/service/testdata/otelcol-config.yaml
@@ -0,0 +1,31 @@
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+
+exporters:
+  opencensus:
+    endpoint: "localhost:55678"
+
+processors:
+  attributes:
+    actions:
+      - key: key1
+        value: 123
+        action: insert
+  queued_retry:
+  batch:
+
+extensions:
+  health_check: {}
+  pprof:
+  zpages:
+
+service:
+  extensions: [pprof, zpages, health_check]
+  pipelines:
+    traces:
+      receivers: [jaeger]
+      processors: [attributes, batch, queued_retry]
+      exporters: [opencensus]
+
diff --git a/internal/otel_collector/testbed/CCRepo_result.png b/internal/otel_collector/testbed/CCRepo_result.png
new file mode 100644
index 00000000000..a6dce048014
Binary files /dev/null and b/internal/otel_collector/testbed/CCRepo_result.png differ
diff --git a/internal/otel_collector/testbed/README.md b/internal/otel_collector/testbed/README.md
new file mode 100644
index 00000000000..93a8dc5d2d6
--- /dev/null
+++ b/internal/otel_collector/testbed/README.md
@@ -0,0 +1,136 @@
+# OpenTelemetry Collector Testbed
+
+Testbed is a controlled environment and set of tools for conducting end-to-end tests for the Otel Collector,
+including reproducible short-term benchmarks, correctness tests, long-running stability tests and
+maximum load stress tests.
+
+## Usage
+
+For each type of test that should have a summary report, create a new directory and a test suite function which utilizes `*testing.M`. This function should delegate all functionality to `testbed.DoTestMain`, supplying a global instance of `testbed.TestResultsSummary` to it.
+
+Each test case within the suite should create a `testbed.TestCase` and supply implementations of each of the various interfaces the `NewTestCase` function takes as parameters.
+
+## DataFlow
+
+`testbed.TestCase` uses `LoadGenerator` and `MockBackend` to further encapsulate pluggable components. `LoadGenerator` further encapsulates `DataProvider` and `DataSender` in order to generate and send data. `MockBackend` further encapsulates `DataReceiver` and provides consume functionality.
+
+For instance, when using the existing end-to-end test, the general dataflow is as follows (note that `MockBackend` does not really have a separate consumer instance; the diagram draws it as one only to make the flow intuitive):
+
+![e2e diagram](./e2e_diagram.jpeg)
+
+## Pluggable Test Components
+
+* `DataProvider` - Generates test data to send to the receiver under test.
+  * `PerfTestDataProvider` - Implementation of the `DataProvider` for use in performance tests.
Tracing IDs are based on the incremented batch and data items counters.
+  * `GoldenDataProvider` - Implementation of `DataProvider` for use in correctness tests. Provides data from the "Golden" dataset generated using pairwise combinatorial testing techniques.
+* `DataSender` - Sends data to the collector instance under test.
+  * `JaegerGRPCDataSender` - Implementation of `DataSender` which sends to `jaeger` receiver.
+  * `OCTraceDataSender` - Implementation of `DataSender` which sends to `opencensus` receiver.
+  * `OCMetricsDataSender` - Implementation of `DataSender` which sends to `opencensus` receiver.
+  * `OTLPTraceDataSender` - Implementation of `DataSender` which sends to `otlp` receiver.
+  * `OTLPMetricsDataSender` - Implementation of `DataSender` which sends to `otlp` receiver.
+  * `ZipkinDataSender` - Implementation of `DataSender` which sends to `zipkin` receiver.
+* `DataReceiver` - Receives data from the collector instance under test and stores it for use in test assertions.
+  * `OCDataReceiver` - Implementation of `DataReceiver` which receives data from `opencensus` exporter.
+  * `JaegerDataReceiver` - Implementation of `DataReceiver` which receives data from `jaeger` exporter.
+  * `OTLPDataReceiver` - Implementation of `DataReceiver` which receives data from `otlp` exporter.
+  * `ZipkinDataReceiver` - Implementation of `DataReceiver` which receives data from `zipkin` exporter.
+* `OtelcolRunner` - Configures, starts and stops one or more instances of otelcol, which are the subject of the tests being executed.
+  * `ChildProcess` - Implementation of `OtelcolRunner` which runs a single otelcol as a child process on the same machine as the test executor.
+  * `InProcessCollector` - Implementation of `OtelcolRunner` which runs a single otelcol as a goroutine within the same process as the test executor.
+* `TestCaseValidator` - Validates and reports on test results.
+  * `PerfTestValidator` - Implementation of `TestCaseValidator` for test suites using `PerformanceResults` for summarizing results.
+  * `CorrectnessTestValidator` - Implementation of `TestCaseValidator` for test suites using `CorrectnessResults` for summarizing results.
+* `TestResultsSummary` - Records itemized test case results plus a summary of one category of testing.
+  * `PerformanceResults` - Implementation of `TestResultsSummary` with fields suitable for reporting performance test results.
+  * `CorrectnessResults` - Implementation of `TestResultsSummary` with fields suitable for reporting data translation correctness test results.
+
+## Adding New Receivers and/or Exporters to the Testbed
+
+Generally, when designing a test for new exporter and receiver components, developers should mainly focus on designing and implementing the components with a yellow background in the diagram above, as the other components are implemented by the testbed framework:
+
+* `DataSender` - This part should provide the interfaces below for testing purposes:
+
+  * `Start()` - Start sender and connect to the configured endpoint. Must be called before sending data.
+  * `Flush()` - Send any accumulated data.
+  * `GetCollectorPort()` - Return the port to which this sender will send data.
+  * `GenConfigYAMLStr()` - Generate a config string to place in receiver part of collector config so that it can receive data from this sender.
+  * `ProtocolName()` - Return protocol name to use in collector config pipeline.
+
+* `DataReceiver` - This part should provide the interfaces below for testing purposes:
+
+  * `Start()` - Start receiver.
+  * `Stop()` - Stop receiver.
+  * `GenConfigYAMLStr()` - Generate a config string to place in exporter part of collector config so that it can send data to this receiver.
+  * `ProtocolName()` - Return protocol name to use in collector config pipeline.
+
+* `Testing` - This part varies with the kind of testing developers would like to do. For existing implementations, refer to [End-to-End testing](https://github.com/EdZou/opentelemetry-collector/blob/master/testbed/tests/e2e_test.go), [Metrics testing](https://github.com/EdZou/opentelemetry-collector/blob/master/testbed/tests/metric_test.go), [Traces testing](https://github.com/EdZou/opentelemetry-collector/blob/master/testbed/tests/trace_test.go) and [Correctness testing](https://github.com/EdZou/opentelemetry-collector/blob/master/testbed/correctness/correctness_test.go). For instance, if developers would like to design a trace test for a new exporter and receiver:
+
+  * ```go
+    func TestTrace10kSPS(t *testing.T) {
+      tests := []struct {
+        name         string
+        sender       testbed.DataSender
+        receiver     testbed.DataReceiver
+        resourceSpec testbed.ResourceSpec
+      }{
+        {
+          "NewExporterOrReceiver",
+          testbed.NewXXXDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
+          testbed.NewXXXDataReceiver(testbed.GetAvailablePort(t)),
+          testbed.ResourceSpec{
+            ExpectedMaxCPU: XX,
+            ExpectedMaxRAM: XX,
+          },
+        },
+        ...
+      }
+
+      processors := map[string]string{
+        "batch": `
+  batch:
+`,
+      }
+
+      for _, test := range tests {
+        t.Run(test.name, func(t *testing.T) {
+          Scenario10kItemsPerSecond(
+            t,
+            test.sender,
+            test.receiver,
+            test.resourceSpec,
+            performanceResultsSummary,
+            processors,
+          )
+        })
+      }
+    }
+    ```
+
+## Run Tests and Get Results
+
+Here are some examples of how to run the tests and collect the results.
+
+1. Under the [collector-contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) repo, run the following commands:
+
+```
+  cd testbed/tests
+  TESTBED_CONFIG=local.yaml go test -v
+```
+
+Then get the result:
+
+![collector-contrib tests result](./CCRepo_result.png)
+
+2. Under the [Collector/testbed/](https://github.com/EdZou/opentelemetry-collector/tree/master/testbed) directory, taking correctness tests as an example, run:
+
+```
+  cd correctness
+  source ~/.bash_profile # remember to set the required environment variables here
+  ./runtests.sh
+```
+
+Then get the result:
+
+![collector correctness tests result](./correctness_result.png)
+
diff --git a/internal/otel_collector/testbed/correctness/.gitignore b/internal/otel_collector/testbed/correctness/.gitignore
new file mode 100644
index 00000000000..0482cb4e736
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/.gitignore
@@ -0,0 +1,2 @@
+results
+
diff --git a/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go b/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go
new file mode 100644
index 00000000000..c8f44b06ebb
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "log" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/service/defaultcomponents" + "go.opentelemetry.io/collector/testbed/correctness" + "go.opentelemetry.io/collector/testbed/testbed" +) + +type correctnessTestCase struct { + t *testing.T + sender testbed.DataSender + receiver testbed.DataReceiver + harness *testHarness + collector *testbed.InProcessCollector +} + +func newCorrectnessTestCase( + t *testing.T, + sender testbed.DataSender, + receiver testbed.DataReceiver, + harness *testHarness, +) *correctnessTestCase { + return &correctnessTestCase{t: t, sender: sender, receiver: receiver, harness: harness} +} + +func (tc *correctnessTestCase) startCollector() { + tc.collector = testbed.NewInProcessCollector(componentFactories(tc.t)) + _, err := tc.collector.PrepareConfig(correctness.CreateConfigYaml(tc.sender, tc.receiver, nil, "metrics")) + require.NoError(tc.t, err) + rd, err := newResultsDir(tc.t.Name()) + require.NoError(tc.t, err) + err = rd.mkDir() + require.NoError(tc.t, err) + fname, err := rd.fullPath("agent.log") + require.NoError(tc.t, err) + log.Println("starting collector") + err = tc.collector.Start(testbed.StartParams{ + Name: "Agent", + LogFilePath: fname, + CmdArgs: []string{"--metrics-level=NONE"}, + }) + require.NoError(tc.t, err) +} + +func (tc *correctnessTestCase) stopCollector() { + _, err := tc.collector.Stop() + require.NoError(tc.t, err) +} + +func (tc *correctnessTestCase) startTestbedSender() { + log.Println("starting testbed sender") + err := tc.sender.Start() + require.NoError(tc.t, err) +} + +func (tc *correctnessTestCase) startTestbedReceiver() { + log.Println("starting testbed receiver") + err := tc.receiver.Start(&testbed.MockTraceConsumer{}, tc.harness, &testbed.MockLogConsumer{}) + require.NoError(tc.t, err) +} + +func (tc *correctnessTestCase) stopTestbedReceiver() { + log.Println("stopping testbed receiver") + err := tc.receiver.Stop() + require.NoError(tc.t, err) +} + +func (tc *correctnessTestCase) sendFirstMetric() { + tc.harness.sendNextMetric() +} + +func (tc *correctnessTestCase) waitForAllMetrics() { + log.Println("waiting for allMetricsReceived") + for { + select { + case <-time.After(10 * time.Second): + tc.t.Fatal("Deadline exceeded while waiting to receive metrics") + return + case <-tc.harness.allMetricsReceived: + log.Println("all metrics received") + return + } + } +} + +func componentFactories(t *testing.T) component.Factories { + factories, err := defaultcomponents.Components() + require.NoError(t, err) + return factories +} diff --git a/internal/otel_collector/testbed/correctness/metrics/doc.go b/internal/otel_collector/testbed/correctness/metrics/doc.go new file mode 100644 index 00000000000..f39c88252de --- /dev/null +++ b/internal/otel_collector/testbed/correctness/metrics/doc.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metrics contains functionality for testing an otelcol pipeline end to end for metric correctness.
+// Partly because of how Prometheus works (being pull-based), metrics correctness works differently than
+// the performance testbed in the parent directory. Whereas performance testing sends a relatively large
+// number of datapoints into the collector, this package sends metrics one at a time, and only sends the
+// next datapoint when the previous datapoint has been processed and compared to the original.
+//
+// Mostly similar to the performance testing pipeline, this pipeline looks like the following:
+//
+// [testbed exporter] -> [otelcol receiver] -> [otelcol exporter] -> [testbed receiver] -> [test harness]
+//
+// the difference being the testHarness, which is connected to [testbed receiver] as its metrics
+// consumer, listening for datapoints. To start the process, one datapoint is sent into the testbed
+// exporter, it goes through the pipeline, and arrives at the testbed receiver, which passes it along to the
+// test harness. The test harness compares the received datapoint to the original datapoint it sent, and saves
+// any diffs it finds in a diffAccumulator instance. Then it sends the next datapoint. This continues until
+// there are no more datapoints. The simple diagram above should have a loop, where [test harness] connects
+// back to [testbed exporter].
+//
+// Datapoints are supplied to the testHarness by a metricSupplier, which receives all of the metrics it needs
+// upfront. Those metrics are in turn generated by a metricGenerator, which receives its config from a PICT
+// generated file, as the trace correctness functionality does.
+package metrics
diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_diff.go b/internal/otel_collector/testbed/correctness/metrics/metric_diff.go
new file mode 100644
index 00000000000..edb2a33a644
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/metrics/metric_diff.go
@@ -0,0 +1,332 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// MetricDiff is intended to support producing human-readable diffs between two MetricData structs during
+// testing. Two MetricDatas, when compared, could produce a list of MetricDiffs containing all of their
+// differences, which can be used to report the differences between the expected and actual values.
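+//
+// For example, a metric that arrives with a name different from the one that was sent is recorded
+// (by diffMetricDescriptor below) as a MetricDiff with Msg "Metric Name", ExpectedValue holding the
+// sent name, and ActualValue holding the received name.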
+type MetricDiff struct {
+ ExpectedValue interface{}
+ ActualValue interface{}
+ Msg string
+}
+
+func (mf MetricDiff) String() string {
+ return fmt.Sprintf("{msg='%v' expected=[%v] actual=[%v]}\n", mf.Msg, mf.ExpectedValue, mf.ActualValue)
+}
+
+func pdmToPDRM(pdm []pdata.Metrics) (out []pdata.ResourceMetrics) {
+ for _, md := range pdm {
+ rms := md.ResourceMetrics()
+ for i := 0; i < rms.Len(); i++ {
+ rm := rms.At(i)
+ out = append(out, rm)
+ }
+ }
+ return out
+}
+
+func diffRMSlices(sent []pdata.ResourceMetrics, recd []pdata.ResourceMetrics) []*MetricDiff {
+ var diffs []*MetricDiff
+ if len(sent) != len(recd) {
+ return []*MetricDiff{{
+ ExpectedValue: len(sent),
+ ActualValue: len(recd),
+ Msg: "Sent vs received ResourceMetrics not equal length",
+ }}
+ }
+ for i := 0; i < len(sent); i++ {
+ sentRM := sent[i]
+ recdRM := recd[i]
+ diffs = diffRMs(diffs, sentRM, recdRM)
+ }
+ return diffs
+}
+
+func diffRMs(diffs []*MetricDiff, expected pdata.ResourceMetrics, actual pdata.ResourceMetrics) []*MetricDiff {
+ diffs = diffResource(diffs, expected.Resource(), actual.Resource())
+ diffs = diffILMSlice(
+ diffs,
+ expected.InstrumentationLibraryMetrics(),
+ actual.InstrumentationLibraryMetrics(),
+ )
+ return diffs
+}
+
+func diffILMSlice(
+ diffs []*MetricDiff,
+ expected pdata.InstrumentationLibraryMetricsSlice,
+ actual pdata.InstrumentationLibraryMetricsSlice,
+) []*MetricDiff {
+ var mismatch bool
+ // Pass expected before actual so the ExpectedValue/ActualValue fields of the
+ // resulting diff are not swapped.
+ diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "InstrumentationLibraryMetricsSlice len")
+ if mismatch {
+ return diffs
+ }
+ for i := 0; i < expected.Len(); i++ {
+ diffs = diffILM(diffs, expected.At(i), actual.At(i))
+ }
+ return diffs
+}
+
+func diffILM(
+ diffs []*MetricDiff,
+ expected pdata.InstrumentationLibraryMetrics,
+ actual pdata.InstrumentationLibraryMetrics,
+) []*MetricDiff {
+ return diffMetrics(diffs, expected.Metrics(), actual.Metrics())
+}
+
+func diffMetrics(diffs []*MetricDiff, expected pdata.MetricSlice, actual pdata.MetricSlice) []*MetricDiff {
+ var mismatch bool
+ // Pass expected before actual so the ExpectedValue/ActualValue fields of the
+ // resulting diff are not swapped.
+ diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "MetricSlice len")
+ if mismatch {
+ return diffs
+ }
+ for i := 0; i < expected.Len(); i++ {
+ diffs = DiffMetric(diffs, expected.At(i), actual.At(i))
+ }
+ return diffs
+}
+
+// DiffMetric appends to diffs any differences between the expected and actual metric and returns the result.
+func DiffMetric(diffs []*MetricDiff, expected pdata.Metric, actual pdata.Metric) []*MetricDiff {
+ var mismatch bool
+ diffs, mismatch = diffMetricDescriptor(diffs, expected, actual)
+ if mismatch {
+ return diffs
+ }
+ switch actual.DataType() {
+ case pdata.MetricDataTypeIntGauge:
+ diffs = diffIntPts(diffs, expected.IntGauge().DataPoints(), actual.IntGauge().DataPoints())
+ case pdata.MetricDataTypeDoubleGauge:
+ diffs = diffDoublePts(diffs, expected.DoubleGauge().DataPoints(), actual.DoubleGauge().DataPoints())
+ case pdata.MetricDataTypeIntSum:
+ diffs = diff(diffs, expected.IntSum().IsMonotonic(), actual.IntSum().IsMonotonic(), "IntSum IsMonotonic")
+ diffs = diff(diffs, expected.IntSum().AggregationTemporality(), actual.IntSum().AggregationTemporality(), "IntSum AggregationTemporality")
+ diffs = diffIntPts(diffs, expected.IntSum().DataPoints(), actual.IntSum().DataPoints())
+ case pdata.MetricDataTypeDoubleSum:
+ diffs = diff(diffs, expected.DoubleSum().IsMonotonic(), actual.DoubleSum().IsMonotonic(), "DoubleSum IsMonotonic")
+ diffs = diff(diffs, expected.DoubleSum().AggregationTemporality(), actual.DoubleSum().AggregationTemporality(), "DoubleSum AggregationTemporality")
+ diffs = diffDoublePts(diffs, expected.DoubleSum().DataPoints(),
actual.DoubleSum().DataPoints()) + case pdata.MetricDataTypeIntHistogram: + diffs = diff(diffs, expected.IntHistogram().AggregationTemporality(), actual.IntHistogram().AggregationTemporality(), "IntHistogram AggregationTemporality") + diffs = diffIntHistogramPts(diffs, expected.IntHistogram().DataPoints(), actual.IntHistogram().DataPoints()) + case pdata.MetricDataTypeDoubleHistogram: + diffs = diff(diffs, expected.DoubleHistogram().AggregationTemporality(), actual.DoubleHistogram().AggregationTemporality(), "DoubleHistogram AggregationTemporality") + diffs = diffDoubleHistogramPts(diffs, expected.DoubleHistogram().DataPoints(), actual.DoubleHistogram().DataPoints()) + } + return diffs +} + +func diffMetricDescriptor( + diffs []*MetricDiff, + expected pdata.Metric, + actual pdata.Metric, +) ([]*MetricDiff, bool) { + diffs = diff(diffs, expected.Name(), actual.Name(), "Metric Name") + diffs = diff(diffs, expected.Description(), actual.Description(), "Metric Description") + diffs = diff(diffs, expected.Unit(), actual.Unit(), "Metric Unit") + return diffValues(diffs, expected.DataType(), actual.DataType(), "Metric Type") +} + +func diffDoublePts( + diffs []*MetricDiff, + expected pdata.DoubleDataPointSlice, + actual pdata.DoubleDataPointSlice, +) []*MetricDiff { + var mismatch bool + diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "DoubleDataPointSlice len") + if mismatch { + return diffs + } + for i := 0; i < expected.Len(); i++ { + diffs = diffDoublePt(diffs, expected.At(i), actual.At(i)) + } + return diffs +} + +func diffDoublePt( + diffs []*MetricDiff, + expected pdata.DoubleDataPoint, + actual pdata.DoubleDataPoint, +) []*MetricDiff { + diffs = diff(diffs, expected.Value(), actual.Value(), "DoubleDataPoint value") + return diffDoubleExemplars(diffs, expected.Exemplars(), actual.Exemplars()) +} + +func diffDoubleHistogramPts( + diffs []*MetricDiff, + expected pdata.DoubleHistogramDataPointSlice, + actual pdata.DoubleHistogramDataPointSlice, +) []*MetricDiff { + var mismatch bool + diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "HistogramDataPointSlice len") + if mismatch { + return diffs + } + for i := 0; i < expected.Len(); i++ { + diffs = diffDoubleHistogramPt(diffs, expected.At(i), actual.At(i)) + } + return diffs +} + +func diffDoubleHistogramPt( + diffs []*MetricDiff, + expected pdata.DoubleHistogramDataPoint, + actual pdata.DoubleHistogramDataPoint, +) []*MetricDiff { + diffs = diff(diffs, expected.Count(), actual.Count(), "DoubleHistogramDataPoint Count") + diffs = diff(diffs, expected.Sum(), actual.Sum(), "DoubleHistogramDataPoint Sum") + diffs = diff(diffs, expected.BucketCounts(), actual.BucketCounts(), "DoubleHistogramDataPoint BucketCounts") + diffs = diff(diffs, expected.ExplicitBounds(), actual.ExplicitBounds(), "DoubleHistogramDataPoint ExplicitBounds") + // todo LabelsMap() + return diffDoubleExemplars(diffs, expected.Exemplars(), actual.Exemplars()) +} + +func diffDoubleExemplars( + diffs []*MetricDiff, + expected pdata.DoubleExemplarSlice, + actual pdata.DoubleExemplarSlice, +) []*MetricDiff { + var mismatch bool + diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "DoubleExemplarSlice len") + if mismatch { + return diffs + } + for i := 0; i < expected.Len(); i++ { + diffs = diff(diffs, expected.At(i).Value(), actual.At(i).Value(), "DoubleExemplar Value") + } + return diffs +} + +func diffIntHistogramPts( + diffs []*MetricDiff, + expected pdata.IntHistogramDataPointSlice, + actual pdata.IntHistogramDataPointSlice, +) 
[]*MetricDiff {
+ var mismatch bool
+ diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "IntHistogramDataPointSlice len")
+ if mismatch {
+ return diffs
+ }
+ for i := 0; i < expected.Len(); i++ {
+ diffs = diffIntHistogramPt(diffs, expected.At(i), actual.At(i))
+ }
+ return diffs
+}
+
+func diffIntHistogramPt(
+ diffs []*MetricDiff,
+ expected pdata.IntHistogramDataPoint,
+ actual pdata.IntHistogramDataPoint,
+) []*MetricDiff {
+ diffs = diff(diffs, expected.Count(), actual.Count(), "IntHistogramDataPoint Count")
+ diffs = diff(diffs, expected.Sum(), actual.Sum(), "IntHistogramDataPoint Sum")
+ diffs = diff(diffs, expected.BucketCounts(), actual.BucketCounts(), "IntHistogramDataPoint BucketCounts")
+ diffs = diff(diffs, expected.ExplicitBounds(), actual.ExplicitBounds(), "IntHistogramDataPoint ExplicitBounds")
+ // todo LabelsMap()
+ return diffIntExemplars(diffs, expected.Exemplars(), actual.Exemplars())
+}
+
+func diffIntExemplars(
+ diffs []*MetricDiff,
+ expected pdata.IntExemplarSlice,
+ actual pdata.IntExemplarSlice,
+) []*MetricDiff {
+ var mismatch bool
+ diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "IntExemplarSlice len")
+ if mismatch {
+ return diffs
+ }
+ for i := 0; i < expected.Len(); i++ {
+ diffs = diff(diffs, expected.At(i).Value(), actual.At(i).Value(), "IntExemplar Value")
+ }
+ return diffs
+}
+
+func diffIntPts(
+ diffs []*MetricDiff,
+ expected pdata.IntDataPointSlice,
+ actual pdata.IntDataPointSlice,
+) []*MetricDiff {
+ var mismatch bool
+ diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "IntDataPointSlice len")
+ if mismatch {
+ return diffs
+ }
+ for i := 0; i < expected.Len(); i++ {
+ diffs = diffIntPt(diffs, expected.At(i), actual.At(i))
+ }
+ return diffs
+}
+
+func diffIntPt(
+ diffs []*MetricDiff,
+ expected pdata.IntDataPoint,
+ actual pdata.IntDataPoint,
+) []*MetricDiff {
+ return diff(diffs, expected.Value(), actual.Value(), "IntDataPoint value")
+}
+
+func diffResource(diffs []*MetricDiff, expected pdata.Resource, actual pdata.Resource) []*MetricDiff {
+ return diffAttrs(diffs, expected.Attributes(), actual.Attributes())
+}
+
+func diffAttrs(diffs []*MetricDiff, expected pdata.AttributeMap, actual pdata.AttributeMap) []*MetricDiff {
+ if !reflect.DeepEqual(expected, actual) {
+ diffs = append(diffs, &MetricDiff{
+ ExpectedValue: attrMapToString(expected),
+ ActualValue: attrMapToString(actual),
+ Msg: "Resource attributes",
+ })
+ }
+ return diffs
+}
+
+func diff(diffs []*MetricDiff, expected interface{}, actual interface{}, msg string) []*MetricDiff {
+ out, _ := diffValues(diffs, expected, actual, msg)
+ return out
+}
+
+func diffValues(
+ diffs []*MetricDiff,
+ expected interface{},
+ actual interface{},
+ msg string,
+) ([]*MetricDiff, bool) {
+ if !reflect.DeepEqual(expected, actual) {
+ return append(diffs, &MetricDiff{
+ Msg: msg,
+ ExpectedValue: expected,
+ ActualValue: actual,
+ }), true
+ }
+ return diffs, false
+}
+
+func attrMapToString(m pdata.AttributeMap) string {
+ out := ""
+ m.ForEach(func(k string, v pdata.AttributeValue) {
+ out += "[" + k + "=" + v.StringVal() + "]"
+ })
+ return out
+}
diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_diff_test.go b/internal/otel_collector/testbed/correctness/metrics/metric_diff_test.go
new file mode 100644
index 00000000000..4f87e2f7808
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/metrics/metric_diff_test.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under
the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/goldendataset" +) + +func TestSameMetrics(t *testing.T) { + expected := goldendataset.DefaultMetricData() + actual := goldendataset.DefaultMetricData() + diffs := diffMetricData(expected, actual) + assert.Nil(t, diffs) +} + +func diffMetricData(expected pdata.Metrics, actual pdata.Metrics) []*MetricDiff { + expectedRMSlice := expected.ResourceMetrics() + actualRMSlice := actual.ResourceMetrics() + return diffRMSlices(toSlice(expectedRMSlice), toSlice(actualRMSlice)) +} + +func toSlice(s pdata.ResourceMetricsSlice) (out []pdata.ResourceMetrics) { + for i := 0; i < s.Len(); i++ { + out = append(out, s.At(i)) + } + return out +} + +func TestDifferentValues(t *testing.T) { + expected := goldendataset.DefaultMetricData() + cfg := goldendataset.DefaultCfg() + cfg.PtVal = 2 + actual := goldendataset.MetricDataFromCfg(cfg) + diffs := diffMetricData(expected, actual) + assert.Len(t, diffs, 1) +} + +func TestDifferentNumPts(t *testing.T) { + expected := goldendataset.DefaultMetricData() + cfg := goldendataset.DefaultCfg() + cfg.NumPtsPerMetric = 2 + actual := goldendataset.MetricDataFromCfg(cfg) + diffs := diffMetricData(expected, actual) + assert.Len(t, diffs, 1) +} + +func TestDifferentPtTypes(t *testing.T) { + expected := goldendataset.DefaultMetricData() + cfg := goldendataset.DefaultCfg() + cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleGauge + actual := goldendataset.MetricDataFromCfg(cfg) + diffs := diffMetricData(expected, actual) + assert.Len(t, diffs, 1) +} + +func TestDoubleHistogram(t *testing.T) { + cfg1 := goldendataset.DefaultCfg() + cfg1.MetricDescriptorType = pdata.MetricDataTypeDoubleHistogram + expected := goldendataset.MetricDataFromCfg(cfg1) + cfg2 := goldendataset.DefaultCfg() + cfg2.MetricDescriptorType = pdata.MetricDataTypeDoubleHistogram + cfg2.PtVal = 2 + actual := goldendataset.MetricDataFromCfg(cfg2) + diffs := diffMetricData(expected, actual) + assert.Len(t, diffs, 3) +} + +func TestIntHistogram(t *testing.T) { + cfg1 := goldendataset.DefaultCfg() + cfg1.MetricDescriptorType = pdata.MetricDataTypeIntHistogram + expected := goldendataset.MetricDataFromCfg(cfg1) + cfg2 := goldendataset.DefaultCfg() + cfg2.MetricDescriptorType = pdata.MetricDataTypeIntHistogram + cfg2.PtVal = 2 + actual := goldendataset.MetricDataFromCfg(cfg2) + diffs := diffMetricData(expected, actual) + assert.Len(t, diffs, 3) +} + +func TestPDMToPDRM(t *testing.T) { + md := pdata.NewMetrics() + md.ResourceMetrics().Resize(2) + rms := pdmToPDRM([]pdata.Metrics{md}) + require.Len(t, rms, 2) +} diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_index.go b/internal/otel_collector/testbed/correctness/metrics/metric_index.go new file mode 100644 index 00000000000..a07f9b39a14 --- /dev/null +++ 
b/internal/otel_collector/testbed/correctness/metrics/metric_index.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +type metricReceived struct { + pdm pdata.Metrics + received bool +} + +type metricsReceivedIndex struct { + m map[string]*metricReceived +} + +func newMetricsReceivedIndex(pdms []pdata.Metrics) *metricsReceivedIndex { + mi := &metricsReceivedIndex{m: map[string]*metricReceived{}} + for _, pdm := range pdms { + metrics := pdm.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() + name := metrics.At(0).Name() + mi.m[name] = &metricReceived{pdm: pdm} + } + return mi +} + +func (mi *metricsReceivedIndex) lookup(name string) (*metricReceived, bool) { + mr, ok := mi.m[name] + return mr, ok +} + +func (mi *metricsReceivedIndex) allReceived() bool { + for _, m := range mi.m { + if !m.received { + return false + } + } + return true +} diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go b/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go new file mode 100644 index 00000000000..d1b01510e32 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +type metricSupplier struct { + pdms []pdata.Metrics + currIdx int +} + +func newMetricSupplier(pdms []pdata.Metrics) *metricSupplier { + return &metricSupplier{pdms: pdms} +} + +func (p *metricSupplier) nextMetrics() (pdm pdata.Metrics, done bool) { + if p.currIdx == len(p.pdms) { + return pdata.Metrics{}, true + } + pdm = p.pdms[p.currIdx] + p.currIdx++ + return pdm, false +} diff --git a/internal/otel_collector/testbed/correctness/metrics/metrics_correctness_test.go b/internal/otel_collector/testbed/correctness/metrics/metrics_correctness_test.go new file mode 100644 index 00000000000..fd9368a48aa --- /dev/null +++ b/internal/otel_collector/testbed/correctness/metrics/metrics_correctness_test.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "fmt" + "log" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/testbed/correctness" + "go.opentelemetry.io/collector/testbed/testbed" +) + +func TestMetricsGoldenData(t *testing.T) { + tests, err := correctness.LoadPictOutputPipelineDefs("../testdata/generated_pict_pairs_metrics_pipeline.txt") + require.NoError(t, err) + for _, test := range tests { + test.TestName = fmt.Sprintf("%s-%s", test.Receiver, test.Exporter) + test.DataSender = correctness.ConstructMetricsSender(t, test.Receiver) + test.DataReceiver = correctness.ConstructReceiver(t, test.Exporter) + t.Run(test.TestName, func(t *testing.T) { + testWithMetricsGoldenDataset(t, test.DataSender.(testbed.MetricDataSender), test.DataReceiver) + }) + } +} + +func testWithMetricsGoldenDataset(t *testing.T, sender testbed.MetricDataSender, receiver testbed.DataReceiver) { + mds := getTestMetrics(t) + accumulator := newDiffAccumulator() + h := newTestHarness( + t, + newMetricSupplier(mds), + newMetricsReceivedIndex(mds), + sender, + accumulator, + ) + tc := newCorrectnessTestCase(t, sender, receiver, h) + + tc.startTestbedReceiver() + tc.startCollector() + tc.startTestbedSender() + + tc.sendFirstMetric() + tc.waitForAllMetrics() + + tc.stopTestbedReceiver() + tc.stopCollector() + + if accumulator.foundDiffs { + t.Fail() + } +} + +func getTestMetrics(t *testing.T) []pdata.Metrics { + const file = "../../../internal/goldendataset/testdata/generated_pict_pairs_metrics.txt" + mds, err := goldendataset.GenerateMetricDatas(file) + require.NoError(t, err) + return mds +} + +type diffAccumulator struct { + foundDiffs bool +} + +var _ diffConsumer = (*diffAccumulator)(nil) + +func newDiffAccumulator() *diffAccumulator { + return &diffAccumulator{} +} + +func (d *diffAccumulator) accept(metricName string, diffs []*MetricDiff) { + if len(diffs) > 0 { + d.foundDiffs = true + log.Printf("Found diffs for [%v]\n%v", metricName, diffs) + } +} diff --git a/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go b/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go new file mode 100644 index 00000000000..40296a8d566 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package metrics
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.opentelemetry.io/collector/consumer/pdata"
+ "go.opentelemetry.io/collector/testbed/testbed"
+)
+
+// testHarness listens for datapoints from the receiver to which it is attached
+// and when it receives one, it compares it to the datapoint that was previously
+// sent out. It then sends the next datapoint, if there is one.
+type testHarness struct {
+ t *testing.T
+ metricSupplier *metricSupplier
+ metricIndex *metricsReceivedIndex
+ sender testbed.MetricDataSender
+ currPDM pdata.Metrics
+ diffConsumer diffConsumer
+ outOfMetrics bool
+ allMetricsReceived chan struct{}
+}
+
+type diffConsumer interface {
+ accept(string, []*MetricDiff)
+}
+
+func newTestHarness(
+ t *testing.T,
+ s *metricSupplier,
+ mi *metricsReceivedIndex,
+ ds testbed.MetricDataSender,
+ diffConsumer diffConsumer,
+) *testHarness {
+ return &testHarness{
+ t: t,
+ metricSupplier: s,
+ metricIndex: mi,
+ sender: ds,
+ diffConsumer: diffConsumer,
+ allMetricsReceived: make(chan struct{}),
+ }
+}
+
+func (h *testHarness) ConsumeMetrics(_ context.Context, pdm pdata.Metrics) error {
+ h.compare(pdm)
+ if h.metricIndex.allReceived() {
+ close(h.allMetricsReceived)
+ }
+ if !h.outOfMetrics {
+ h.sendNextMetric()
+ }
+ return nil
+}
+
+func (h *testHarness) compare(pdm pdata.Metrics) {
+ pdms := pdm.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()
+ var diffs []*MetricDiff
+ for i := 0; i < pdms.Len(); i++ {
+ pdmRecd := pdms.At(i)
+ metricName := pdmRecd.Name()
+ metric, found := h.metricIndex.lookup(metricName)
+ if !found {
+ h.diffConsumer.accept(metricName, []*MetricDiff{{
+ ExpectedValue: metricName,
+ Msg: "Metric name not found in index",
+ }})
+ // Without this continue, the nil metric returned by the failed
+ // lookup would be dereferenced below.
+ continue
+ }
+ if !metric.received {
+ metric.received = true
+ sent := metric.pdm
+ pdmExpected := sent.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0)
+ diffs = DiffMetric(
+ diffs,
+ pdmExpected,
+ pdmRecd,
+ )
+ h.diffConsumer.accept(metricName, diffs)
+ }
+ }
+}
+
+func (h *testHarness) sendNextMetric() {
+ h.currPDM, h.outOfMetrics = h.metricSupplier.nextMetrics()
+ if h.outOfMetrics {
+ return
+ }
+ err := h.sender.ConsumeMetrics(context.Background(), h.currPDM)
+ require.NoError(h.t, err)
+}
diff --git a/internal/otel_collector/testbed/correctness/metrics/results_dir.go b/internal/otel_collector/testbed/correctness/metrics/results_dir.go
new file mode 100644
index 00000000000..2ad05ed36a8
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/metrics/results_dir.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package metrics + +import ( + "os" + "path" + "path/filepath" +) + +type resultsDir struct { + dir string +} + +func newResultsDir(dirName string) (*resultsDir, error) { + dir, err := filepath.Abs(path.Join("results", dirName)) + if err != nil { + return nil, err + } + return &resultsDir{dir: dir}, nil +} + +func (d *resultsDir) mkDir() error { + return os.MkdirAll(d.dir, os.ModePerm) +} + +func (d *resultsDir) fullPath(name string) (string, error) { + return filepath.Abs(path.Join(d.dir, name)) +} diff --git a/internal/otel_collector/testbed/correctness/testdata/generated_pict_pairs_metrics_pipeline.txt b/internal/otel_collector/testbed/correctness/testdata/generated_pict_pairs_metrics_pipeline.txt new file mode 100644 index 00000000000..1db11505cd5 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/testdata/generated_pict_pairs_metrics_pipeline.txt @@ -0,0 +1,2 @@ +Receiver Exporter +otlp otlp diff --git a/internal/otel_collector/testbed/correctness/testdata/pict_input_metrics_pipeline.txt b/internal/otel_collector/testbed/correctness/testdata/pict_input_metrics_pipeline.txt new file mode 100644 index 00000000000..648e967e482 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/testdata/pict_input_metrics_pipeline.txt @@ -0,0 +1,2 @@ +Receiver: otlp +Exporter: otlp diff --git a/internal/otel_collector/testbed/correctness/traces/.gitignore b/internal/otel_collector/testbed/correctness/traces/.gitignore new file mode 100644 index 00000000000..a61c5ef81e8 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/traces/.gitignore @@ -0,0 +1,2 @@ +results/* +!results/BASELINE.md diff --git a/internal/otel_collector/testbed/correctness/traces/correctness_test.go b/internal/otel_collector/testbed/correctness/traces/correctness_test.go new file mode 100644 index 00000000000..6d712dd7d45 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/traces/correctness_test.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package traces + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/service/defaultcomponents" + "go.opentelemetry.io/collector/testbed/correctness" + "go.opentelemetry.io/collector/testbed/testbed" +) + +var correctnessResults testbed.TestResultsSummary = &testbed.CorrectnessResults{} + +func TestMain(m *testing.M) { + testbed.DoTestMain(m, correctnessResults) +} + +func TestTracingGoldenData(t *testing.T) { + tests, err := correctness.LoadPictOutputPipelineDefs("testdata/generated_pict_pairs_traces_pipeline.txt") + require.NoError(t, err) + processors := map[string]string{ + "batch": ` + batch: + send_batch_size: 1024 +`, + } + for _, test := range tests { + test.TestName = fmt.Sprintf("%s-%s", test.Receiver, test.Exporter) + test.DataSender = correctness.ConstructTraceSender(t, test.Receiver) + test.DataReceiver = correctness.ConstructReceiver(t, test.Exporter) + t.Run(test.TestName, func(t *testing.T) { + testWithTracingGoldenDataset(t, test.DataSender, test.DataReceiver, test.ResourceSpec, processors) + }) + } +} + +func testWithTracingGoldenDataset( + t *testing.T, + sender testbed.DataSender, + receiver testbed.DataReceiver, + resourceSpec testbed.ResourceSpec, + processors map[string]string, +) { + dataProvider := testbed.NewGoldenDataProvider( + "../../../internal/goldendataset/testdata/generated_pict_pairs_traces.txt", + "../../../internal/goldendataset/testdata/generated_pict_pairs_spans.txt", + "", + 161803) + factories, err := defaultcomponents.Components() + require.NoError(t, err, "default components resulted in: %v", err) + runner := testbed.NewInProcessCollector(factories) + validator := testbed.NewCorrectTestValidator(dataProvider) + config := correctness.CreateConfigYaml(sender, receiver, processors, "traces") + configCleanup, cfgErr := runner.PrepareConfig(config) + require.NoError(t, cfgErr, "collector configuration resulted in: %v", cfgErr) + defer configCleanup() + tc := testbed.NewTestCase( + t, + dataProvider, + sender, + receiver, + runner, + validator, + correctnessResults, + ) + defer tc.Stop() + + tc.SetResourceLimits(resourceSpec) + tc.EnableRecording() + tc.StartBackend() + tc.StartAgent("--metrics-level=NONE") + + tc.StartLoad(testbed.LoadOptions{ + DataItemsPerSecond: 1024, + ItemsPerBatch: 1, + }) + + duration := time.Second + tc.Sleep(duration) + + tc.StopLoad() + + tc.WaitForN(func() bool { return tc.LoadGenerator.DataItemsSent() == tc.MockBackend.DataItemsReceived() }, + duration*3, "all data items received") + + tc.StopAgent() + + tc.ValidateData() +} diff --git a/internal/otel_collector/testbed/correctness/traces/runtests.sh b/internal/otel_collector/testbed/correctness/traces/runtests.sh new file mode 100644 index 00000000000..f4dc30921e5 --- /dev/null +++ b/internal/otel_collector/testbed/correctness/traces/runtests.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
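+
+# Usage sketch (an assumption, not documented upstream): run from this directory; extra
+# `go test` flags can be passed through the TEST_ARGS environment variable, e.g.
+#   TEST_ARGS="-run TestTracingGoldenData" ./runtests.sh
+# Output is colorized on the terminal, logged to results/testoutput.log, and converted
+# to JUnit XML under results/junit/.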
+
+set -e
+
+SED="sed"
+
+PASS_COLOR=$(printf "\033[32mPASS\033[0m")
+FAIL_COLOR=$(printf "\033[31mFAIL\033[0m")
+TEST_COLORIZE="${SED} 's/PASS/${PASS_COLOR}/' | ${SED} 's/FAIL/${FAIL_COLOR}/'"
+echo ${TEST_ARGS}
+mkdir -p results
+RUN_TESTBED=1 go test -v ${TEST_ARGS} 2>&1 | tee results/testoutput.log | bash -c "${TEST_COLORIZE}"
+
+testStatus=${PIPESTATUS[0]}
+
+mkdir -p results/junit
+go-junit-report < results/testoutput.log > results/junit/results.xml
+
+bash -c "cat results/CORRECTNESSRESULTS.md | ${TEST_COLORIZE}"
+
+exit ${testStatus}
diff --git a/internal/otel_collector/testbed/correctness/traces/testdata/generated_pict_pairs_traces_pipeline.txt b/internal/otel_collector/testbed/correctness/traces/testdata/generated_pict_pairs_traces_pipeline.txt
new file mode 100644
index 00000000000..af31f5aa370
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/traces/testdata/generated_pict_pairs_traces_pipeline.txt
@@ -0,0 +1,17 @@
+Receiver Exporter
+otlp jaeger
+zipkin opencensus
+otlp opencensus
+jaeger opencensus
+opencensus jaeger
+zipkin otlp
+jaeger jaeger
+opencensus opencensus
+otlp zipkin
+jaeger zipkin
+opencensus zipkin
+zipkin jaeger
+otlp otlp
+jaeger otlp
+opencensus otlp
+zipkin zipkin
diff --git a/internal/otel_collector/testbed/correctness/traces/testdata/pict_input_traces_pipeline.txt b/internal/otel_collector/testbed/correctness/traces/testdata/pict_input_traces_pipeline.txt
new file mode 100644
index 00000000000..05efad8fdea
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/traces/testdata/pict_input_traces_pipeline.txt
@@ -0,0 +1,2 @@
+Receiver: jaeger, opencensus, otlp, zipkin
+Exporter: jaeger, opencensus, otlp, zipkin
diff --git a/internal/otel_collector/testbed/correctness/utils.go b/internal/otel_collector/testbed/correctness/utils.go
new file mode 100644
index 00000000000..7f3278370ac
--- /dev/null
+++ b/internal/otel_collector/testbed/correctness/utils.go
@@ -0,0 +1,175 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package correctness
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "go.opentelemetry.io/collector/testbed/testbed"
+)
+
+// CreateConfigYaml creates a yaml config for an otel collector given a testbed sender, testbed receiver, any
+// processors, and a pipeline type. A collector created from the resulting yaml string should be able to talk
+// to the specified sender and receiver.
+func CreateConfigYaml(
+ sender testbed.DataSender,
+ receiver testbed.DataReceiver,
+ processors map[string]string,
+ pipelineType string,
+) string {
+
+ // Prepare extra processor config section and comma-separated list of extra processor
+ // names to use in corresponding "processors" settings.
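+ // Illustrative example (not part of the original source): with processors =
+ // map[string]string{"batch": "  batch:\n"}, the loop below yields
+ // processorsSections == "  batch:\n\n" and processorsList == "batch", which
+ // the format string turns into a "processors: [batch]" pipeline entry.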
+ processorsSections := ""
+ processorsList := ""
+ if len(processors) > 0 {
+ first := true
+ for name, cfg := range processors {
+ processorsSections += cfg + "\n"
+ if !first {
+ processorsList += ","
+ }
+ processorsList += name
+ first = false
+ }
+ }
+
+ format := `
+receivers:%v
+exporters:%v
+processors:
+ %s
+
+extensions:
+
+service:
+ extensions:
+ pipelines:
+ %s:
+ receivers: [%v]
+ processors: [%s]
+ exporters: [%v]
+`
+
+ return fmt.Sprintf(
+ format,
+ sender.GenConfigYAMLStr(),
+ receiver.GenConfigYAMLStr(),
+ processorsSections,
+ pipelineType,
+ sender.ProtocolName(),
+ processorsList,
+ receiver.ProtocolName(),
+ )
+}
+
+// PipelineDef holds the information necessary to run a single testbed configuration.
+type PipelineDef struct {
+ Receiver string
+ Exporter string
+ TestName string
+ DataSender testbed.DataSender
+ DataReceiver testbed.DataReceiver
+ ResourceSpec testbed.ResourceSpec
+}
+
+// LoadPictOutputPipelineDefs generates a slice of PipelineDefs from the passed-in generated PICT file. The
+// result should be a set of PipelineDefs that covers all possible pipeline configurations.
+func LoadPictOutputPipelineDefs(fileName string) (defs []PipelineDef, err error) {
+ var file *os.File
+ file, err = os.Open(filepath.Clean(fileName))
+ if err != nil {
+ return nil, err
+ }
+ // Named return values are used so the error assigned in this deferred Close
+ // actually propagates to the caller.
+ defer func() {
+ cerr := file.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ defs = make([]PipelineDef, 0)
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ s := strings.Split(scanner.Text(), "\t")
+ if s[0] == "Receiver" {
+ continue
+ }
+
+ var aDef PipelineDef
+ aDef.Receiver, aDef.Exporter = s[0], s[1]
+ defs = append(defs, aDef)
+ }
+
+ return defs, err
+}
+
+// ConstructTraceSender creates a testbed trace sender from the passed-in trace sender identifier.
+func ConstructTraceSender(t *testing.T, receiver string) testbed.DataSender {
+ var sender testbed.DataSender
+ switch receiver {
+ case "otlp":
+ sender = testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ case "opencensus":
+ sender = testbed.NewOCTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ case "jaeger":
+ sender = testbed.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ case "zipkin":
+ sender = testbed.NewZipkinDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ default:
+ t.Errorf("unknown receiver type: %s", receiver)
+ }
+ return sender
+}
+
+// ConstructMetricsSender creates a testbed metrics sender from the passed-in metrics sender identifier.
+func ConstructMetricsSender(t *testing.T, receiver string) testbed.MetricDataSender {
+ var sender testbed.MetricDataSender
+ switch receiver {
+ case "otlp":
+ sender = testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ case "opencensus":
+ sender = testbed.NewOCMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ case "prometheus":
+ sender = testbed.NewPrometheusDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t))
+ default:
+ t.Errorf("unknown receiver type: %s", receiver)
+ }
+ return sender
+}
+
+// ConstructReceiver creates a testbed receiver from the passed-in receiver identifier.
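+// An unknown exporter identifier is reported through t.Errorf and results in a nil receiver.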
+func ConstructReceiver(t *testing.T, exporter string) testbed.DataReceiver { + var receiver testbed.DataReceiver + switch exporter { + case "otlp": + receiver = testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)) + case "opencensus": + receiver = testbed.NewOCDataReceiver(testbed.GetAvailablePort(t)) + case "jaeger": + receiver = testbed.NewJaegerDataReceiver(testbed.GetAvailablePort(t)) + case "zipkin": + receiver = testbed.NewZipkinDataReceiver(testbed.GetAvailablePort(t)) + case "prometheus": + receiver = testbed.NewPrometheusDataReceiver(testbed.GetAvailablePort(t)) + default: + t.Errorf("unknown exporter type: %s", exporter) + } + return receiver +} diff --git a/internal/otel_collector/testbed/correctness_result.png b/internal/otel_collector/testbed/correctness_result.png new file mode 100644 index 00000000000..c9046855597 Binary files /dev/null and b/internal/otel_collector/testbed/correctness_result.png differ diff --git a/internal/otel_collector/testbed/e2e_diagram.jpeg b/internal/otel_collector/testbed/e2e_diagram.jpeg new file mode 100644 index 00000000000..49a74456374 Binary files /dev/null and b/internal/otel_collector/testbed/e2e_diagram.jpeg differ diff --git a/internal/otel_collector/testbed/testbed/.gitignore b/internal/otel_collector/testbed/testbed/.gitignore new file mode 100644 index 00000000000..9835eb192b0 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/.gitignore @@ -0,0 +1 @@ +mockbackend.log diff --git a/internal/otel_collector/testbed/testbed/child_process.go b/internal/otel_collector/testbed/testbed/child_process.go new file mode 100644 index 00000000000..18e93bf1867 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/child_process.go @@ -0,0 +1,512 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "sync" + "syscall" + "text/template" + "time" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/process" + "go.uber.org/atomic" +) + +// ResourceSpec is a resource consumption specification. +type ResourceSpec struct { + // Percentage of one core the process is expected to consume at most. + // Test is aborted and failed if consumption during + // ResourceCheckPeriod exceeds this number. If 0 the CPU + // consumption is not monitored and does not affect the test result. + ExpectedMaxCPU uint32 + + // Maximum RAM in MiB the process is expected to consume. + // Test is aborted and failed if consumption exceeds this number. + // If 0 memory consumption is not monitored and does not affect + // the test result. + ExpectedMaxRAM uint32 + + // Period during which CPU and RAM of the process are measured. + // Bigger numbers will result in more averaging of short spikes. + ResourceCheckPeriod time.Duration +} + +// isSpecified returns true if any part of ResourceSpec is specified, +// i.e. has non-zero value. 
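+// For example, a ResourceSpec with only ExpectedMaxCPU set is still considered specified,
+// even though RAM consumption will not be checked.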
+func (rs *ResourceSpec) isSpecified() bool {
+ return rs != nil && (rs.ExpectedMaxCPU != 0 || rs.ExpectedMaxRAM != 0)
+}
+
+// ChildProcess implements the OtelcolRunner interface as a child process on the same machine executing
+// the test. The process can be monitored, and its output is written to a log file.
+type ChildProcess struct {
+ // Path to agent executable. If unset the default executable in
+ // bin/otelcol_{{.GOOS}}_{{.GOARCH}} will be used.
+ // Can be set for example to use the unstable executable for a specific test.
+ AgentExePath string
+
+ // Descriptive name of the process
+ name string
+
+ // Config file name
+ configFileName string
+
+ // Command to execute
+ cmd *exec.Cmd
+
+ // WaitGroup for copying process output
+ outputWG sync.WaitGroup
+
+ // Various starting/stopping flags
+ isStarted bool
+ stopOnce sync.Once
+ isStopped bool
+ doneSignal chan struct{}
+
+ // Resource specification that must be monitored for.
+ resourceSpec *ResourceSpec
+
+ // Process monitoring data.
+ processMon *process.Process
+
+ // Time when process was started.
+ startTime time.Time
+
+ // Last tick time we monitored the process.
+ lastElapsedTime time.Time
+
+ // Process times that were fetched on last monitoring tick.
+ lastProcessTimes *cpu.TimesStat
+
+ // Current RAM RSS in MiBs
+ ramMiBCur atomic.Uint32
+
+ // Current CPU percentage times 1000 (we use scaling since we have to use int for atomic operations).
+ cpuPercentX1000Cur atomic.Uint32
+
+ // Maximum CPU seen
+ cpuPercentMax float64
+
+ // Number of memory measurements
+ memProbeCount int
+
+ // Cumulative RAM RSS in MiBs
+ ramMiBTotal uint64
+
+ // Maximum RAM seen
+ ramMiBMax uint32
+}
+
+// StartParams bundles the parameters needed to start a child process.
+type StartParams struct {
+ Name string
+ LogFilePath string
+ CmdArgs []string
+ resourceSpec *ResourceSpec
+}
+
+// ResourceConsumption holds average and maximum CPU and RAM usage for a monitored process.
+type ResourceConsumption struct {
+ CPUPercentAvg float64
+ CPUPercentMax float64
+ RAMMiBAvg uint32
+ RAMMiBMax uint32
+}
+
+func (cp *ChildProcess) PrepareConfig(configStr string) (configCleanup func(), err error) {
+ configCleanup = func() {
+ // NoOp
+ }
+ var file *os.File
+ file, err = ioutil.TempFile("", "agent*.yaml")
+ if err != nil {
+ log.Printf("%s", err)
+ return configCleanup, err
+ }
+
+ defer func() {
+ errClose := file.Close()
+ if errClose != nil {
+ log.Printf("%s", errClose)
+ }
+ }()
+
+ if _, err = file.WriteString(configStr); err != nil {
+ log.Printf("%s", err)
+ return configCleanup, err
+ }
+ cp.configFileName = file.Name()
+ configCleanup = func() {
+ os.Remove(cp.configFileName)
+ }
+ return configCleanup, err
+}
+
+func expandExeFileName(exeName string) string {
+ cfgTemplate, err := template.New("").Parse(exeName)
+ if err != nil {
+ log.Fatalf("Template failed to parse exe name %q: %s",
+ exeName, err.Error())
+ }
+
+ templateVars := struct {
+ GOOS string
+ GOARCH string
+ }{
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ }
+ var buf bytes.Buffer
+ if err = cfgTemplate.Execute(&buf, templateVars); err != nil {
+ log.Fatalf("Configuration template failed to run on exe name %q: %s",
+ exeName, err.Error())
+ }
+
+ return buf.String()
+}
+
+// Start starts a child process.
+//
+// cp.AgentExePath defines the executable to run. If unspecified
+// "../../bin/otelcol_{{.GOOS}}_{{.GOARCH}}" will be used.
+// {{.GOOS}} and {{.GOARCH}} will be expanded to the current OS and ARCH respectively.
+//
+// Parameters:
+// name is the human readable name of the process (e.g. "Agent"), used for logging.
+// logFilePath is the file path to write the standard output and standard error of
+// the process to.
+// cmdArgs are the command line arguments to pass to the process.
+func (cp *ChildProcess) Start(params StartParams) error {
+
+ cp.name = params.Name
+ cp.doneSignal = make(chan struct{})
+ cp.resourceSpec = params.resourceSpec
+
+ if cp.AgentExePath == "" {
+ cp.AgentExePath = GlobalConfig.DefaultAgentExeRelativeFile
+ }
+ exePath := expandExeFileName(cp.AgentExePath)
+ exePath, err := filepath.Abs(exePath)
+ if err != nil {
+ return err
+ }
+
+ log.Printf("Starting %s (%s)", cp.name, exePath)
+
+ // Prepare log file
+ logFile, err := os.Create(params.LogFilePath)
+ if err != nil {
+ return fmt.Errorf("cannot create %s: %s", params.LogFilePath, err.Error())
+ }
+ log.Printf("Writing %s log to %s", cp.name, params.LogFilePath)
+
+ // Prepare to start the process.
+ // #nosec
+ args := params.CmdArgs
+ if !containsConfig(args) {
+ if cp.configFileName == "" {
+ configFile := path.Join("testdata", "agent-config.yaml")
+ cp.configFileName, err = filepath.Abs(configFile)
+ if err != nil {
+ return err
+ }
+ }
+ args = append(args, "--config")
+ args = append(args, cp.configFileName)
+ }
+ cp.cmd = exec.Command(exePath, args...)
+
+ // Capture standard output and standard error.
+ stdoutIn, err := cp.cmd.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("cannot capture stdout of %s: %s", exePath, err.Error())
+ }
+ stderrIn, err := cp.cmd.StderrPipe()
+ if err != nil {
+ return fmt.Errorf("cannot capture stderr of %s: %s", exePath, err.Error())
+ }
+
+ // Start the process.
+ if err = cp.cmd.Start(); err != nil {
+ return fmt.Errorf("cannot start executable at %s: %s", exePath, err.Error())
+ }
+
+ cp.startTime = time.Now()
+ cp.isStarted = true
+
+ log.Printf("%s running, pid=%d", cp.name, cp.cmd.Process.Pid)
+
+ // Create a WaitGroup that waits for both outputs to be finished copying.
+ cp.outputWG.Add(2)
+
+ // Begin copying outputs.
+ go func() {
+ _, _ = io.Copy(logFile, stdoutIn)
+ cp.outputWG.Done()
+ }()
+ go func() {
+ _, _ = io.Copy(logFile, stderrIn)
+ cp.outputWG.Done()
+ }()
+
+ return err
+}
+
+func (cp *ChildProcess) Stop() (stopped bool, err error) {
+ if !cp.isStarted || cp.isStopped {
+ return false, nil
+ }
+ cp.stopOnce.Do(func() {
+
+ if !cp.isStarted {
+ // Process wasn't started, nothing to stop.
+ return
+ }
+
+ cp.isStopped = true
+
+ log.Printf("Gracefully terminating %s pid=%d, sending SIGTERM...", cp.name, cp.cmd.Process.Pid)
+
+ // Notify resource monitor to stop.
+ close(cp.doneSignal)
+
+ // Gracefully signal process to stop.
+ if err = cp.cmd.Process.Signal(syscall.SIGTERM); err != nil {
+ log.Printf("Cannot send SIGTERM: %s", err.Error())
+ }
+
+ finished := make(chan struct{})
+
+ // Set up a goroutine to wait a while for the process to finish and send a kill signal
+ // to the process if it doesn't finish.
+ go func() {
+ // Wait 10 seconds.
+ t := time.After(10 * time.Second)
+ select {
+ case <-t:
+ // Time is out. Kill the process.
+ log.Printf("%s pid=%d is not responding to SIGTERM. Sending SIGKILL to kill forcibly.",
+ cp.name, cp.cmd.Process.Pid)
+ if err = cp.cmd.Process.Signal(syscall.SIGKILL); err != nil {
+ log.Printf("Cannot send SIGKILL: %s", err.Error())
+ }
+ case <-finished:
+ // Process is successfully finished.
+ }
+ }()
+
+ // Wait for output to be fully copied.
+ cp.outputWG.Wait()
+
+ // Wait for process to terminate
+ err = cp.cmd.Wait()
+
+ // Let goroutine know process is finished.
+ close(finished) + + // Set resource consumption stats to 0 + cp.ramMiBCur.Store(0) + cp.cpuPercentX1000Cur.Store(0) + + log.Printf("%s process stopped, exit code=%d", cp.name, cp.cmd.ProcessState.ExitCode()) + + if err != nil { + log.Printf("%s execution failed: %s", cp.name, err.Error()) + } + }) + stopped = true + return stopped, err +} + +func (cp *ChildProcess) WatchResourceConsumption() error { + if !cp.resourceSpec.isSpecified() { + // Resource monitoring is not enabled. + return nil + } + + var err error + cp.processMon, err = process.NewProcess(int32(cp.cmd.Process.Pid)) + if err != nil { + return fmt.Errorf("cannot monitor process %d: %s", + cp.cmd.Process.Pid, err.Error()) + } + + cp.fetchRAMUsage() + + // Begin measuring elapsed and process CPU times. + cp.lastElapsedTime = time.Now() + cp.lastProcessTimes, err = cp.processMon.Times() + if err != nil { + return fmt.Errorf("cannot get process times for %d: %s", + cp.cmd.Process.Pid, err.Error()) + } + + // Measure every ResourceCheckPeriod. + ticker := time.NewTicker(cp.resourceSpec.ResourceCheckPeriod) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + cp.fetchRAMUsage() + cp.fetchCPUUsage() + + if err := cp.checkAllowedResourceUsage(); err != nil { + cp.Stop() + return err + } + + case <-cp.doneSignal: + log.Printf("Stopping process monitor.") + return nil + } + } +} + +func (cp *ChildProcess) GetProcessMon() *process.Process { + return cp.processMon +} + +func (cp *ChildProcess) fetchRAMUsage() { + // Get process memory and CPU times + mi, err := cp.processMon.MemoryInfo() + if err != nil { + log.Printf("cannot get process memory for %d: %s", + cp.cmd.Process.Pid, err.Error()) + return + } + + // Calculate RSS in MiBs. + ramMiBCur := uint32(mi.RSS / mibibyte) + + // Calculate aggregates. + cp.memProbeCount++ + cp.ramMiBTotal += uint64(ramMiBCur) + if ramMiBCur > cp.ramMiBMax { + cp.ramMiBMax = ramMiBCur + } + + // Store current usage. + cp.ramMiBCur.Store(ramMiBCur) +} + +func (cp *ChildProcess) fetchCPUUsage() { + times, err := cp.processMon.Times() + if err != nil { + log.Printf("cannot get process times for %d: %s", + cp.cmd.Process.Pid, err.Error()) + return + } + + now := time.Now() + + // Calculate elapsed and process CPU time deltas in seconds + deltaElapsedTime := now.Sub(cp.lastElapsedTime).Seconds() + deltaCPUTime := times.Total() - cp.lastProcessTimes.Total() + + cp.lastProcessTimes = times + cp.lastElapsedTime = now + + // Calculate CPU usage percentage in elapsed period. + cpuPercent := deltaCPUTime * 100 / deltaElapsedTime + if cpuPercent > cp.cpuPercentMax { + cp.cpuPercentMax = cpuPercent + } + + curCPUPercentageX1000 := uint32(cpuPercent * 1000) + + // Store current usage. + cp.cpuPercentX1000Cur.Store(curCPUPercentageX1000) +} + +func (cp *ChildProcess) checkAllowedResourceUsage() error { + // Check if current CPU usage exceeds expected. + var errMsg string + if cp.resourceSpec.ExpectedMaxCPU != 0 && cp.cpuPercentX1000Cur.Load()/1000 > cp.resourceSpec.ExpectedMaxCPU { + errMsg = fmt.Sprintf("CPU consumption is %.1f%%, max expected is %d%%", + float64(cp.cpuPercentX1000Cur.Load())/1000.0, cp.resourceSpec.ExpectedMaxCPU) + } + + // Check if current RAM usage exceeds expected. 
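+ // Note that if both the CPU and RAM limits are exceeded in the same tick, the RAM message
+ // below overwrites errMsg, so only the RAM violation is reported.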
+ if cp.resourceSpec.ExpectedMaxRAM != 0 && cp.ramMiBCur.Load() > cp.resourceSpec.ExpectedMaxRAM { + errMsg = fmt.Sprintf("RAM consumption is %s MiB, max expected is %d MiB", + cp.ramMiBCur.String(), cp.resourceSpec.ExpectedMaxRAM) + } + + if errMsg == "" { + return nil + } + + log.Printf("Performance error: %s", errMsg) + + return errors.New(errMsg) +} + +// GetResourceConsumption returns resource consumption as a string +func (cp *ChildProcess) GetResourceConsumption() string { + if !cp.resourceSpec.isSpecified() { + // Monitoring is not enabled. + return "" + } + + curRSSMib := cp.ramMiBCur.Load() + curCPUPercentageX1000 := cp.cpuPercentX1000Cur.Load() + + return fmt.Sprintf("%s RAM (RES):%4d MiB, CPU:%4.1f%%", cp.name, + curRSSMib, float64(curCPUPercentageX1000)/1000.0) +} + +// GetTotalConsumption returns total resource consumption since start of process +func (cp *ChildProcess) GetTotalConsumption() *ResourceConsumption { + rc := &ResourceConsumption{} + + if cp.processMon != nil { + // Get total elapsed time since process start + elapsedDuration := cp.lastElapsedTime.Sub(cp.startTime).Seconds() + + if elapsedDuration > 0 { + // Calculate average CPU usage since start of process + rc.CPUPercentAvg = cp.lastProcessTimes.Total() / elapsedDuration * 100.0 + } + rc.CPUPercentMax = cp.cpuPercentMax + + if cp.memProbeCount > 0 { + // Calculate average RAM usage by averaging all RAM measurements + rc.RAMMiBAvg = uint32(cp.ramMiBTotal / uint64(cp.memProbeCount)) + } + rc.RAMMiBMax = cp.ramMiBMax + } + + return rc +} + +func containsConfig(s []string) bool { + for _, a := range s { + if a == "--config" { + return true + } + } + return false +} diff --git a/internal/otel_collector/testbed/testbed/data_providers.go b/internal/otel_collector/testbed/testbed/data_providers.go new file mode 100644 index 00000000000..0767135a9dd --- /dev/null +++ b/internal/otel_collector/testbed/testbed/data_providers.go @@ -0,0 +1,310 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "math/rand" + "strconv" + "time" + + "go.uber.org/atomic" + + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/goldendataset" +) + +// DataProvider defines the interface for generators of test data used to drive various end-to-end tests. +type DataProvider interface { + // SetLoadGeneratorCounters supplies pointers to LoadGenerator counters. + // The data provider implementation should increment these as it generates data. + SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) + // GenerateTraces returns an internal Traces instance with an OTLP ResourceSpans slice populated with test data. + GenerateTraces() (pdata.Traces, bool) + // GenerateMetrics returns an internal MetricData instance with an OTLP ResourceMetrics slice of test data. 
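+ // The second return value is a "done" flag (presumably: providers that can run out of data
+ // return true once exhausted; PerfTestDataProvider below always returns false).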
+	GenerateMetrics() (pdata.Metrics, bool)
+	// GetGeneratedSpan returns the generated Span matching the provided traceId and spanId, or nil if no match is found.
+	GetGeneratedSpan(traceID pdata.TraceID, spanID pdata.SpanID) *otlptrace.Span
+	// GenerateLogs returns logs in the internal pdata.Logs format.
+	GenerateLogs() (pdata.Logs, bool)
+}
+
+// PerfTestDataProvider is an implementation of the DataProvider for use in performance tests.
+// Tracing IDs are based on the incremented batch and data items counters.
+type PerfTestDataProvider struct {
+	options            LoadOptions
+	batchesGenerated   *atomic.Uint64
+	dataItemsGenerated *atomic.Uint64
+}
+
+// NewPerfTestDataProvider creates an instance of PerfTestDataProvider which generates test data based on the sizes
+// specified in the supplied LoadOptions.
+func NewPerfTestDataProvider(options LoadOptions) *PerfTestDataProvider {
+	return &PerfTestDataProvider{
+		options: options,
+	}
+}
+
+func (dp *PerfTestDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) {
+	dp.batchesGenerated = batchesGenerated
+	dp.dataItemsGenerated = dataItemsGenerated
+}
+
+func (dp *PerfTestDataProvider) GenerateTraces() (pdata.Traces, bool) {
+
+	traceData := pdata.NewTraces()
+	traceData.ResourceSpans().Resize(1)
+	ilss := traceData.ResourceSpans().At(0).InstrumentationLibrarySpans()
+	ilss.Resize(1)
+	spans := ilss.At(0).Spans()
+	spans.Resize(dp.options.ItemsPerBatch)
+
+	traceID := dp.batchesGenerated.Inc()
+	for i := 0; i < dp.options.ItemsPerBatch; i++ {
+
+		startTime := time.Now()
+		endTime := startTime.Add(time.Millisecond)
+
+		spanID := dp.dataItemsGenerated.Inc()
+
+		span := spans.At(i)
+
+		// Create a span.
+		span.SetTraceID(GenerateSequentialTraceID(traceID))
+		span.SetSpanID(GenerateSequentialSpanID(spanID))
+		span.SetName("load-generator-span")
+		span.SetKind(pdata.SpanKindCLIENT)
+		attrs := span.Attributes()
+		attrs.UpsertInt("load_generator.span_seq_num", int64(spanID))
+		attrs.UpsertInt("load_generator.trace_seq_num", int64(traceID))
+		// Additional attributes.
+		for k, v := range dp.options.Attributes {
+			attrs.UpsertString(k, v)
+		}
+		span.SetStartTime(pdata.TimestampUnixNano(uint64(startTime.UnixNano())))
+		span.SetEndTime(pdata.TimestampUnixNano(uint64(endTime.UnixNano())))
+	}
+	return traceData, false
+}
+
+func GenerateSequentialTraceID(id uint64) pdata.TraceID {
+	var traceID [16]byte
+	binary.PutUvarint(traceID[:], id)
+	return pdata.NewTraceID(traceID)
+}
+
+func GenerateSequentialSpanID(id uint64) pdata.SpanID {
+	var spanID [8]byte
+	binary.PutUvarint(spanID[:], id)
+	return pdata.NewSpanID(spanID)
+}
+
+func (dp *PerfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) {
+
+	// Generate 7 data points per metric.
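+	// Every data point increments dataItemsGenerated, so a single batch
+	// accounts for ItemsPerBatch * 7 data items (for illustration, 10 metrics
+	// per batch yield 70 items).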
+	const dataPointsPerMetric = 7
+
+	md := pdata.NewMetrics()
+	md.ResourceMetrics().Resize(1)
+	md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().Resize(1)
+	if dp.options.Attributes != nil {
+		attrs := md.ResourceMetrics().At(0).Resource().Attributes()
+		attrs.InitEmptyWithCapacity(len(dp.options.Attributes))
+		for k, v := range dp.options.Attributes {
+			attrs.UpsertString(k, v)
+		}
+	}
+	metrics := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()
+	metrics.Resize(dp.options.ItemsPerBatch)
+
+	for i := 0; i < dp.options.ItemsPerBatch; i++ {
+		metric := metrics.At(i)
+		metric.SetName("load_generator_" + strconv.Itoa(i))
+		metric.SetDescription("Load Generator Counter #" + strconv.Itoa(i))
+		metric.SetUnit("1")
+		metric.SetDataType(pdata.MetricDataTypeIntGauge)
+
+		batchIndex := dp.batchesGenerated.Inc()
+
+		dps := metric.IntGauge().DataPoints()
+		// Generate data points for the metric.
+		dps.Resize(dataPointsPerMetric)
+		for j := 0; j < dataPointsPerMetric; j++ {
+			dataPoint := dps.At(j)
+			dataPoint.SetStartTime(pdata.TimestampUnixNano(uint64(time.Now().UnixNano())))
+			value := dp.dataItemsGenerated.Inc()
+			dataPoint.SetValue(int64(value))
+			dataPoint.LabelsMap().InitFromMap(map[string]string{
+				"item_index":  "item_" + strconv.Itoa(j),
+				"batch_index": "batch_" + strconv.Itoa(int(batchIndex)),
+			})
+		}
+	}
+	return md, false
+}
+
+func (dp *PerfTestDataProvider) GetGeneratedSpan(pdata.TraceID, pdata.SpanID) *otlptrace.Span {
+	// function not supported for this data provider
+	return nil
+}
+
+func (dp *PerfTestDataProvider) GenerateLogs() (pdata.Logs, bool) {
+	logs := pdata.NewLogs()
+	logs.ResourceLogs().Resize(1)
+	logs.ResourceLogs().At(0).InstrumentationLibraryLogs().Resize(1)
+	if dp.options.Attributes != nil {
+		attrs := logs.ResourceLogs().At(0).Resource().Attributes()
+		attrs.InitEmptyWithCapacity(len(dp.options.Attributes))
+		for k, v := range dp.options.Attributes {
+			attrs.UpsertString(k, v)
+		}
+	}
+	logRecords := logs.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs()
+	logRecords.Resize(dp.options.ItemsPerBatch)
+
+	now := pdata.TimestampUnixNano(time.Now().UnixNano())
+
+	batchIndex := dp.batchesGenerated.Inc()
+
+	for i := 0; i < dp.options.ItemsPerBatch; i++ {
+		itemIndex := dp.dataItemsGenerated.Inc()
+		record := logRecords.At(i)
+		record.SetSeverityNumber(pdata.SeverityNumberINFO3)
+		record.SetSeverityText("INFO3")
+		record.SetName("load_generator_" + strconv.Itoa(i))
+		record.Body().SetStringVal("Load Generator Counter #" + strconv.Itoa(i))
+		record.SetFlags(uint32(2))
+		record.SetTimestamp(now)
+
+		attrs := record.Attributes()
+		attrs.UpsertString("batch_index", "batch_"+strconv.Itoa(int(batchIndex)))
+		attrs.UpsertString("item_index", "item_"+strconv.Itoa(int(itemIndex)))
+		attrs.UpsertString("a", "test")
+		attrs.UpsertDouble("b", 5.0)
+		attrs.UpsertInt("c", 3)
+		attrs.UpsertBool("d", true)
+	}
+	return logs, false
+}
+
+// GoldenDataProvider is an implementation of DataProvider for use in correctness tests.
+// It provides data from the "Golden" dataset, generated using pairwise combinatorial testing techniques.
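+//
+// A typical construction (file paths hypothetical, aside from the metrics
+// pairs file used in data_providers_test.go):
+//
+//	dp := NewGoldenDataProvider(
+//		"testdata/generated_pict_pairs_traces.txt",
+//		"testdata/generated_pict_pairs_spans.txt",
+//		"testdata/generated_pict_pairs_metrics.txt",
+//		42, // random seed, for reproducible trace/span IDs
+//	)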
+type GoldenDataProvider struct { + tracePairsFile string + spanPairsFile string + random io.Reader + batchesGenerated *atomic.Uint64 + dataItemsGenerated *atomic.Uint64 + resourceSpans []*otlptrace.ResourceSpans + spansIndex int + spansMap map[string]*otlptrace.Span + + metricPairsFile string + metricsGenerated []pdata.Metrics + metricsIndex int +} + +// NewGoldenDataProvider creates a new instance of GoldenDataProvider which generates test data based +// on the pairwise combinations specified in the tracePairsFile and spanPairsFile input variables. +// The supplied randomSeed is used to initialize the random number generator used in generating tracing IDs. +func NewGoldenDataProvider(tracePairsFile string, spanPairsFile string, metricPairsFile string, randomSeed int64) *GoldenDataProvider { + return &GoldenDataProvider{ + tracePairsFile: tracePairsFile, + spanPairsFile: spanPairsFile, + metricPairsFile: metricPairsFile, + random: io.Reader(rand.New(rand.NewSource(randomSeed))), + } +} + +func (dp *GoldenDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) { + dp.batchesGenerated = batchesGenerated + dp.dataItemsGenerated = dataItemsGenerated +} + +func (dp *GoldenDataProvider) GenerateTraces() (pdata.Traces, bool) { + if dp.resourceSpans == nil { + var err error + dp.resourceSpans, err = goldendataset.GenerateResourceSpans(dp.tracePairsFile, dp.spanPairsFile, dp.random) + if err != nil { + log.Printf("cannot generate traces: %s", err) + dp.resourceSpans = make([]*otlptrace.ResourceSpans, 0) + } + } + dp.batchesGenerated.Inc() + if dp.spansIndex >= len(dp.resourceSpans) { + return pdata.TracesFromOtlp(make([]*otlptrace.ResourceSpans, 0)), true + } + resourceSpans := make([]*otlptrace.ResourceSpans, 1) + resourceSpans[0] = dp.resourceSpans[dp.spansIndex] + dp.spansIndex++ + spanCount := uint64(0) + for _, libSpans := range resourceSpans[0].InstrumentationLibrarySpans { + spanCount += uint64(len(libSpans.Spans)) + } + dp.dataItemsGenerated.Add(spanCount) + return pdata.TracesFromOtlp(resourceSpans), false +} + +func (dp *GoldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { + if dp.metricsGenerated == nil { + var err error + dp.metricsGenerated, err = goldendataset.GenerateMetricDatas(dp.metricPairsFile) + if err != nil { + log.Printf("cannot generate metrics: %s", err) + } + } + numMetricsGenerated := len(dp.metricsGenerated) + if dp.metricsIndex == numMetricsGenerated { + return pdata.Metrics{}, true + } + pdm := dp.metricsGenerated[dp.metricsIndex] + dp.metricsIndex++ + _, dpCount := pdm.MetricAndDataPointCount() + dp.dataItemsGenerated.Add(uint64(dpCount)) + return pdm, false +} + +func (dp *GoldenDataProvider) GenerateLogs() (pdata.Logs, bool) { + return pdata.NewLogs(), true +} + +func (dp *GoldenDataProvider) GetGeneratedSpan(traceID pdata.TraceID, spanID pdata.SpanID) *otlptrace.Span { + if dp.spansMap == nil { + dp.spansMap = populateSpansMap(dp.resourceSpans) + } + key := traceIDAndSpanIDToString(traceID, spanID) + return dp.spansMap[key] +} + +func populateSpansMap(resourceSpansList []*otlptrace.ResourceSpans) map[string]*otlptrace.Span { + spansMap := make(map[string]*otlptrace.Span) + for _, resourceSpans := range resourceSpansList { + for _, libSpans := range resourceSpans.InstrumentationLibrarySpans { + for _, span := range libSpans.Spans { + key := traceIDAndSpanIDToString(pdata.TraceID(span.TraceId), pdata.SpanID(span.SpanId)) + spansMap[key] = span + } + } + } + return spansMap +} + +func 
traceIDAndSpanIDToString(traceID pdata.TraceID, spanID pdata.SpanID) string { + return fmt.Sprintf("%s-%s", traceID.HexString(), spanID.HexString()) +} diff --git a/internal/otel_collector/testbed/testbed/data_providers_test.go b/internal/otel_collector/testbed/testbed/data_providers_test.go new file mode 100644 index 00000000000..f80adae298c --- /dev/null +++ b/internal/otel_collector/testbed/testbed/data_providers_test.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +const metricsPictPairsFile = "../../internal/goldendataset/testdata/generated_pict_pairs_metrics.txt" + +func TestGoldenDataProvider(t *testing.T) { + dp := NewGoldenDataProvider("", "", metricsPictPairsFile, 42) + dp.SetLoadGeneratorCounters(atomic.NewUint64(0), atomic.NewUint64(0)) + var ms []pdata.Metrics + for { + m, done := dp.GenerateMetrics() + if done { + break + } + ms = append(ms, m) + } + require.Equal(t, len(dp.metricsGenerated), len(ms)) +} diff --git a/internal/otel_collector/testbed/testbed/load_generator.go b/internal/otel_collector/testbed/testbed/load_generator.go new file mode 100644 index 00000000000..0ce3175f731 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/load_generator.go @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + "go.uber.org/atomic" + "golang.org/x/text/message" +) + +var printer = message.NewPrinter(message.MatchLanguage("en")) + +// LoadGenerator is a simple load generator. +type LoadGenerator struct { + sender DataSender + + dataProvider DataProvider + + // Number of batches of data items sent. + batchesSent atomic.Uint64 + + // Number of data items (spans or metric data points) sent. + dataItemsSent atomic.Uint64 + + stopOnce sync.Once + stopWait sync.WaitGroup + stopSignal chan struct{} + + options LoadOptions + + // Record information about previous errors to avoid flood of error messages. + prevErr error +} + +// LoadOptions defines the options to use for generating the load. +type LoadOptions struct { + // DataItemsPerSecond specifies how many spans, metric data points, or log + // records to generate each second. 
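+	// A value of 0 disables generation entirely (generate() returns right
+	// away). For illustration, 1000 items/sec with ItemsPerBatch=10 produces
+	// 100 batches per second.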
+ DataItemsPerSecond int + + // ItemsPerBatch specifies how many spans, metric data points, or log + // records per batch to generate. Should be greater than zero. The number + // of batches generated per second will be DataItemsPerSecond/ItemsPerBatch. + ItemsPerBatch int + + // Attributes to add to each generated data item. Can be empty. + Attributes map[string]string + + // Parallel specifies how many goroutines to send from. + Parallel int +} + +// NewLoadGenerator creates a load generator that sends data using specified sender. +func NewLoadGenerator(dataProvider DataProvider, sender DataSender) (*LoadGenerator, error) { + if sender == nil { + return nil, fmt.Errorf("cannot create load generator without DataSender") + } + + lg := &LoadGenerator{ + stopSignal: make(chan struct{}), + sender: sender, + dataProvider: dataProvider, + } + + return lg, nil +} + +// Start the load. +func (lg *LoadGenerator) Start(options LoadOptions) { + lg.options = options + + if lg.options.ItemsPerBatch == 0 { + // 10 items per batch by default. + lg.options.ItemsPerBatch = 10 + } + + log.Printf("Starting load generator at %d items/sec.", lg.options.DataItemsPerSecond) + + // Indicate that generation is in progress. + lg.stopWait.Add(1) + + // Begin generation + go lg.generate() +} + +// Stop the load. +func (lg *LoadGenerator) Stop() { + lg.stopOnce.Do(func() { + // Signal generate() to stop. + close(lg.stopSignal) + + // Wait for it to stop. + lg.stopWait.Wait() + + // Print stats. + log.Printf("Stopped generator. %s", lg.GetStats()) + }) +} + +// GetStats returns the stats as a printable string. +func (lg *LoadGenerator) GetStats() string { + return fmt.Sprintf("Sent:%10d items", lg.DataItemsSent()) +} + +func (lg *LoadGenerator) DataItemsSent() uint64 { + return lg.dataItemsSent.Load() +} + +// IncDataItemsSent is used when a test bypasses the LoadGenerator and sends data +// directly via TestCases's Sender. This is necessary so that the total number of sent +// items in the end is correct, because the reports are printed from LoadGenerator's +// fields. This is not the best way, a better approach would be to refactor the +// reports to use their own counter and load generator and other sending sources +// to contribute to this counter. This could be done as a future improvement. +func (lg *LoadGenerator) IncDataItemsSent() { + lg.dataItemsSent.Inc() +} + +func (lg *LoadGenerator) generate() { + // Indicate that generation is done at the end + defer lg.stopWait.Done() + + if lg.options.DataItemsPerSecond == 0 { + return + } + + lg.dataProvider.SetLoadGeneratorCounters(&lg.batchesSent, &lg.dataItemsSent) + + err := lg.sender.Start() + if err != nil { + log.Printf("Cannot start sender: %v", err) + return + } + + numWorkers := 1 + + if lg.options.Parallel > 0 { + numWorkers = lg.options.Parallel + } + + var workers sync.WaitGroup + + for i := 0; i < numWorkers; i++ { + workers.Add(1) + + go func() { + defer workers.Done() + t := time.NewTicker(time.Second / time.Duration(lg.options.DataItemsPerSecond/lg.options.ItemsPerBatch/numWorkers)) + defer t.Stop() + for { + select { + case <-t.C: + switch lg.sender.(type) { + case TraceDataSender: + lg.generateTrace() + case MetricDataSender: + lg.generateMetrics() + case LogDataSender: + lg.generateLog() + default: + log.Printf("Invalid type of LoadGenerator sender") + } + case <-lg.stopSignal: + return + } + } + }() + } + + workers.Wait() + + // Send all pending generated data. 
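+	// (For the exporter-backed senders in senders.go this is a no-op, as the
+	// exporter interface has no flush operation.)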
+ lg.sender.Flush() +} + +func (lg *LoadGenerator) generateTrace() { + traceSender := lg.sender.(TraceDataSender) + + traceData, done := lg.dataProvider.GenerateTraces() + if done { + return + } + + err := traceSender.ConsumeTraces(context.Background(), traceData) + if err == nil { + lg.prevErr = nil + } else if lg.prevErr == nil || lg.prevErr.Error() != err.Error() { + lg.prevErr = err + log.Printf("Cannot send traces: %v", err) + } +} + +func (lg *LoadGenerator) generateMetrics() { + metricSender := lg.sender.(MetricDataSender) + + metricData, done := lg.dataProvider.GenerateMetrics() + if done { + return + } + + err := metricSender.ConsumeMetrics(context.Background(), metricData) + if err == nil { + lg.prevErr = nil + } else if lg.prevErr == nil || lg.prevErr.Error() != err.Error() { + lg.prevErr = err + log.Printf("Cannot send metrics: %v", err) + } +} + +func (lg *LoadGenerator) generateLog() { + logSender := lg.sender.(LogDataSender) + + logData, done := lg.dataProvider.GenerateLogs() + if done { + return + } + + err := logSender.ConsumeLogs(context.Background(), logData) + if err == nil { + lg.prevErr = nil + } else if lg.prevErr == nil || lg.prevErr.Error() != err.Error() { + lg.prevErr = err + log.Printf("Cannot send logs: %v", err) + } +} diff --git a/internal/otel_collector/testbed/testbed/mock_backend.go b/internal/otel_collector/testbed/testbed/mock_backend.go new file mode 100644 index 00000000000..e57d74dcf71 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/mock_backend.go @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "context" + "log" + "os" + "sync" + "time" + + "go.uber.org/atomic" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// MockBackend is a backend that allows receiving the data locally. +type MockBackend struct { + // Metric and trace consumers + tc *MockTraceConsumer + mc *MockMetricConsumer + lc *MockLogConsumer + + receiver DataReceiver + + // Log file + logFilePath string + logFile *os.File + + // Start/stop flags + isStarted bool + stopOnce sync.Once + startedAt time.Time + + // Recording fields. + isRecording bool + recordMutex sync.Mutex + ReceivedTraces []pdata.Traces + ReceivedMetrics []pdata.Metrics + ReceivedLogs []pdata.Logs +} + +// NewMockBackend creates a new mock backend that receives data using specified receiver. +func NewMockBackend(logFilePath string, receiver DataReceiver) *MockBackend { + mb := &MockBackend{ + logFilePath: logFilePath, + receiver: receiver, + tc: &MockTraceConsumer{}, + mc: &MockMetricConsumer{}, + lc: &MockLogConsumer{}, + } + mb.tc.backend = mb + mb.mc.backend = mb + mb.lc.backend = mb + return mb +} + +func (mb *MockBackend) ReportFatalError(err error) { + log.Printf("Fatal error reported: %v", err) +} + +// Start a backend. 
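+// A minimal usage sketch (receiver choice hypothetical):
+//
+//	mb := NewMockBackend("mockbackend.log", NewOTLPDataReceiver(DefaultOTLPPort))
+//	if err := mb.Start(); err != nil { /* fail the test */ }
+//	defer mb.Stop()
+//	mb.EnableRecording() // optional: retain received data for assertions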
+func (mb *MockBackend) Start() error {
+	log.Printf("Starting mock backend...")
+
+	var err error
+
+	// Open log file
+	mb.logFile, err = os.Create(mb.logFilePath)
+	if err != nil {
+		return err
+	}
+
+	err = mb.receiver.Start(mb.tc, mb.mc, mb.lc)
+	if err != nil {
+		return err
+	}
+
+	mb.isStarted = true
+	mb.startedAt = time.Now()
+	return nil
+}
+
+// Stop the backend.
+func (mb *MockBackend) Stop() {
+	mb.stopOnce.Do(func() {
+		if !mb.isStarted {
+			return
+		}
+
+		log.Printf("Stopping mock backend...")
+
+		mb.logFile.Close()
+		mb.receiver.Stop()
+
+		// Print stats.
+		log.Printf("Stopped backend. %s", mb.GetStats())
+	})
+}
+
+// EnableRecording enables recording of all data received by MockBackend.
+func (mb *MockBackend) EnableRecording() {
+	mb.recordMutex.Lock()
+	defer mb.recordMutex.Unlock()
+	mb.isRecording = true
+}
+
+func (mb *MockBackend) GetStats() string {
+	received := mb.DataItemsReceived()
+	return printer.Sprintf("Received:%10d items (%d/sec)", received, int(float64(received)/time.Since(mb.startedAt).Seconds()))
+}
+
+// DataItemsReceived returns the total number of received spans, metric data points, and log records.
+func (mb *MockBackend) DataItemsReceived() uint64 {
+	return mb.tc.numSpansReceived.Load() + mb.mc.numMetricsReceived.Load() + mb.lc.numLogRecordsReceived.Load()
+}
+
+// ClearReceivedItems clears the list of received traces and metrics. Note: counters
+// returned by DataItemsReceived() are not cleared; they are cumulative.
+func (mb *MockBackend) ClearReceivedItems() {
+	mb.recordMutex.Lock()
+	defer mb.recordMutex.Unlock()
+	mb.ReceivedTraces = nil
+	mb.ReceivedMetrics = nil
+	mb.ReceivedLogs = nil
+}
+
+func (mb *MockBackend) ConsumeTrace(td pdata.Traces) {
+	mb.recordMutex.Lock()
+	defer mb.recordMutex.Unlock()
+	if mb.isRecording {
+		mb.ReceivedTraces = append(mb.ReceivedTraces, td)
+	}
+}
+
+func (mb *MockBackend) ConsumeMetric(md pdata.Metrics) {
+	mb.recordMutex.Lock()
+	defer mb.recordMutex.Unlock()
+	if mb.isRecording {
+		mb.ReceivedMetrics = append(mb.ReceivedMetrics, md)
+	}
+}
+
+var _ consumer.TracesConsumer = (*MockTraceConsumer)(nil)
+
+func (mb *MockBackend) ConsumeLogs(ld pdata.Logs) {
+	mb.recordMutex.Lock()
+	defer mb.recordMutex.Unlock()
+	if mb.isRecording {
+		mb.ReceivedLogs = append(mb.ReceivedLogs, ld)
+	}
+}
+
+type MockTraceConsumer struct {
+	numSpansReceived atomic.Uint64
+	backend          *MockBackend
+}
+
+func (tc *MockTraceConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error {
+	tc.numSpansReceived.Add(uint64(td.SpanCount()))
+
+	rs := td.ResourceSpans()
+	for i := 0; i < rs.Len(); i++ {
+		ils := rs.At(i).InstrumentationLibrarySpans()
+		for j := 0; j < ils.Len(); j++ {
+			spans := ils.At(j).Spans()
+			for k := 0; k < spans.Len(); k++ {
+				span := spans.At(k)
+				var spanSeqnum int64
+				var traceSeqnum int64
+
+				seqnumAttr, ok := span.Attributes().Get("load_generator.span_seq_num")
+				if ok {
+					spanSeqnum = seqnumAttr.IntVal()
+				}
+
+				seqnumAttr, ok = span.Attributes().Get("load_generator.trace_seq_num")
+				if ok {
+					traceSeqnum = seqnumAttr.IntVal()
+				}
+
+				// Ignore the seqnums for now. We will use them later.
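+				// (A future correctness check could, for instance, assert
+				// that the received seqnums form a contiguous range.)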
+ _ = spanSeqnum + _ = traceSeqnum + + } + } + } + + tc.backend.ConsumeTrace(td) + + return nil +} + +var _ consumer.MetricsConsumer = (*MockMetricConsumer)(nil) + +type MockMetricConsumer struct { + numMetricsReceived atomic.Uint64 + backend *MockBackend +} + +func (mc *MockMetricConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { + _, dataPoints := md.MetricAndDataPointCount() + mc.numMetricsReceived.Add(uint64(dataPoints)) + mc.backend.ConsumeMetric(md) + return nil +} + +func (tc *MockTraceConsumer) MockConsumeTraceData(spansCount int) error { + tc.numSpansReceived.Add(uint64(spansCount)) + return nil +} + +func (mc *MockMetricConsumer) MockConsumeMetricData(metricsCount int) error { + mc.numMetricsReceived.Add(uint64(metricsCount)) + return nil +} + +type MockLogConsumer struct { + numLogRecordsReceived atomic.Uint64 + backend *MockBackend +} + +func (mc *MockLogConsumer) ConsumeLogs(_ context.Context, ld pdata.Logs) error { + recordCount := ld.LogRecordCount() + mc.numLogRecordsReceived.Add(uint64(recordCount)) + mc.backend.ConsumeLogs(ld) + return nil +} diff --git a/internal/otel_collector/testbed/testbed/mock_backend_test.go b/internal/otel_collector/testbed/testbed/mock_backend_test.go new file mode 100644 index 00000000000..a837120ef95 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/mock_backend_test.go @@ -0,0 +1,101 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGeneratorAndBackend(t *testing.T) { + port := GetAvailablePort(t) + + tests := []struct { + name string + receiver DataReceiver + sender DataSender + }{ + { + name: "Jaeger-JaegerGRPC", + receiver: NewJaegerDataReceiver(port), + sender: NewJaegerGRPCDataSender(DefaultHost, port), + }, + { + name: "Zipkin-Zipkin", + receiver: NewZipkinDataReceiver(port), + sender: NewZipkinDataSender(DefaultHost, port), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mb := NewMockBackend("mockbackend.log", test.receiver) + + assert.EqualValues(t, 0, mb.DataItemsReceived()) + require.NoError(t, mb.Start(), "Cannot start backend") + + defer mb.Stop() + + options := LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10} + dataProvider := NewPerfTestDataProvider(options) + lg, err := NewLoadGenerator(dataProvider, test.sender) + require.NoError(t, err, "Cannot start load generator") + + assert.EqualValues(t, 0, lg.dataItemsSent.Load()) + + // Generate at 1000 SPS + lg.Start(LoadOptions{DataItemsPerSecond: 1000}) + + // Wait until at least 50 spans are sent + WaitFor(t, func() bool { return lg.DataItemsSent() > 50 }, "DataItemsSent > 50") + + lg.Stop() + + // The backend should receive everything generated. + assert.Equal(t, lg.DataItemsSent(), mb.DataItemsReceived()) + }) + } +} + +// WaitFor the specific condition for up to 10 seconds. 
Records a test error
+// if the condition does not become true.
+func WaitFor(t *testing.T, cond func() bool, errMsg ...interface{}) bool {
+	startTime := time.Now()
+
+	// Start with 5 ms waiting interval between condition re-evaluation.
+	waitInterval := time.Millisecond * 5
+
+	for {
+		time.Sleep(waitInterval)
+
+		// Increase waiting interval exponentially up to 500 ms.
+		if waitInterval < time.Millisecond*500 {
+			waitInterval *= 2
+		}
+
+		if cond() {
+			return true
+		}
+
+		if time.Since(startTime) > time.Second*10 {
+			// Waited too long
+			t.Error("Time out waiting for", errMsg)
+			return false
+		}
+	}
+}
diff --git a/internal/otel_collector/testbed/testbed/mockconsumer.go b/internal/otel_collector/testbed/testbed/mockconsumer.go
new file mode 100644
index 00000000000..7d22ccbe981
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/mockconsumer.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+// MockTraceDataConsumer is an interface that keeps a count of the number of events received by a mock receiver.
+// It is mainly useful for exporters that do not have a matching receiver.
+type MockTraceDataConsumer interface {
+	// MockConsumeTraceData receives traces and counts the number of events received.
+	MockConsumeTraceData(spansCount int) error
+}
+
+type MockMetricDataConsumer interface {
+	// MockConsumeMetricData receives metrics and counts the number of events received.
+	MockConsumeMetricData(metricsCount int) error
+}
diff --git a/internal/otel_collector/testbed/testbed/options.go b/internal/otel_collector/testbed/testbed/options.go
new file mode 100644
index 00000000000..ed401666c8f
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/options.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tests contains test cases. To run the tests, go to the tests directory and run:
+// RUN_TESTBED=1 go test -v
+
+package testbed
+
+// TestCaseOption defines a TestCase option.
+type TestCaseOption struct {
+	option func(t *TestCase)
+}
+
+// Apply takes a TestCase and runs the option function on it.
+func (o TestCaseOption) Apply(t *TestCase) {
+	o.option(t)
+}
+
+// WithSkipResults disables writing out the results file for a TestCase.
+func WithSkipResults() TestCaseOption {
+	return TestCaseOption{func(t *TestCase) {
+		t.skipResults = true
+	}}
+}
+
+// WithConfigFile sets a custom configuration file for a TestCase.
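+// For example (path hypothetical), given a *TestCase tc:
+//
+//	WithConfigFile("testdata/agent-config.yaml").Apply(tc)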
+func WithConfigFile(file string) TestCaseOption {
+	return TestCaseOption{func(t *TestCase) {
+		t.agentConfigFile = file
+	}}
+}
diff --git a/internal/otel_collector/testbed/testbed/otelcol_runner.go b/internal/otel_collector/testbed/testbed/otelcol_runner.go
new file mode 100644
index 00000000000..489fff4bc4e
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/otelcol_runner.go
@@ -0,0 +1,178 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/shirou/gopsutil/process"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/internal/version"
+	"go.opentelemetry.io/collector/service"
+)
+
+// OtelcolRunner defines the interface for configuring, starting and stopping one or more instances of
+// otelcol which will be the subject of testing being executed.
+type OtelcolRunner interface {
+	// PrepareConfig stores the provided YAML-based otelcol configuration file in the format needed by the otelcol
+	// instance(s) this runner manages. If successful, it returns the config cleanup function to be executed after
+	// the test is executed.
+	PrepareConfig(configStr string) (configCleanup func(), err error)
+	// Start starts the otelcol instance(s), if not already running, which are the subject of the test to be run.
+	// It returns the host:port of the data receiver to post test data to.
+	Start(args StartParams) error
+	// Stop stops the otelcol instance(s) which are the subject of the test just run, if applicable. It returns whether
+	// the instance was actually stopped or not.
+	Stop() (stopped bool, err error)
+	// WatchResourceConsumption toggles on the monitoring of resource consumption by the otelcol instance under test.
+	WatchResourceConsumption() error
+	// GetProcessMon returns the Process being used to monitor resource consumption.
+	GetProcessMon() *process.Process
+	// GetTotalConsumption returns the data collected by the process monitor.
+	GetTotalConsumption() *ResourceConsumption
+	// GetResourceConsumption returns the data collected by the process monitor as a display string.
+	GetResourceConsumption() string
+}
+
+// InProcessCollector implements the OtelcolRunner interface, running a single otelcol as a goroutine within the
+// same process as the test executor.
+type InProcessCollector struct {
+	logger    *zap.Logger
+	factories component.Factories
+	config    *configmodels.Config
+	svc       *service.Application
+	appDone   chan struct{}
+	stopped   bool
+}
+
+// NewInProcessCollector creates a new InProcessCollector using the supplied component factories.
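+// A sketch of the runner lifecycle, mirroring otelcol_runner_test.go
+// (configYAMLStr is a hypothetical collector config string):
+//
+//	factories, _ := defaultcomponents.Components()
+//	runner := NewInProcessCollector(factories)
+//	configCleanup, _ := runner.PrepareConfig(configYAMLStr)
+//	defer configCleanup()
+//	_ = runner.Start(StartParams{})
+//	defer runner.Stop()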
+func NewInProcessCollector(factories component.Factories) *InProcessCollector { + return &InProcessCollector{ + factories: factories, + } +} + +func (ipp *InProcessCollector) PrepareConfig(configStr string) (configCleanup func(), err error) { + configCleanup = func() { + // NoOp + } + var logger *zap.Logger + logger, err = configureLogger() + if err != nil { + return configCleanup, err + } + ipp.logger = logger + v := config.NewViper() + v.SetConfigType("yaml") + v.ReadConfig(strings.NewReader(configStr)) + cfg, err := config.Load(v, ipp.factories) + if err != nil { + return configCleanup, err + } + err = config.ValidateConfig(cfg, zap.NewNop()) + if err != nil { + return configCleanup, err + } + ipp.config = cfg + return configCleanup, err +} + +func (ipp *InProcessCollector) Start(args StartParams) error { + params := service.Parameters{ + ApplicationStartInfo: component.ApplicationStartInfo{ + ExeName: "otelcol", + LongName: "InProcess Collector", + Version: version.Version, + GitHash: version.GitHash, + }, + ConfigFactory: func(_ *viper.Viper, _ *cobra.Command, _ component.Factories) (*configmodels.Config, error) { + return ipp.config, nil + }, + Factories: ipp.factories, + } + var err error + ipp.svc, err = service.New(params) + if err != nil { + return err + } + ipp.svc.Command().SetArgs(args.CmdArgs) + + ipp.appDone = make(chan struct{}) + go func() { + defer close(ipp.appDone) + appErr := ipp.svc.Run() + if appErr != nil { + err = appErr + } + }() + + for state := range ipp.svc.GetStateChannel() { + switch state { + case service.Starting: + // NoOp + case service.Running: + return err + default: + err = fmt.Errorf("unable to start, otelcol state is %d", state) + } + } + return err +} + +func (ipp *InProcessCollector) Stop() (stopped bool, err error) { + if !ipp.stopped { + ipp.stopped = true + ipp.svc.Shutdown() + } + <-ipp.appDone + stopped = ipp.stopped + return stopped, err +} + +func (ipp *InProcessCollector) WatchResourceConsumption() error { + return nil +} + +func (ipp *InProcessCollector) GetProcessMon() *process.Process { + return nil +} + +func (ipp *InProcessCollector) GetTotalConsumption() *ResourceConsumption { + return &ResourceConsumption{ + CPUPercentAvg: 0, + CPUPercentMax: 0, + RAMMiBAvg: 0, + RAMMiBMax: 0, + } +} + +func (ipp *InProcessCollector) GetResourceConsumption() string { + return "" +} + +func configureLogger() (*zap.Logger, error) { + conf := zap.NewDevelopmentConfig() + conf.Level.SetLevel(zapcore.InfoLevel) + logger, err := conf.Build() + return logger, err +} diff --git a/internal/otel_collector/testbed/testbed/otelcol_runner_test.go b/internal/otel_collector/testbed/testbed/otelcol_runner_test.go new file mode 100644 index 00000000000..2c022b18cb3 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/otelcol_runner_test.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package testbed
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/collector/service/defaultcomponents"
+)
+
+func TestNewInProcessPipeline(t *testing.T) {
+	factories, err := defaultcomponents.Components()
+	assert.NoError(t, err)
+	sender := NewOTLPTraceDataSender(DefaultHost, GetAvailablePort(t))
+	receiver := NewOTLPDataReceiver(DefaultOTLPPort)
+	runner := NewInProcessCollector(factories)
+
+	format := `
+receivers:%v
+exporters:%v
+processors:
+  batch:
+
+extensions:
+
+service:
+  extensions:
+  pipelines:
+    traces:
+      receivers: [%v]
+      processors: [batch]
+      exporters: [%v]
+`
+	config := fmt.Sprintf(
+		format,
+		sender.GenConfigYAMLStr(),
+		receiver.GenConfigYAMLStr(),
+		sender.ProtocolName(),
+		receiver.ProtocolName(),
+	)
+	configCleanup, cfgErr := runner.PrepareConfig(config)
+	defer configCleanup()
+	assert.NoError(t, cfgErr)
+	assert.NotNil(t, configCleanup)
+	assert.NotNil(t, runner.config)
+	args := StartParams{}
+	defer runner.Stop()
+	assert.NoError(t, runner.Start(args))
+	assert.NotNil(t, runner.svc)
+}
diff --git a/internal/otel_collector/testbed/testbed/receivers.go b/internal/otel_collector/testbed/testbed/receivers.go
new file mode 100644
index 00000000000..52aebc0508d
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/receivers.go
@@ -0,0 +1,383 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configmodels"
+	"go.opentelemetry.io/collector/config/confignet"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/receiver/jaegerreceiver"
+	"go.opentelemetry.io/collector/receiver/opencensusreceiver"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver"
+	"go.opentelemetry.io/collector/receiver/prometheusreceiver"
+	"go.opentelemetry.io/collector/receiver/zipkinreceiver"
+)
+
+// DataReceiver allows receiving traces or metrics. This is an interface that must
+// be implemented by all protocols that want to be used in MockBackend.
+// Note the terminology: testbed.DataReceiver is something that can listen and receive data
+// from the Collector, and the corresponding entity in the Collector that sends this data is
+// an exporter.
+type DataReceiver interface {
+	Start(tc consumer.TracesConsumer, mc consumer.MetricsConsumer, lc consumer.LogsConsumer) error
+	Stop() error
+
+	// Generate a config string to place in the exporter part of the collector config
+	// so that it can send data to this receiver.
+	GenConfigYAMLStr() string
+
+	// Return the exporterType name to use in the collector config pipeline.
+	ProtocolName() string
+}
+
+// DataReceiverBase implements basic functions needed by all receivers.
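+// Its ReportFatalError/GetFactory/GetExtensions/GetExporters methods allow a
+// concrete receiver wrapper to be passed as the host argument when starting
+// the underlying receiver (e.g. or.traceReceiver.Start(ctx, or) below).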
+type DataReceiverBase struct { + // Port on which to listen. + Port int +} + +const DefaultHost = "localhost" + +func (mb *DataReceiverBase) ReportFatalError(err error) { + log.Printf("Fatal error reported: %v", err) +} + +// GetFactory of the specified kind. Returns the factory for a component type. +func (mb *DataReceiverBase) GetFactory(_ component.Kind, _ configmodels.Type) component.Factory { + return nil +} + +// Return map of extensions. Only enabled and created extensions will be returned. +func (mb *DataReceiverBase) GetExtensions() map[configmodels.Extension]component.ServiceExtension { + return nil +} + +func (mb *DataReceiverBase) GetExporters() map[configmodels.DataType]map[configmodels.Exporter]component.Exporter { + return nil +} + +// OCDataReceiver implements OpenCensus format receiver. +type OCDataReceiver struct { + DataReceiverBase + traceReceiver component.TracesReceiver + metricsReceiver component.MetricsReceiver +} + +// Ensure OCDataReceiver implements DataReceiver. +var _ DataReceiver = (*OCDataReceiver)(nil) + +const DefaultOCPort = 56565 + +// NewOCDataReceiver creates a new OCDataReceiver that will listen on the specified port after Start +// is called. +func NewOCDataReceiver(port int) *OCDataReceiver { + return &OCDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +} + +func (or *OCDataReceiver) Start(tc consumer.TracesConsumer, mc consumer.MetricsConsumer, _ consumer.LogsConsumer) error { + factory := opencensusreceiver.NewFactory() + cfg := factory.CreateDefaultConfig().(*opencensusreceiver.Config) + cfg.SetName(or.ProtocolName()) + cfg.NetAddr = confignet.NetAddr{Endpoint: fmt.Sprintf("localhost:%d", or.Port), Transport: "tcp"} + var err error + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + if or.traceReceiver, err = factory.CreateTracesReceiver(context.Background(), params, cfg, tc); err != nil { + return err + } + if or.metricsReceiver, err = factory.CreateMetricsReceiver(context.Background(), params, cfg, mc); err != nil { + return err + } + if err = or.traceReceiver.Start(context.Background(), or); err != nil { + return err + } + return or.metricsReceiver.Start(context.Background(), or) +} + +func (or *OCDataReceiver) Stop() error { + if err := or.traceReceiver.Shutdown(context.Background()); err != nil { + return err + } + if err := or.metricsReceiver.Shutdown(context.Background()); err != nil { + return err + } + return nil +} + +func (or *OCDataReceiver) GenConfigYAMLStr() string { + // Note that this generates an exporter config for agent. + return fmt.Sprintf(` + opencensus: + endpoint: "localhost:%d" + insecure: true`, or.Port) +} + +func (or *OCDataReceiver) ProtocolName() string { + return "opencensus" +} + +// JaegerDataReceiver implements Jaeger format receiver. 
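+// It is typically paired with NewJaegerGRPCDataSender on the sending side, as
+// in the "Jaeger-JaegerGRPC" case of mock_backend_test.go.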
+type JaegerDataReceiver struct { + DataReceiverBase + receiver component.TracesReceiver +} + +var _ DataReceiver = (*JaegerDataReceiver)(nil) + +const DefaultJaegerPort = 14250 + +func NewJaegerDataReceiver(port int) *JaegerDataReceiver { + return &JaegerDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +} + +func (jr *JaegerDataReceiver) Start(tc consumer.TracesConsumer, _ consumer.MetricsConsumer, _ consumer.LogsConsumer) error { + factory := jaegerreceiver.NewFactory() + cfg := factory.CreateDefaultConfig().(*jaegerreceiver.Config) + cfg.SetName(jr.ProtocolName()) + cfg.Protocols.GRPC = &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{Endpoint: fmt.Sprintf("localhost:%d", jr.Port), Transport: "tcp"}, + } + var err error + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + jr.receiver, err = factory.CreateTracesReceiver(context.Background(), params, cfg, tc) + if err != nil { + return err + } + + return jr.receiver.Start(context.Background(), jr) +} + +func (jr *JaegerDataReceiver) Stop() error { + return jr.receiver.Shutdown(context.Background()) +} + +func (jr *JaegerDataReceiver) GenConfigYAMLStr() string { + // Note that this generates an exporter config for agent. + return fmt.Sprintf(` + jaeger: + endpoint: "localhost:%d" + insecure: true`, jr.Port) +} + +func (jr *JaegerDataReceiver) ProtocolName() string { + return "jaeger" +} + +// baseOTLPDataReceiver implements the OTLP format receiver. +type baseOTLPDataReceiver struct { + DataReceiverBase + // One of the "otlp" for OTLP over gRPC or "otlphttp" for OTLP over HTTP. + exporterType string + traceReceiver component.TracesReceiver + metricsReceiver component.MetricsReceiver + logReceiver component.LogsReceiver +} + +func (bor *baseOTLPDataReceiver) Start(tc consumer.TracesConsumer, mc consumer.MetricsConsumer, lc consumer.LogsConsumer) error { + factory := otlpreceiver.NewFactory() + cfg := factory.CreateDefaultConfig().(*otlpreceiver.Config) + cfg.SetName(bor.exporterType) + if bor.exporterType == "otlp" { + cfg.GRPC.NetAddr = confignet.NetAddr{Endpoint: fmt.Sprintf("localhost:%d", bor.Port), Transport: "tcp"} + cfg.HTTP = nil + } else { + cfg.HTTP.Endpoint = fmt.Sprintf("localhost:%d", bor.Port) + cfg.GRPC = nil + } + var err error + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + if bor.traceReceiver, err = factory.CreateTracesReceiver(context.Background(), params, cfg, tc); err != nil { + return err + } + if bor.metricsReceiver, err = factory.CreateMetricsReceiver(context.Background(), params, cfg, mc); err != nil { + return err + } + if bor.logReceiver, err = factory.CreateLogsReceiver(context.Background(), params, cfg, lc); err != nil { + return err + } + + if err = bor.traceReceiver.Start(context.Background(), bor); err != nil { + return err + } + if err = bor.metricsReceiver.Start(context.Background(), bor); err != nil { + return err + } + return bor.logReceiver.Start(context.Background(), bor) +} + +func (bor *baseOTLPDataReceiver) Stop() error { + if err := bor.traceReceiver.Shutdown(context.Background()); err != nil { + return err + } + if err := bor.metricsReceiver.Shutdown(context.Background()); err != nil { + return err + } + return bor.logReceiver.Shutdown(context.Background()) +} + +func (bor *baseOTLPDataReceiver) ProtocolName() string { + return bor.exporterType +} + +func (bor *baseOTLPDataReceiver) GenConfigYAMLStr() string { + addr := fmt.Sprintf("localhost:%d", bor.Port) + if bor.exporterType == "otlphttp" { + addr = "http://" + addr + } + // Note that 
this generates an exporter config for agent.
+	return fmt.Sprintf(`
+  %s:
+    endpoint: "%s"
+    insecure: true`, bor.exporterType, addr)
+}
+
+const DefaultOTLPPort = 55680
+
+// NewOTLPDataReceiver creates a new OTLP DataReceiver that will listen on the specified port after Start
+// is called.
+func NewOTLPDataReceiver(port int) DataReceiver {
+	return &baseOTLPDataReceiver{
+		DataReceiverBase: DataReceiverBase{Port: port},
+		exporterType:     "otlp",
+	}
+}
+
+// NewOTLPHTTPDataReceiver creates a new OTLP/HTTP DataReceiver that will listen on the specified port after Start
+// is called.
+func NewOTLPHTTPDataReceiver(port int) DataReceiver {
+	return &baseOTLPDataReceiver{
+		DataReceiverBase: DataReceiverBase{Port: port},
+		exporterType:     "otlphttp",
+	}
+}
+
+// ZipkinDataReceiver implements Zipkin format receiver.
+type ZipkinDataReceiver struct {
+	DataReceiverBase
+	receiver component.TracesReceiver
+}
+
+var _ DataReceiver = (*ZipkinDataReceiver)(nil)
+
+const DefaultZipkinAddressPort = 9411
+
+func NewZipkinDataReceiver(port int) *ZipkinDataReceiver {
+	return &ZipkinDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}}
+}
+
+func (zr *ZipkinDataReceiver) Start(tc consumer.TracesConsumer, _ consumer.MetricsConsumer, _ consumer.LogsConsumer) error {
+	factory := zipkinreceiver.NewFactory()
+	cfg := factory.CreateDefaultConfig().(*zipkinreceiver.Config)
+	cfg.SetName(zr.ProtocolName())
+	cfg.Endpoint = fmt.Sprintf("localhost:%d", zr.Port)
+
+	params := component.ReceiverCreateParams{Logger: zap.NewNop()}
+	var err error
+	zr.receiver, err = factory.CreateTracesReceiver(context.Background(), params, cfg, tc)
+
+	if err != nil {
+		return err
+	}
+
+	return zr.receiver.Start(context.Background(), zr)
+}
+
+func (zr *ZipkinDataReceiver) Stop() error {
+	return zr.receiver.Shutdown(context.Background())
+}
+
+func (zr *ZipkinDataReceiver) GenConfigYAMLStr() string {
+	// Note that this generates an exporter config for agent.
+ return fmt.Sprintf(` + zipkin: + endpoint: http://localhost:%d/api/v2/spans + format: json`, zr.Port) +} + +func (zr *ZipkinDataReceiver) ProtocolName() string { + return "zipkin" +} + +// prometheus + +type PrometheusDataReceiver struct { + DataReceiverBase + receiver component.MetricsReceiver +} + +var _ DataReceiver = (*PrometheusDataReceiver)(nil) + +func NewPrometheusDataReceiver(port int) *PrometheusDataReceiver { + return &PrometheusDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +} + +func (dr *PrometheusDataReceiver) Start(_ consumer.TracesConsumer, mc consumer.MetricsConsumer, _ consumer.LogsConsumer) error { + factory := prometheusreceiver.NewFactory() + cfg := factory.CreateDefaultConfig().(*prometheusreceiver.Config) + addr := fmt.Sprintf("0.0.0.0:%d", dr.Port) + cfg.PrometheusConfig = &config.Config{ + ScrapeConfigs: []*config.ScrapeConfig{{ + JobName: "testbed-job", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(time.Second), + ServiceDiscoveryConfigs: discovery.Configs{ + &discovery.StaticConfig{ + { + Targets: []model.LabelSet{{ + "__address__": model.LabelValue(addr), + "__scheme__": "http", + "__metrics_path__": "/metrics", + }}, + }, + }, + }, + }}, + } + var err error + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + dr.receiver, err = factory.CreateMetricsReceiver(context.Background(), params, cfg, mc) + if err != nil { + return err + } + return dr.receiver.Start(context.Background(), dr) +} + +func (dr *PrometheusDataReceiver) Stop() error { + return dr.receiver.Shutdown(context.Background()) +} + +// Generate exporter yaml +func (dr *PrometheusDataReceiver) GenConfigYAMLStr() string { + format := ` + prometheus: + endpoint: "localhost:%d" +` + return fmt.Sprintf(format, dr.Port) +} + +func (dr *PrometheusDataReceiver) ProtocolName() string { + return "prometheus" +} diff --git a/internal/otel_collector/testbed/testbed/results.go b/internal/otel_collector/testbed/testbed/results.go new file mode 100644 index 00000000000..809897844b6 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/results.go @@ -0,0 +1,215 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testbed + +import ( + "fmt" + "io" + "log" + "os" + "path" + "time" +) + +// TestResultsSummary defines the interface to record results of one category of testing. +type TestResultsSummary interface { + // Create and open the file and write headers. + Init(resultsDir string) + // Add results for one test. + Add(testName string, result interface{}) + // Save the total results and close the file. + Save() +} + +// PerformanceResults implements the TestResultsSummary interface with fields suitable for reporting +// performance test results. +type PerformanceResults struct { + resultsDir string + resultsFile *os.File + perTestResults []*PerformanceTestResult + totalDuration time.Duration +} + +// PerformanceTestResult reports the results of a single performance test. 
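+// Each result is written by Add (below) as one row of the markdown results
+// table, e.g. (values hypothetical, column widths elided):
+//
+//	TestTrace10kSPS|PASS|60s|12.5|20.0|65|70|600000|600000|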
+type PerformanceTestResult struct { + testName string + result string + duration time.Duration + cpuPercentageAvg float64 + cpuPercentageMax float64 + ramMibAvg uint32 + ramMibMax uint32 + sentSpanCount uint64 + receivedSpanCount uint64 + errorCause string +} + +func (r *PerformanceResults) Init(resultsDir string) { + r.resultsDir = resultsDir + r.perTestResults = []*PerformanceTestResult{} + + // Create resultsSummary file + os.MkdirAll(resultsDir, os.FileMode(0755)) + var err error + r.resultsFile, err = os.Create(path.Join(r.resultsDir, "TESTRESULTS.md")) + if err != nil { + log.Fatalf(err.Error()) + } + + // Write the header + _, _ = io.WriteString(r.resultsFile, + "# Test PerformanceResults\n"+ + fmt.Sprintf("Started: %s\n\n", time.Now().Format(time.RFC1123Z))+ + "Test |Result|Duration|CPU Avg%|CPU Max%|RAM Avg MiB|RAM Max MiB|Sent Items|Received Items|\n"+ + "----------------------------------------|------|-------:|-------:|-------:|----------:|----------:|---------:|-------------:|\n") +} + +// Save the total results and close the file. +func (r *PerformanceResults) Save() { + _, _ = io.WriteString(r.resultsFile, + fmt.Sprintf("\nTotal duration: %.0fs\n", r.totalDuration.Seconds())) + r.resultsFile.Close() +} + +// Add results for one test. +func (r *PerformanceResults) Add(_ string, result interface{}) { + testResult, ok := result.(*PerformanceTestResult) + if !ok { + return + } + _, _ = io.WriteString(r.resultsFile, + fmt.Sprintf("%-40s|%-6s|%7.0fs|%8.1f|%8.1f|%11d|%11d|%10d|%14d|%s\n", + testResult.testName, + testResult.result, + testResult.duration.Seconds(), + testResult.cpuPercentageAvg, + testResult.cpuPercentageMax, + testResult.ramMibAvg, + testResult.ramMibMax, + testResult.sentSpanCount, + testResult.receivedSpanCount, + testResult.errorCause, + ), + ) + r.totalDuration += testResult.duration +} + +// CorrectnessResults implements the TestResultsSummary interface with fields suitable for reporting data translation +// correctness test results. +type CorrectnessResults struct { + resultsDir string + resultsFile *os.File + perTestResults []*CorrectnessTestResult + totalAssertionFailures uint64 + totalDuration time.Duration +} + +// CorrectnessTestResult reports the results of a single correctness test. 
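+// Assertion failures are consolidated per field path before being written out,
+// so, for illustration, 100 failures on the same fieldPath collapse into a
+// single entry with sumCount=100 (see consolidateAssertionFailures below).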
+type CorrectnessTestResult struct { + testName string + result string + duration time.Duration + sentSpanCount uint64 + receivedSpanCount uint64 + traceAssertionFailureCount uint64 + traceAssertionFailures []*TraceAssertionFailure +} + +type TraceAssertionFailure struct { + typeName string + dataComboName string + fieldPath string + expectedValue interface{} + actualValue interface{} + sumCount int +} + +func (af TraceAssertionFailure) String() string { + return fmt.Sprintf("%s/%s e=%#v a=%#v ", af.dataComboName, af.fieldPath, af.expectedValue, af.actualValue) +} + +func (r *CorrectnessResults) Init(resultsDir string) { + r.resultsDir = resultsDir + r.perTestResults = []*CorrectnessTestResult{} + + // Create resultsSummary file + os.MkdirAll(resultsDir, os.FileMode(0755)) + var err error + r.resultsFile, err = os.Create(path.Join(r.resultsDir, "CORRECTNESSRESULTS.md")) + if err != nil { + log.Fatalf(err.Error()) + } + + // Write the header + _, _ = io.WriteString(r.resultsFile, + "# Test Results\n"+ + fmt.Sprintf("Started: %s\n\n", time.Now().Format(time.RFC1123Z))+ + "Test |Result|Duration|Sent Items|Received Items|Failure Count|Failures\n"+ + "----------------------------------------|------|-------:|---------:|-------------:|------------:|--------\n") +} + +func (r *CorrectnessResults) Add(_ string, result interface{}) { + testResult, ok := result.(*CorrectnessTestResult) + if !ok { + return + } + consolidated := consolidateAssertionFailures(testResult.traceAssertionFailures) + failuresStr := "" + for _, af := range consolidated { + failuresStr = fmt.Sprintf("%s%s,%#v!=%#v,count=%d; ", failuresStr, af.fieldPath, af.expectedValue, + af.actualValue, af.sumCount) + } + _, _ = io.WriteString(r.resultsFile, + fmt.Sprintf("%-40s|%-6s|%7.0fs|%10d|%14d|%13d|%s\n", + testResult.testName, + testResult.result, + testResult.duration.Seconds(), + testResult.sentSpanCount, + testResult.receivedSpanCount, + testResult.traceAssertionFailureCount, + failuresStr, + ), + ) + r.perTestResults = append(r.perTestResults, testResult) + r.totalAssertionFailures += testResult.traceAssertionFailureCount + r.totalDuration += testResult.duration +} + +func (r *CorrectnessResults) Save() { + _, _ = io.WriteString(r.resultsFile, + fmt.Sprintf("\nTotal assertion failures: %d\n", r.totalAssertionFailures)) + _, _ = io.WriteString(r.resultsFile, + fmt.Sprintf("\nTotal duration: %.0fs\n", r.totalDuration.Seconds())) + r.resultsFile.Close() +} + +func consolidateAssertionFailures(failures []*TraceAssertionFailure) map[string]*TraceAssertionFailure { + afMap := make(map[string]*TraceAssertionFailure) + for _, f := range failures { + summary := afMap[f.fieldPath] + if summary == nil { + summary = &TraceAssertionFailure{ + typeName: f.typeName, + dataComboName: f.dataComboName + "...", + fieldPath: f.fieldPath, + expectedValue: f.expectedValue, + actualValue: f.actualValue, + } + afMap[f.fieldPath] = summary + } + summary.sumCount++ + } + return afMap +} diff --git a/internal/otel_collector/testbed/testbed/senders.go b/internal/otel_collector/testbed/testbed/senders.go new file mode 100644 index 00000000000..3e00b497d30 --- /dev/null +++ b/internal/otel_collector/testbed/testbed/senders.go @@ -0,0 +1,746 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "log"
+    "os"
+    "strconv"
+    "time"
+
+    "go.uber.org/zap"
+
+    "go.opentelemetry.io/collector/component"
+    "go.opentelemetry.io/collector/config/configmodels"
+    "go.opentelemetry.io/collector/config/configtls"
+    "go.opentelemetry.io/collector/consumer"
+    "go.opentelemetry.io/collector/consumer/pdata"
+    "go.opentelemetry.io/collector/exporter/jaegerexporter"
+    "go.opentelemetry.io/collector/exporter/opencensusexporter"
+    "go.opentelemetry.io/collector/exporter/otlpexporter"
+    "go.opentelemetry.io/collector/exporter/otlphttpexporter"
+    "go.opentelemetry.io/collector/exporter/prometheusexporter"
+    "go.opentelemetry.io/collector/exporter/zipkinexporter"
+)
+
+// DataSender defines the interface that allows sending data. This is an interface
+// that must be implemented by all protocols that want to be used in LoadGenerator.
+// Note the terminology: a testbed.DataSender is something that sends data to the
+// Collector, and the corresponding entity that receives the data in the Collector
+// is a receiver.
+type DataSender interface {
+    // Start the sender and connect to the configured endpoint. Must be called before
+    // sending data.
+    Start() error
+
+    // Flush sends any accumulated data.
+    Flush()
+
+    // GetEndpoint returns the endpoint (host:port) to which this sender will send data.
+    GetEndpoint() string
+
+    // GenConfigYAMLStr generates a config string to place in the receiver part of
+    // the collector config so that it can receive data from this sender.
+    GenConfigYAMLStr() string
+
+    // ProtocolName returns the exporter type name to use in the collector config pipeline.
+    ProtocolName() string
+}
+
+// TraceDataSender defines the interface that allows sending trace data. It adds the
+// ability to send a batch of Spans on top of the DataSender interface.
+type TraceDataSender interface {
+    DataSender
+    consumer.TracesConsumer
+}
+
+// MetricDataSender defines the interface that allows sending metric data. It adds the
+// ability to send a batch of Metrics on top of the DataSender interface.
+type MetricDataSender interface {
+    DataSender
+    consumer.MetricsConsumer
+}
+
+// LogDataSender defines the interface that allows sending log data. It adds the
+// ability to send a batch of Logs on top of the DataSender interface.
+type LogDataSender interface {
+    DataSender
+    consumer.LogsConsumer
+}
+
+type DataSenderBase struct {
+    Port int
+    Host string
+}
+
+func (dsb *DataSenderBase) GetEndpoint() string {
+    return fmt.Sprintf("%s:%d", dsb.Host, dsb.Port)
+}
+
+func (dsb *DataSenderBase) ReportFatalError(err error) {
+    log.Printf("Fatal error reported: %v", err)
+}
+
+// GetFactory returns the factory of the specified kind for the given component type.
+// The testbed senders have none, so this always returns nil.
+func (dsb *DataSenderBase) GetFactory(_ component.Kind, _ configmodels.Type) component.Factory {
+    return nil
+}
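+// A concrete sender is used roughly as follows (illustrative sketch; the
+// constructor and consumer call are from this file, the surrounding test
+// wiring and the "traces" value are hypothetical):
+//
+//	sender := NewOTLPTraceDataSender(DefaultHost, GetAvailablePort(t))
+//	if err := sender.Start(); err != nil {
+//		t.Fatal(err)
+//	}
+//	_ = sender.ConsumeTraces(context.Background(), traces) // sender is a consumer.TracesConsumer
+//	sender.Flush()
+
+// Return map of extensions. Only enabled and created extensions will be returned.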
+func (dsb *DataSenderBase) GetExtensions() map[configmodels.Extension]component.ServiceExtension {
+    return nil
+}
+
+func (dsb *DataSenderBase) GetExporters() map[configmodels.DataType]map[configmodels.Exporter]component.Exporter {
+    return nil
+}
+
+func (dsb *DataSenderBase) Flush() {
+    // Exporter interface does not support Flush, so nothing to do.
+}
+
+// JaegerGRPCDataSender implements TraceDataSender for the Jaeger gRPC exporter.
+type JaegerGRPCDataSender struct {
+    DataSenderBase
+    consumer.TracesConsumer
+}
+
+// Ensure JaegerGRPCDataSender implements TraceDataSender.
+var _ TraceDataSender = (*JaegerGRPCDataSender)(nil)
+
+// NewJaegerGRPCDataSender creates a new Jaeger gRPC sender that will send
+// to the specified port after Start is called.
+func NewJaegerGRPCDataSender(host string, port int) *JaegerGRPCDataSender {
+    return &JaegerGRPCDataSender{
+        DataSenderBase: DataSenderBase{Port: port, Host: host},
+    }
+}
+
+func (je *JaegerGRPCDataSender) Start() error {
+    factory := jaegerexporter.NewFactory()
+    cfg := factory.CreateDefaultConfig().(*jaegerexporter.Config)
+    // Disable retries, we should push data and if error just log it.
+    cfg.RetrySettings.Enabled = false
+    // Disable sending queue, we should push data from the caller goroutine.
+    cfg.QueueSettings.Enabled = false
+    cfg.Endpoint = je.GetEndpoint()
+    cfg.TLSSetting = configtls.TLSClientSetting{
+        Insecure: true,
+    }
+
+    exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    je.TracesConsumer = exp
+    return exp.Start(context.Background(), je)
+}
+
+func (je *JaegerGRPCDataSender) GenConfigYAMLStr() string {
+    return fmt.Sprintf(`
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: "%s"`, je.GetEndpoint())
+}
+
+func (je *JaegerGRPCDataSender) ProtocolName() string {
+    return "jaeger"
+}
+
+type ocDataSender struct {
+    DataSenderBase
+}
+
+func (ods *ocDataSender) fillConfig(cfg *opencensusexporter.Config) *opencensusexporter.Config {
+    cfg.Endpoint = ods.GetEndpoint()
+    cfg.TLSSetting = configtls.TLSClientSetting{
+        Insecure: true,
+    }
+    return cfg
+}
+
+func (ods *ocDataSender) GenConfigYAMLStr() string {
+    // Note that this generates a receiver config for the agent.
+    return fmt.Sprintf(`
+  opencensus:
+    endpoint: "%s"`, ods.GetEndpoint())
+}
+
+func (ods *ocDataSender) ProtocolName() string {
+    return "opencensus"
+}
+
+// OCTraceDataSender implements TraceDataSender for the OpenCensus trace exporter.
+type OCTraceDataSender struct {
+    ocDataSender
+    consumer.TracesConsumer
+}
+
+// Ensure OCTraceDataSender implements TraceDataSender.
+var _ TraceDataSender = (*OCTraceDataSender)(nil)
+
+// NewOCTraceDataSender creates a new OCTraceDataSender that will send
+// to the specified port after Start is called.
+func NewOCTraceDataSender(host string, port int) *OCTraceDataSender {
+    return &OCTraceDataSender{
+        ocDataSender: ocDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (ote *OCTraceDataSender) Start() error {
+    factory := opencensusexporter.NewFactory()
+    cfg := ote.fillConfig(factory.CreateDefaultConfig().(*opencensusexporter.Config))
+    exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    ote.TracesConsumer = exp
+    return exp.Start(context.Background(), ote)
+}
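+// The fragment returned by GenConfigYAMLStr is spliced under the receivers key
+// of the generated agent config, e.g. (illustrative, for endpoint localhost:56565):
+//
+//	receivers:
+//	  opencensus:
+//	    endpoint: "localhost:56565"
+
+// OCMetricsDataSender implements MetricDataSender for the OpenCensus metrics exporter.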
+type OCMetricsDataSender struct {
+    ocDataSender
+    consumer.MetricsConsumer
+}
+
+// Ensure OCMetricsDataSender implements MetricDataSender.
+var _ MetricDataSender = (*OCMetricsDataSender)(nil)
+
+// NewOCMetricDataSender creates a new OpenCensus metric sender that will send
+// to the specified port after Start is called.
+func NewOCMetricDataSender(host string, port int) *OCMetricsDataSender {
+    return &OCMetricsDataSender{
+        ocDataSender: ocDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (ome *OCMetricsDataSender) Start() error {
+    factory := opencensusexporter.NewFactory()
+    cfg := ome.fillConfig(factory.CreateDefaultConfig().(*opencensusexporter.Config))
+    exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    ome.MetricsConsumer = exp
+    return exp.Start(context.Background(), ome)
+}
+
+type otlpHTTPDataSender struct {
+    DataSenderBase
+}
+
+func (ods *otlpHTTPDataSender) fillConfig(cfg *otlphttpexporter.Config) *otlphttpexporter.Config {
+    cfg.Endpoint = fmt.Sprintf("http://%s", ods.GetEndpoint())
+    // Disable retries, we should push data and if error just log it.
+    cfg.RetrySettings.Enabled = false
+    // Disable sending queue, we should push data from the caller goroutine.
+    cfg.QueueSettings.Enabled = false
+    cfg.TLSSetting = configtls.TLSClientSetting{
+        Insecure: true,
+    }
+    return cfg
+}
+
+func (ods *otlpHTTPDataSender) GenConfigYAMLStr() string {
+    // Note that this generates a receiver config for the agent.
+    return fmt.Sprintf(`
+  otlp:
+    protocols:
+      http:
+        endpoint: "%s"`, ods.GetEndpoint())
+}
+
+func (ods *otlpHTTPDataSender) ProtocolName() string {
+    return "otlp"
+}
+
+// OTLPHTTPTraceDataSender implements TraceDataSender for the OTLP/HTTP trace exporter.
+type OTLPHTTPTraceDataSender struct {
+    otlpHTTPDataSender
+    consumer.TracesConsumer
+}
+
+// Ensure OTLPHTTPTraceDataSender implements TraceDataSender.
+var _ TraceDataSender = (*OTLPHTTPTraceDataSender)(nil)
+
+// NewOTLPHTTPTraceDataSender creates a new TraceDataSender for the OTLP/HTTP trace exporter.
+func NewOTLPHTTPTraceDataSender(host string, port int) *OTLPHTTPTraceDataSender {
+    return &OTLPHTTPTraceDataSender{
+        otlpHTTPDataSender: otlpHTTPDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (ote *OTLPHTTPTraceDataSender) Start() error {
+    factory := otlphttpexporter.NewFactory()
+    cfg := ote.fillConfig(factory.CreateDefaultConfig().(*otlphttpexporter.Config))
+    exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    ote.TracesConsumer = exp
+    return exp.Start(context.Background(), ote)
+}
+
+// OTLPHTTPMetricsDataSender implements MetricDataSender for the OTLP/HTTP metrics exporter.
+type OTLPHTTPMetricsDataSender struct {
+    otlpHTTPDataSender
+    consumer.MetricsConsumer
+}
+
+// Ensure OTLPHTTPMetricsDataSender implements MetricDataSender.
+var _ MetricDataSender = (*OTLPHTTPMetricsDataSender)(nil)
+
+// NewOTLPHTTPMetricDataSender creates a new OTLP/HTTP metric sender that will send
+// to the specified port after Start is called.
+func NewOTLPHTTPMetricDataSender(host string, port int) *OTLPHTTPMetricsDataSender {
+    return &OTLPHTTPMetricsDataSender{
+        otlpHTTPDataSender: otlpHTTPDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (ome *OTLPHTTPMetricsDataSender) Start() error {
+    factory := otlphttpexporter.NewFactory()
+    cfg := ome.fillConfig(factory.CreateDefaultConfig().(*otlphttpexporter.Config))
+    exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    ome.MetricsConsumer = exp
+    return exp.Start(context.Background(), ome)
+}
+
+// OTLPHTTPLogsDataSender implements LogDataSender for the OTLP/HTTP logs exporter.
+type OTLPHTTPLogsDataSender struct {
+    otlpHTTPDataSender
+    consumer.LogsConsumer
+}
+
+// Ensure OTLPHTTPLogsDataSender implements LogDataSender.
+var _ LogDataSender = (*OTLPHTTPLogsDataSender)(nil)
+
+// NewOTLPHTTPLogsDataSender creates a new OTLP/HTTP logs sender that will send
+// to the specified port after Start is called.
+func NewOTLPHTTPLogsDataSender(host string, port int) *OTLPHTTPLogsDataSender {
+    return &OTLPHTTPLogsDataSender{
+        otlpHTTPDataSender: otlpHTTPDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (olds *OTLPHTTPLogsDataSender) Start() error {
+    factory := otlphttpexporter.NewFactory()
+    cfg := olds.fillConfig(factory.CreateDefaultConfig().(*otlphttpexporter.Config))
+    exp, err := factory.CreateLogsExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    olds.LogsConsumer = exp
+    return exp.Start(context.Background(), olds)
+}
+
+type otlpDataSender struct {
+    DataSenderBase
+}
+
+func (ods *otlpDataSender) fillConfig(cfg *otlpexporter.Config) *otlpexporter.Config {
+    cfg.Endpoint = ods.GetEndpoint()
+    // Disable retries, we should push data and if error just log it.
+    cfg.RetrySettings.Enabled = false
+    // Disable sending queue, we should push data from the caller goroutine.
+    cfg.QueueSettings.Enabled = false
+    cfg.TLSSetting = configtls.TLSClientSetting{
+        Insecure: true,
+    }
+    return cfg
+}
+
+func (ods *otlpDataSender) GenConfigYAMLStr() string {
+    // Note that this generates a receiver config for the agent.
+    return fmt.Sprintf(`
+  otlp:
+    protocols:
+      grpc:
+        endpoint: "%s"`, ods.GetEndpoint())
+}
+
+func (ods *otlpDataSender) ProtocolName() string {
+    return "otlp"
+}
+
+// OTLPTraceDataSender implements TraceDataSender for the OTLP trace exporter.
+type OTLPTraceDataSender struct {
+    otlpDataSender
+    consumer.TracesConsumer
+}
+
+// Ensure OTLPTraceDataSender implements TraceDataSender.
+var _ TraceDataSender = (*OTLPTraceDataSender)(nil)
+
+// NewOTLPTraceDataSender creates a new TraceDataSender for the OTLP trace exporter.
+func NewOTLPTraceDataSender(host string, port int) *OTLPTraceDataSender {
+    return &OTLPTraceDataSender{
+        otlpDataSender: otlpDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (ote *OTLPTraceDataSender) Start() error {
+    factory := otlpexporter.NewFactory()
+    cfg := ote.fillConfig(factory.CreateDefaultConfig().(*otlpexporter.Config))
+    exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    ote.TracesConsumer = exp
+    return exp.Start(context.Background(), ote)
+}
+
+// OTLPMetricsDataSender implements MetricDataSender for the OTLP metrics exporter.
+type OTLPMetricsDataSender struct {
+    otlpDataSender
+    consumer.MetricsConsumer
+}
+
+// Ensure OTLPMetricsDataSender implements MetricDataSender.
+var _ MetricDataSender = (*OTLPMetricsDataSender)(nil)
+
+// NewOTLPMetricDataSender creates a new OTLP metric sender that will send
+// to the specified port after Start is called.
+func NewOTLPMetricDataSender(host string, port int) *OTLPMetricsDataSender {
+    return &OTLPMetricsDataSender{
+        otlpDataSender: otlpDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (ome *OTLPMetricsDataSender) Start() error {
+    factory := otlpexporter.NewFactory()
+    cfg := ome.fillConfig(factory.CreateDefaultConfig().(*otlpexporter.Config))
+    exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    ome.MetricsConsumer = exp
+    return exp.Start(context.Background(), ome)
+}
+
+// OTLPLogsDataSender implements LogDataSender for the OTLP logs exporter.
+type OTLPLogsDataSender struct {
+    otlpDataSender
+    consumer.LogsConsumer
+}
+
+// Ensure OTLPLogsDataSender implements LogDataSender.
+var _ LogDataSender = (*OTLPLogsDataSender)(nil)
+
+// NewOTLPLogsDataSender creates a new OTLP logs sender that will send
+// to the specified port after Start is called.
+func NewOTLPLogsDataSender(host string, port int) *OTLPLogsDataSender {
+    return &OTLPLogsDataSender{
+        otlpDataSender: otlpDataSender{
+            DataSenderBase: DataSenderBase{
+                Port: port,
+                Host: host,
+            },
+        },
+    }
+}
+
+func (olds *OTLPLogsDataSender) Start() error {
+    factory := otlpexporter.NewFactory()
+    cfg := olds.fillConfig(factory.CreateDefaultConfig().(*otlpexporter.Config))
+    exp, err := factory.CreateLogsExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    olds.LogsConsumer = exp
+    return exp.Start(context.Background(), olds)
+}
+
+// ZipkinDataSender implements TraceDataSender for the Zipkin HTTP exporter.
+type ZipkinDataSender struct {
+    DataSenderBase
+    consumer.TracesConsumer
+}
+
+// Ensure ZipkinDataSender implements TraceDataSender.
+var _ TraceDataSender = (*ZipkinDataSender)(nil)
+
+// NewZipkinDataSender creates a new Zipkin sender that will send
+// to the specified port after Start is called.
+func NewZipkinDataSender(host string, port int) *ZipkinDataSender {
+    return &ZipkinDataSender{
+        DataSenderBase: DataSenderBase{
+            Port: port,
+            Host: host,
+        },
+    }
+}
+
+func (zs *ZipkinDataSender) Start() error {
+    factory := zipkinexporter.NewFactory()
+    cfg := factory.CreateDefaultConfig().(*zipkinexporter.Config)
+    cfg.Endpoint = fmt.Sprintf("http://%s/api/v2/spans", zs.GetEndpoint())
+    // Disable retries, we should push data and if error just log it.
+    cfg.RetrySettings.Enabled = false
+    // Disable sending queue, we should push data from the caller goroutine.
+    cfg.QueueSettings.Enabled = false
+
+    exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    zs.TracesConsumer = exp
+    return exp.Start(context.Background(), zs)
+}
+
+func (zs *ZipkinDataSender) GenConfigYAMLStr() string {
+    return fmt.Sprintf(`
+  zipkin:
+    endpoint: %s`, zs.GetEndpoint())
+}
+
+func (zs *ZipkinDataSender) ProtocolName() string {
+    return "zipkin"
+}
+
+// PrometheusDataSender implements MetricDataSender for the Prometheus exporter.
+type PrometheusDataSender struct {
+    DataSenderBase
+    consumer.MetricsConsumer
+    namespace string
+}
+
+// Ensure PrometheusDataSender implements MetricDataSender.
+var _ MetricDataSender = (*PrometheusDataSender)(nil)
+
+// NewPrometheusDataSender creates a new Prometheus sender that will serve metrics
+// at the specified host:port after Start is called.
+func NewPrometheusDataSender(host string, port int) *PrometheusDataSender {
+    return &PrometheusDataSender{
+        DataSenderBase: DataSenderBase{
+            Port: port,
+            Host: host,
+        },
+    }
+}
+
+func (pds *PrometheusDataSender) Start() error {
+    factory := prometheusexporter.NewFactory()
+    cfg := factory.CreateDefaultConfig().(*prometheusexporter.Config)
+    cfg.Endpoint = pds.GetEndpoint()
+    cfg.Namespace = pds.namespace
+
+    exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg)
+    if err != nil {
+        return err
+    }
+
+    pds.MetricsConsumer = exp
+    return exp.Start(context.Background(), pds)
+}
+
+func (pds *PrometheusDataSender) GenConfigYAMLStr() string {
+    format := `
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: 'testbed'
+          scrape_interval: 100ms
+          static_configs:
+            - targets: ['%s']
+`
+    return fmt.Sprintf(format, pds.GetEndpoint())
+}
+
+func (pds *PrometheusDataSender) ProtocolName() string {
+    return "prometheus"
+}
+
+// FluentBitFileLogWriter implements LogDataSender by writing JSON-encoded log
+// records to a file that FluentBit tails and forwards to the collector.
+type FluentBitFileLogWriter struct {
+    DataSenderBase
+    file        *os.File
+    parsersFile *os.File
+}
+
+// Ensure FluentBitFileLogWriter implements LogDataSender.
+var _ LogDataSender = (*FluentBitFileLogWriter)(nil)
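+// Each consumed log record becomes one JSON line in the tailed file, e.g.
+// (illustrative; the field names come from convertLogToJSON below, the values
+// are hypothetical):
+//
+//	{"time":"21/01/2021:15:04:05Z","log":"example message","attr1":"value1"}
+
+// NewFluentBitFileLogWriter creates a new data sender that will write log entries to a
+// file, to be tailed by FluentBit and sent to the collector.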
+func NewFluentBitFileLogWriter(host string, port int) *FluentBitFileLogWriter {
+    file, err := ioutil.TempFile("", "perf-logs.json")
+    if err != nil {
+        panic("failed to create temp file: " + err.Error())
+    }
+
+    parsersFile, err := ioutil.TempFile("", "parsers.json")
+    if err != nil {
+        panic("failed to create temp file: " + err.Error())
+    }
+
+    f := &FluentBitFileLogWriter{
+        DataSenderBase: DataSenderBase{
+            Port: port,
+            Host: host,
+        },
+        file:        file,
+        parsersFile: parsersFile,
+    }
+    f.setupParsers()
+    return f
+}
+
+// Start is a no-op: the log file is created up front and FluentBit tails it.
+func (f *FluentBitFileLogWriter) Start() error {
+    return nil
+}
+
+func (f *FluentBitFileLogWriter) setupParsers() {
+    _, err := f.parsersFile.Write([]byte(`
+[PARSER]
+    Name        json
+    Format      json
+    Time_Key    time
+    Time_Format %d/%m/%Y:%H:%M:%S %z
+`))
+    if err != nil {
+        panic("failed to write parsers: " + err.Error())
+    }
+
+    f.parsersFile.Close()
+}
+
+func (f *FluentBitFileLogWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error {
+    for i := 0; i < logs.ResourceLogs().Len(); i++ {
+        for j := 0; j < logs.ResourceLogs().At(i).InstrumentationLibraryLogs().Len(); j++ {
+            ills := logs.ResourceLogs().At(i).InstrumentationLibraryLogs().At(j)
+            for k := 0; k < ills.Logs().Len(); k++ {
+                _, err := f.file.Write(append(f.convertLogToJSON(ills.Logs().At(k)), '\n'))
+                if err != nil {
+                    return err
+                }
+            }
+        }
+    }
+    return nil
+}
+
+func (f *FluentBitFileLogWriter) convertLogToJSON(lr pdata.LogRecord) []byte {
+    rec := map[string]string{
+        "time": time.Unix(0, int64(lr.Timestamp())).Format("02/01/2006:15:04:05Z"),
+    }
+    rec["log"] = lr.Body().StringVal()
+
+    lr.Attributes().ForEach(func(k string, v pdata.AttributeValue) {
+        switch v.Type() {
+        case pdata.AttributeValueSTRING:
+            rec[k] = v.StringVal()
+        case pdata.AttributeValueINT:
+            rec[k] = strconv.FormatInt(v.IntVal(), 10)
+        case pdata.AttributeValueDOUBLE:
+            rec[k] = strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64)
+        case pdata.AttributeValueBOOL:
+            rec[k] = strconv.FormatBool(v.BoolVal())
+        default:
+            panic("unsupported attribute value type")
+        }
+    })
+    b, err := json.Marshal(rec)
+    if err != nil {
+        panic("failed to write log: " + err.Error())
+    }
+    return b
+}
+
+func (f *FluentBitFileLogWriter) Flush() {
+    _ = f.file.Sync()
+}
+
+func (f *FluentBitFileLogWriter) GenConfigYAMLStr() string {
+    // Note that this generates a receiver config for the agent.
+    return fmt.Sprintf(`
+  fluentforward:
+    endpoint: "%s"`, f.GetEndpoint())
+}
+
+// Extensions returns the fluentbit extension config that tails the file written
+// by this sender and forwards its contents to the collector.
+func (f *FluentBitFileLogWriter) Extensions() map[string]string {
+    return map[string]string{
+        "fluentbit": fmt.Sprintf(`
+  fluentbit:
+    executable_path: fluent-bit
+    tcp_endpoint: "%s"
+    config: |
+      [SERVICE]
+        parsers_file %s
+      [INPUT]
+        Name tail
+        parser json
+        path %s
+`, f.GetEndpoint(), f.parsersFile.Name(), f.file.Name()),
+    }
+}
+
+func (f *FluentBitFileLogWriter) ProtocolName() string {
+    return "fluentforward"
+}
+
+func defaultExporterParams() component.ExporterCreateParams {
+    return component.ExporterCreateParams{Logger: zap.L()}
+}
diff --git a/internal/otel_collector/testbed/testbed/test_bed.go b/internal/otel_collector/testbed/testbed/test_bed.go
new file mode 100644
index 00000000000..e10f5710b6f
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/test_bed.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testbed makes it easy to set up tests that require running the agent
+// and a load generator, to measure and define resource consumption expectations
+// for the agent, and to fail tests automatically when expectations are exceeded.
+//
+// Each test case requires an agent configuration file and (optionally) a load
+// generator spec file. Test cases are defined as regular Go tests.
+//
+// Agent and load generator must be pre-built and their paths must be specified in
+// the test bed config file. The RUN_TESTBED env variable must be defined for tests to run.
+package testbed
+
+import (
+    "log"
+    "os"
+    "path/filepath"
+    "testing"
+)
+
+func Start(resultsSummary TestResultsSummary) error {
+    dir, err := filepath.Abs("results")
+    if err != nil {
+        log.Fatal(err)
+    }
+    resultsSummary.Init(dir)
+
+    return err
+}
+
+func SaveResults(resultsSummary TestResultsSummary) {
+    resultsSummary.Save()
+}
+
+const testBedEnableEnvVarName = "RUN_TESTBED"
+
+var GlobalConfig = struct {
+    // Relative path to default agent executable to test.
+    // Can be set in the contrib repo to use a different executable name.
+    // Set this before calling DoTestMain().
+    //
+    // If used in the path, {{.GOOS}} and {{.GOARCH}} will be expanded to the current
+    // OS and ARCH correspondingly.
+    //
+    // Individual tests can override this by setting the AgentExePath of ChildProcess
+    // that is passed to the TestCase.
+    DefaultAgentExeRelativeFile string
+}{
+    // The default exe that is produced by Makefile "otelcol" target relative
+    // to testbed/tests directory.
+    DefaultAgentExeRelativeFile: "../../bin/otelcol_{{.GOOS}}_{{.GOARCH}}",
+}
+
+// DoTestMain is intended to be run from TestMain somewhere in the test suite.
+// This enables the testbed.
+func DoTestMain(m *testing.M, resultsSummary TestResultsSummary) {
+    testBedConfigFile := os.Getenv(testBedEnableEnvVarName)
+    if testBedConfigFile == "" {
+        log.Print(testBedEnableEnvVarName + " is not defined, skipping E2E tests.")
+        os.Exit(0)
+    }
+
+    // Load the test bed config first.
+    err := Start(resultsSummary)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Now run all tests.
+    res := m.Run()
+
+    SaveResults(resultsSummary)
+
+    os.Exit(res)
+}
diff --git a/internal/otel_collector/testbed/testbed/test_case.go b/internal/otel_collector/testbed/testbed/test_case.go
new file mode 100644
index 00000000000..8ce5577f8ab
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/test_case.go
@@ -0,0 +1,355 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+    "log"
+    "net"
+    "os"
+    "path"
+    "path/filepath"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/require"
+)
+
+// TestCase defines a running test case.
+type TestCase struct {
+    t *testing.T
+
+    // Directory where test case results and logs will be written.
+    resultDir string
+
+    // Does not write out results when set to true.
+    skipResults bool
+
+    // Agent config file path.
+    agentConfigFile string
+
+    // Load generator spec file path.
+    // loadSpecFile string
+
+    // Resource spec for agent.
+    resourceSpec ResourceSpec
+
+    // Agent process.
+    agentProc OtelcolRunner
+
+    Sender   DataSender
+    Receiver DataReceiver
+
+    LoadGenerator *LoadGenerator
+    MockBackend   *MockBackend
+    validator     TestCaseValidator
+
+    startTime time.Time
+
+    // ErrorSignal indicates an error in the test case execution, e.g. process
+    // execution failure or exceeded resource consumption. The actual error
+    // message is already logged; this is only an indicator on which you can
+    // wait to be informed.
+    ErrorSignal chan struct{}
+
+    // Duration is the requested duration of the tests. Configured via the
+    // TESTCASE_DURATION env variable and defaults to 15 seconds if the env
+    // variable is unspecified.
+    Duration time.Duration
+
+    doneSignal chan struct{}
+
+    errorCause string
+
+    resultsSummary TestResultsSummary
+}
+
+const mibibyte = 1024 * 1024
+const testcaseDurationVar = "TESTCASE_DURATION"
+
+// NewTestCase creates a new TestCase. It expects agent-config.yaml in the specified directory.
+func NewTestCase(
+    t *testing.T,
+    dataProvider DataProvider,
+    sender DataSender,
+    receiver DataReceiver,
+    agentProc OtelcolRunner,
+    validator TestCaseValidator,
+    resultsSummary TestResultsSummary,
+    opts ...TestCaseOption,
+) *TestCase {
+    tc := TestCase{}
+
+    tc.t = t
+    tc.ErrorSignal = make(chan struct{})
+    tc.doneSignal = make(chan struct{})
+    tc.startTime = time.Now()
+    tc.Sender = sender
+    tc.Receiver = receiver
+    tc.agentProc = agentProc
+    tc.validator = validator
+    tc.resultsSummary = resultsSummary
+
+    // Get the requested test case duration from the env variable.
+    duration := os.Getenv(testcaseDurationVar)
+    if duration == "" {
+        duration = "15s"
+    }
+    var err error
+    tc.Duration, err = time.ParseDuration(duration)
+    if err != nil {
+        log.Fatalf("Invalid "+testcaseDurationVar+": %v. Expecting a valid duration string.", duration)
+    }
+
+    // Apply all provided options.
+    for _, opt := range opts {
+        opt.Apply(&tc)
+    }
+
+    // Prepare directory for results.
+    tc.resultDir, err = filepath.Abs(path.Join("results", t.Name()))
+    require.NoErrorf(t, err, "Cannot resolve %s", t.Name())
+    require.NoErrorf(t, os.MkdirAll(tc.resultDir, os.ModePerm), "Cannot create directory %s", tc.resultDir)
+
+    // Set the default resource check period.
+    tc.resourceSpec.ResourceCheckPeriod = 3 * time.Second
+    if tc.Duration < tc.resourceSpec.ResourceCheckPeriod {
+        // The resource check period should not be longer than the entire test duration.
+        tc.resourceSpec.ResourceCheckPeriod = tc.Duration
+    }
+
+    tc.LoadGenerator, err = NewLoadGenerator(dataProvider, sender)
+    require.NoError(t, err, "Cannot create generator")
+
+    tc.MockBackend = NewMockBackend(tc.composeTestResultFileName("backend.log"), receiver)
+
+    go tc.logStats()
+
+    return &tc
+}
+
+func (tc *TestCase) composeTestResultFileName(fileName string) string {
+    fileName, err := filepath.Abs(path.Join(tc.resultDir, fileName))
+    require.NoError(tc.t, err, "Cannot resolve %s", fileName)
+    return fileName
+}
+
+// SetResourceLimits sets expected limits for resource consumption.
+// An error is signaled if consumption during ResourceCheckPeriod exceeds the limits.
+// Limits are modified only for non-zero fields of resourceSpec; all zero-value fields
+// of resourceSpec are ignored and their previous values remain in effect.
+func (tc *TestCase) SetResourceLimits(resourceSpec ResourceSpec) {
+    if resourceSpec.ExpectedMaxCPU > 0 {
+        tc.resourceSpec.ExpectedMaxCPU = resourceSpec.ExpectedMaxCPU
+    }
+    if resourceSpec.ExpectedMaxRAM > 0 {
+        tc.resourceSpec.ExpectedMaxRAM = resourceSpec.ExpectedMaxRAM
+    }
+    if resourceSpec.ResourceCheckPeriod > 0 {
+        tc.resourceSpec.ResourceCheckPeriod = resourceSpec.ResourceCheckPeriod
+    }
+}
+
+// StartAgent starts the agent and redirects its standard output and standard error
+// to the "agent.log" file located in the test directory.
+func (tc *TestCase) StartAgent(args ...string) {
+    if tc.agentConfigFile != "" {
+        args = append(args, "--config")
+        args = append(args, tc.agentConfigFile)
+    }
+    logFileName := tc.composeTestResultFileName("agent.log")
+
+    err := tc.agentProc.Start(StartParams{
+        Name:         "Agent",
+        LogFilePath:  logFileName,
+        CmdArgs:      args,
+        resourceSpec: &tc.resourceSpec,
+    })
+
+    if err != nil {
+        tc.indicateError(err)
+        return
+    }
+
+    // Start watching resource consumption.
+    go func() {
+        err := tc.agentProc.WatchResourceConsumption()
+        if err != nil {
+            tc.indicateError(err)
+        }
+    }()
+
+    endpoint := tc.LoadGenerator.sender.GetEndpoint()
+    if endpoint != "" {
+        // Wait for the agent to start. We consider the agent started when we can
+        // connect to the port to which we intend to send load. We only do this
+        // if the endpoint is non-empty, i.e. the sender does use the network
+        // (some senders, like text log writers, don't).
+        tc.WaitFor(func() bool {
+            conn, err := net.Dial("tcp", tc.LoadGenerator.sender.GetEndpoint())
+            if err != nil {
+                return false
+            }
+            conn.Close() // Close the probe connection so it does not leak.
+            return true
+        })
+    }
+}
+
+// StopAgent stops the agent process.
+func (tc *TestCase) StopAgent() {
+    tc.agentProc.Stop()
+}
+
+// StartLoad starts the load generator and redirects its standard output and standard error
+// to the "load-generator.log" file located in the test directory.
+func (tc *TestCase) StartLoad(options LoadOptions) {
+    tc.LoadGenerator.Start(options)
+}
+
+// StopLoad stops the load generator.
+func (tc *TestCase) StopLoad() {
+    tc.LoadGenerator.Stop()
+}
+
+// StartBackend starts the specified backend type.
+func (tc *TestCase) StartBackend() {
+    require.NoError(tc.t, tc.MockBackend.Start(), "Cannot start backend")
+}
+
+// StopBackend stops the backend.
+func (tc *TestCase) StopBackend() {
+    tc.MockBackend.Stop()
+}
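+// A typical test drives the TestCase through this lifecycle (illustrative
+// sketch; see the tests under testbed/tests for real usage):
+//
+//	tc := NewTestCase(t, dataProvider, sender, receiver,
+//		&ChildProcess{}, &PerfTestValidator{}, resultsSummary)
+//	defer tc.Stop()
+//	tc.StartBackend()
+//	tc.StartAgent()
+//	tc.StartLoad(LoadOptions{DataItemsPerSecond: 10_000})
+//	tc.Sleep(tc.Duration)
+
+// EnableRecording enables recording of all data received by MockBackend.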
+func (tc *TestCase) EnableRecording() {
+    tc.MockBackend.EnableRecording()
+}
+
+// AgentMemoryInfo returns the agent's RSS and VMS memory usage in MiB, based on
+// the memory info reported by github.com/shirou/gopsutil/process.
+func (tc *TestCase) AgentMemoryInfo() (uint32, uint32, error) {
+    stat, err := tc.agentProc.GetProcessMon().MemoryInfo()
+    if err != nil {
+        return 0, 0, err
+    }
+    return uint32(stat.RSS / mibibyte), uint32(stat.VMS / mibibyte), nil
+}
+
+// Stop stops the load generator, the agent and the backend.
+func (tc *TestCase) Stop() {
+    // Stop all components
+    tc.StopLoad()
+    tc.StopAgent()
+    tc.StopBackend()
+
+    // Stop logging
+    close(tc.doneSignal)
+
+    if tc.skipResults {
+        return
+    }
+
+    // Report test results
+    tc.validator.RecordResults(tc)
+}
+
+// ValidateData validates data received by the mock backend against what was generated and sent to the collector
+// instance(s) under test by the LoadGenerator.
+func (tc *TestCase) ValidateData() {
+    select {
+    case <-tc.ErrorSignal:
+        // Error is already signaled and recorded. Validating data is pointless.
+        return
+    default:
+    }
+
+    tc.validator.Validate(tc)
+}
+
+// Sleep for the specified duration or until an error is signaled.
+func (tc *TestCase) Sleep(d time.Duration) {
+    select {
+    case <-time.After(d):
+    case <-tc.ErrorSignal:
+    }
+}
+
+// WaitForN waits for the specified condition for up to the specified duration. Records a
+// test error if the time runs out and the condition does not become true. If an error is
+// signaled while waiting, the function returns false but does not record an additional
+// test error (we assume the signaled error was already recorded in indicateError()).
+func (tc *TestCase) WaitForN(cond func() bool, duration time.Duration, errMsg ...interface{}) bool {
+    startTime := time.Now()
+
+    // Start with a 5 ms waiting interval between condition re-evaluations.
+    waitInterval := time.Millisecond * 5
+
+    for {
+        if cond() {
+            return true
+        }
+
+        select {
+        case <-time.After(waitInterval):
+        case <-tc.ErrorSignal:
+            return false
+        }
+
+        // Increase the waiting interval exponentially up to 500 ms.
+        if waitInterval < time.Millisecond*500 {
+            waitInterval *= 2
+        }
+
+        if time.Since(startTime) > duration {
+            // Waited too long
+            tc.t.Error("Timed out waiting for", errMsg)
+            return false
+        }
+    }
+}
+
+// WaitFor is like WaitForN but with a fixed duration of 10 seconds.
+func (tc *TestCase) WaitFor(cond func() bool, errMsg ...interface{}) bool {
+    return tc.WaitForN(cond, time.Second*10, errMsg...)
+}
+
+func (tc *TestCase) indicateError(err error) {
+    // Print to the log for visibility
+    log.Print(err.Error())
+
+    // Indicate the error for the test
+    tc.t.Error(err.Error())
+
+    tc.errorCause = err.Error()
+
+    // Signal the error via the channel
+    close(tc.ErrorSignal)
+}
+
+func (tc *TestCase) logStats() {
+    t := time.NewTicker(tc.resourceSpec.ResourceCheckPeriod)
+    defer t.Stop()
+
+    for {
+        select {
+        case <-t.C:
+            tc.logStatsOnce()
+        case <-tc.doneSignal:
+            return
+        }
+    }
+}
+
+func (tc *TestCase) logStatsOnce() {
+    log.Printf("%s | %s | %s",
+        tc.agentProc.GetResourceConsumption(),
+        tc.LoadGenerator.GetStats(),
+        tc.MockBackend.GetStats())
+}
diff --git a/internal/otel_collector/testbed/testbed/utils.go b/internal/otel_collector/testbed/testbed/utils.go
new file mode 100644
index 00000000000..160a2559b9d
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/utils.go
@@ -0,0 +1,25 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+    "testing"
+
+    "go.opentelemetry.io/collector/testutil"
+)
+
+// GetAvailablePort finds an available local port and returns it.
+func GetAvailablePort(t *testing.T) int {
+    return int(testutil.GetAvailablePort(t))
+}
diff --git a/internal/otel_collector/testbed/testbed/validator.go b/internal/otel_collector/testbed/testbed/validator.go
new file mode 100644
index 00000000000..b18b14e208e
--- /dev/null
+++ b/internal/otel_collector/testbed/testbed/validator.go
@@ -0,0 +1,618 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+    "encoding/json"
+    "fmt"
+    "log"
+    "reflect"
+    "sort"
+    "strings"
+    "time"
+
+    "github.com/stretchr/testify/assert"
+
+    "go.opentelemetry.io/collector/consumer/pdata"
+    otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1"
+    otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1"
+)
+
+// TestCaseValidator defines the interface for validating and reporting test results.
+type TestCaseValidator interface {
+    // Validate executes validation routines and test assertions.
+    Validate(tc *TestCase)
+    // RecordResults updates the TestResultsSummary for the test suite with results of a single test.
+    RecordResults(tc *TestCase)
+}
+
+// PerfTestValidator implements TestCaseValidator for test suites using PerformanceResults for summarizing results.
+type PerfTestValidator struct{}
+
+func (v *PerfTestValidator) Validate(tc *TestCase) {
+    if assert.EqualValues(tc.t, tc.LoadGenerator.DataItemsSent(), tc.MockBackend.DataItemsReceived(),
+        "Received and sent counters do not match.") {
+        log.Printf("Sent and received data matches.")
+    }
+}
+
+func (v *PerfTestValidator) RecordResults(tc *TestCase) {
+    rc := tc.agentProc.GetTotalConsumption()
+
+    var result string
+    if tc.t.Failed() {
+        result = "FAIL"
+    } else {
+        result = "PASS"
+    }
+
+    // Remove "Test" prefix from the test name.
+    testName := tc.t.Name()[4:]
+
+    tc.resultsSummary.Add(tc.t.Name(), &PerformanceTestResult{
+        testName:          testName,
+        result:            result,
+        receivedSpanCount: tc.MockBackend.DataItemsReceived(),
+        sentSpanCount:     tc.LoadGenerator.DataItemsSent(),
+        duration:          time.Since(tc.startTime),
+        cpuPercentageAvg:  rc.CPUPercentAvg,
+        cpuPercentageMax:  rc.CPUPercentMax,
+        ramMibAvg:         rc.RAMMiBAvg,
+        ramMibMax:         rc.RAMMiBMax,
+        errorCause:        tc.errorCause,
+    })
+}
+
+// CorrectnessTestValidator implements TestCaseValidator for test suites using CorrectnessResults for summarizing results.
+type CorrectnessTestValidator struct { + dataProvider DataProvider + assertionFailures []*TraceAssertionFailure +} + +func NewCorrectTestValidator(provider DataProvider) *CorrectnessTestValidator { + return &CorrectnessTestValidator{ + dataProvider: provider, + assertionFailures: make([]*TraceAssertionFailure, 0), + } +} + +func (v *CorrectnessTestValidator) Validate(tc *TestCase) { + if assert.EqualValues(tc.t, tc.LoadGenerator.DataItemsSent(), tc.MockBackend.DataItemsReceived(), + "Received and sent counters do not match.") { + log.Printf("Sent and received data counters match.") + } + if len(tc.MockBackend.ReceivedTraces) > 0 { + v.assertSentRecdTracingDataEqual(tc.MockBackend.ReceivedTraces) + } + assert.EqualValues(tc.t, 0, len(v.assertionFailures), "There are span data mismatches.") +} + +func (v *CorrectnessTestValidator) RecordResults(tc *TestCase) { + var result string + if tc.t.Failed() { + result = "FAIL" + } else { + result = "PASS" + } + + // Remove "Test" prefix from test name. + testName := tc.t.Name()[4:] + tc.resultsSummary.Add(tc.t.Name(), &CorrectnessTestResult{ + testName: testName, + result: result, + duration: time.Since(tc.startTime), + receivedSpanCount: tc.MockBackend.DataItemsReceived(), + sentSpanCount: tc.LoadGenerator.DataItemsSent(), + traceAssertionFailureCount: uint64(len(v.assertionFailures)), + traceAssertionFailures: v.assertionFailures, + }) +} + +func (v *CorrectnessTestValidator) assertSentRecdTracingDataEqual(tracesList []pdata.Traces) { + for _, td := range tracesList { + resourceSpansList := pdata.TracesToOtlp(td) + for _, rs := range resourceSpansList { + for _, ils := range rs.InstrumentationLibrarySpans { + for _, recdSpan := range ils.Spans { + sentSpan := v.dataProvider.GetGeneratedSpan(pdata.TraceID(recdSpan.TraceId), pdata.SpanID(recdSpan.SpanId)) + v.diffSpan(sentSpan, recdSpan) + } + } + } + + } +} + +func (v *CorrectnessTestValidator) diffSpan(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if sentSpan == nil { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: recdSpan.Name, + } + v.assertionFailures = append(v.assertionFailures, af) + return + } + v.diffSpanTraceID(sentSpan, recdSpan) + v.diffSpanSpanID(sentSpan, recdSpan) + v.diffSpanTraceState(sentSpan, recdSpan) + v.diffSpanParentSpanID(sentSpan, recdSpan) + v.diffSpanName(sentSpan, recdSpan) + v.diffSpanKind(sentSpan, recdSpan) + v.diffSpanTimestamps(sentSpan, recdSpan) + v.diffSpanAttributes(sentSpan, recdSpan) + v.diffSpanEvents(sentSpan, recdSpan) + v.diffSpanLinks(sentSpan, recdSpan) + v.diffSpanStatus(sentSpan, recdSpan) +} + +func (v *CorrectnessTestValidator) diffSpanTraceID(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if sentSpan.TraceId.HexString() != recdSpan.TraceId.HexString() { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "TraceId", + expectedValue: sentSpan.TraceId.HexString(), + actualValue: recdSpan.TraceId.HexString(), + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanSpanID(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if sentSpan.SpanId.HexString() != recdSpan.SpanId.HexString() { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "SpanId", + expectedValue: sentSpan.SpanId.HexString(), + actualValue: recdSpan.SpanId.HexString(), + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanTraceState(sentSpan 
*otlptrace.Span, recdSpan *otlptrace.Span) { + if sentSpan.TraceState != recdSpan.TraceState { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "TraceState", + expectedValue: sentSpan.TraceState, + actualValue: recdSpan.TraceState, + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanParentSpanID(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if sentSpan.ParentSpanId.HexString() != recdSpan.ParentSpanId.HexString() { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "ParentSpanId", + expectedValue: sentSpan.ParentSpanId.HexString(), + actualValue: recdSpan.ParentSpanId.HexString(), + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanName(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + // Because of https://github.com/openzipkin/zipkin-go/pull/166 compare lower cases. + if !strings.EqualFold(sentSpan.Name, recdSpan.Name) { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "Name", + expectedValue: sentSpan.Name, + actualValue: recdSpan.Name, + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanKind(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if sentSpan.Kind != recdSpan.Kind { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "Kind", + expectedValue: sentSpan.Kind, + actualValue: recdSpan.Kind, + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanTimestamps(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if notWithinOneMillisecond(sentSpan.StartTimeUnixNano, recdSpan.StartTimeUnixNano) { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "StartTimeUnixNano", + expectedValue: sentSpan.StartTimeUnixNano, + actualValue: recdSpan.StartTimeUnixNano, + } + v.assertionFailures = append(v.assertionFailures, af) + } + if notWithinOneMillisecond(sentSpan.EndTimeUnixNano, recdSpan.EndTimeUnixNano) { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "EndTimeUnixNano", + expectedValue: sentSpan.EndTimeUnixNano, + actualValue: recdSpan.EndTimeUnixNano, + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanAttributes(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { + if len(sentSpan.Attributes) != len(recdSpan.Attributes) { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "Attributes", + expectedValue: len(sentSpan.Attributes), + actualValue: len(recdSpan.Attributes), + } + v.assertionFailures = append(v.assertionFailures, af) + } else { + sentAttrs := sentSpan.Attributes + recdAttrs := recdSpan.Attributes + v.diffAttributesSlice(sentSpan.Name, recdAttrs, sentAttrs, "Attributes[%s]") + } + if sentSpan.DroppedAttributesCount != recdSpan.DroppedAttributesCount { + af := &TraceAssertionFailure{ + typeName: "Span", + dataComboName: sentSpan.Name, + fieldPath: "DroppedAttributesCount", + expectedValue: sentSpan.DroppedAttributesCount, + actualValue: recdSpan.DroppedAttributesCount, + } + v.assertionFailures = append(v.assertionFailures, af) + } +} + +func (v *CorrectnessTestValidator) diffSpanEvents(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) { 
+    if len(sentSpan.Events) != len(recdSpan.Events) {
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: sentSpan.Name,
+            fieldPath:     "Events",
+            expectedValue: len(sentSpan.Events),
+            actualValue:   len(recdSpan.Events),
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    } else {
+        sentEventMap := convertEventsSliceToMap(sentSpan.Events)
+        recdEventMap := convertEventsSliceToMap(recdSpan.Events)
+        for name, sentEvents := range sentEventMap {
+            recdEvents, match := recdEventMap[name]
+            if match {
+                match = len(sentEvents) == len(recdEvents)
+            }
+            if !match {
+                af := &TraceAssertionFailure{
+                    typeName:      "Span",
+                    dataComboName: sentSpan.Name,
+                    fieldPath:     fmt.Sprintf("Events[%s]", name),
+                    expectedValue: len(sentEvents),
+                    actualValue:   len(recdEvents),
+                }
+                v.assertionFailures = append(v.assertionFailures, af)
+            } else {
+                for i, sentEvent := range sentEvents {
+                    recdEvent := recdEvents[i]
+                    if notWithinOneMillisecond(sentEvent.TimeUnixNano, recdEvent.TimeUnixNano) {
+                        af := &TraceAssertionFailure{
+                            typeName:      "Span",
+                            dataComboName: sentSpan.Name,
+                            fieldPath:     fmt.Sprintf("Events[%s].TimeUnixNano", name),
+                            expectedValue: sentEvent.TimeUnixNano,
+                            actualValue:   recdEvent.TimeUnixNano,
+                        }
+                        v.assertionFailures = append(v.assertionFailures, af)
+                    }
+                    // diffAttributesSlice takes the received attributes first
+                    // and the sent attributes second.
+                    v.diffAttributesSlice(sentSpan.Name, recdEvent.Attributes, sentEvent.Attributes,
+                        "Events["+name+"].Attributes[%s]")
+                }
+            }
+        }
+    }
+    if sentSpan.DroppedEventsCount != recdSpan.DroppedEventsCount {
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: sentSpan.Name,
+            fieldPath:     "DroppedEventsCount",
+            expectedValue: sentSpan.DroppedEventsCount,
+            actualValue:   recdSpan.DroppedEventsCount,
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    }
+}
+
+func (v *CorrectnessTestValidator) diffSpanLinks(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) {
+    if len(sentSpan.Links) != len(recdSpan.Links) {
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: sentSpan.Name,
+            fieldPath:     "Links",
+            expectedValue: len(sentSpan.Links),
+            actualValue:   len(recdSpan.Links),
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    } else {
+        recdLinksMap := convertLinksSliceToMap(recdSpan.Links)
+        for i, sentLink := range sentSpan.Links {
+            spanID := sentLink.SpanId.HexString()
+            recdLink, ok := recdLinksMap[spanID]
+            if ok {
+                // diffAttributesSlice takes the received attributes first
+                // and the sent attributes second.
+                v.diffAttributesSlice(sentSpan.Name, recdLink.Attributes, sentLink.Attributes,
+                    "Links["+spanID+"].Attributes[%s]")
+            } else {
+                af := &TraceAssertionFailure{
+                    typeName:      "Span",
+                    dataComboName: sentSpan.Name,
+                    fieldPath:     fmt.Sprintf("Links[%d]", i),
+                    expectedValue: spanID,
+                    actualValue:   "",
+                }
+                v.assertionFailures = append(v.assertionFailures, af)
+            }
+        }
+    }
+    if sentSpan.DroppedLinksCount != recdSpan.DroppedLinksCount {
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: sentSpan.Name,
+            fieldPath:     "DroppedLinksCount",
+            expectedValue: sentSpan.DroppedLinksCount,
+            actualValue:   recdSpan.DroppedLinksCount,
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    }
+}
+
+func (v *CorrectnessTestValidator) diffSpanStatus(sentSpan *otlptrace.Span, recdSpan *otlptrace.Span) {
+    if sentSpan.Status.Code != recdSpan.Status.Code {
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: sentSpan.Name,
+            fieldPath:     "Status.Code",
+            expectedValue: sentSpan.Status.Code,
+            actualValue:   recdSpan.Status.Code,
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    }
+}
+
+func (v *CorrectnessTestValidator) diffAttributesSlice(spanName string, recdAttrs []otlpcommon.KeyValue,
+    sentAttrs []otlpcommon.KeyValue, fmtStr string) {
+    recdAttrsMap := convertAttributesSliceToMap(recdAttrs)
+    for _, sentAttr := range sentAttrs {
+        recdAttr, ok := recdAttrsMap[sentAttr.Key]
+        if ok {
+            sentVal := retrieveAttributeValue(sentAttr)
+            recdVal := retrieveAttributeValue(recdAttr)
+            switch val := sentVal.(type) {
+            case *otlpcommon.KeyValueList:
+                v.compareKeyValueList(spanName, val, recdVal, fmtStr, sentAttr.Key)
+            case *otlpcommon.ArrayValue:
+                v.compareArrayList(spanName, val, recdVal, fmtStr, sentAttr.Key)
+            default:
+                v.compareSimpleValues(spanName, sentVal, recdVal, fmtStr, sentAttr.Key)
+            }
+        } else {
+            af := &TraceAssertionFailure{
+                typeName:      "Span",
+                dataComboName: spanName,
+                fieldPath:     fmt.Sprintf("Attributes[%s]", sentAttr.Key),
+                expectedValue: retrieveAttributeValue(sentAttr),
+                actualValue:   nil,
+            }
+            v.assertionFailures = append(v.assertionFailures, af)
+        }
+    }
+}
+
+func (v *CorrectnessTestValidator) compareSimpleValues(spanName string, sentVal interface{}, recdVal interface{},
+    fmtStr string, attrKey string) {
+    if !reflect.DeepEqual(sentVal, recdVal) {
+        sentStr := fmt.Sprintf("%v", sentVal)
+        recdStr := fmt.Sprintf("%v", recdVal)
+        if !strings.EqualFold(sentStr, recdStr) {
+            af := &TraceAssertionFailure{
+                typeName:      "Span",
+                dataComboName: spanName,
+                fieldPath:     fmt.Sprintf(fmtStr, attrKey),
+                expectedValue: sentVal,
+                actualValue:   recdVal,
+            }
+            v.assertionFailures = append(v.assertionFailures, af)
+        }
+    }
+}
+
+func (v *CorrectnessTestValidator) compareKeyValueList(spanName string, sentKVList *otlpcommon.KeyValueList,
+    recdVal interface{}, fmtStr string, attrKey string) {
+    switch val := recdVal.(type) {
+    case *otlpcommon.KeyValueList:
+        v.diffAttributesSlice(spanName, val.Values, sentKVList.Values, fmtStr)
+    case string:
+        jsonStr := convertKVListToJSONString(sentKVList.Values)
+        v.compareSimpleValues(spanName, jsonStr, val, fmtStr, attrKey)
+    default:
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: spanName,
+            fieldPath:     fmt.Sprintf(fmtStr, attrKey),
+            expectedValue: sentKVList,
+            actualValue:   recdVal,
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    }
+}
+
+func (v *CorrectnessTestValidator) compareArrayList(spanName string, sentArray *otlpcommon.ArrayValue,
+    recdVal interface{}, fmtStr string, attrKey string) {
+    switch val := recdVal.(type) {
+    case *otlpcommon.ArrayValue:
+        v.compareSimpleValues(spanName, sentArray.Values, val.Values, fmtStr, attrKey)
+    case string:
+        jsonStr := convertArrayValuesToJSONString(sentArray.Values)
+        v.compareSimpleValues(spanName, jsonStr, val, fmtStr, attrKey)
+    default:
+        af := &TraceAssertionFailure{
+            typeName:      "Span",
+            dataComboName: spanName,
+            fieldPath:     fmt.Sprintf(fmtStr, attrKey),
+            expectedValue: sentArray,
+            actualValue:   recdVal,
+        }
+        v.assertionFailures = append(v.assertionFailures, af)
+    }
+}
+
+func convertAttributesSliceToMap(attributes []otlpcommon.KeyValue) map[string]otlpcommon.KeyValue {
+    attrMap := make(map[string]otlpcommon.KeyValue)
+    for _, attr := range attributes {
+        attrMap[attr.Key] = attr
+    }
+    return attrMap
+}
+
+func retrieveAttributeValue(attribute otlpcommon.KeyValue) interface{} {
+    if attribute.Value.Value == nil {
+        return nil
+    }
+
+    var attrVal interface{}
+    switch val := attribute.Value.Value.(type) {
+    case *otlpcommon.AnyValue_StringValue:
+        // Because of https://github.com/openzipkin/zipkin-go/pull/166 compare lower cases.
+        attrVal = strings.ToLower(val.StringValue)
+    case *otlpcommon.AnyValue_IntValue:
+        attrVal = val.IntValue
+    case *otlpcommon.AnyValue_DoubleValue:
+        attrVal = val.DoubleValue
+    case *otlpcommon.AnyValue_BoolValue:
+        attrVal = val.BoolValue
+    case *otlpcommon.AnyValue_ArrayValue:
+        attrVal = val.ArrayValue
+    case *otlpcommon.AnyValue_KvlistValue:
+        attrVal = val.KvlistValue
+    default:
+        attrVal = nil
+    }
+    return attrVal
+}
+
+func convertEventsSliceToMap(events []*otlptrace.Span_Event) map[string][]*otlptrace.Span_Event {
+    eventMap := make(map[string][]*otlptrace.Span_Event)
+    for _, event := range events {
+        evtSlice, ok := eventMap[event.Name]
+        if !ok {
+            evtSlice = make([]*otlptrace.Span_Event, 0)
+        }
+        eventMap[event.Name] = append(evtSlice, event)
+    }
+    for _, eventList := range eventMap {
+        sortEventsByTimestamp(eventList)
+    }
+    return eventMap
+}
+
+func sortEventsByTimestamp(eventList []*otlptrace.Span_Event) {
+    sort.SliceStable(eventList, func(i, j int) bool { return eventList[i].TimeUnixNano < eventList[j].TimeUnixNano })
+}
+
+func convertLinksSliceToMap(links []*otlptrace.Span_Link) map[string]*otlptrace.Span_Link {
+    linkMap := make(map[string]*otlptrace.Span_Link)
+    for _, link := range links {
+        linkMap[link.SpanId.HexString()] = link
+    }
+    return linkMap
+}
+
+// notWithinOneMillisecond reports whether the two timestamps differ by more than
+// roughly one millisecond (the threshold includes a small slack: 1.1ms).
+func notWithinOneMillisecond(sentNs uint64, recdNs uint64) bool {
+    var diff uint64
+    if sentNs > recdNs {
+        diff = sentNs - recdNs
+    } else {
+        diff = recdNs - sentNs
+    }
+    return diff > uint64(1100000)
+}
+
+func convertKVListToJSONString(values []otlpcommon.KeyValue) string {
+    jsonStr, err := json.Marshal(convertKVListToRawMap(values))
+    if err == nil {
+        return string(jsonStr)
+    }
+    return ""
+}
+
+func convertArrayValuesToJSONString(values []otlpcommon.AnyValue) string {
+    jsonStr, err := json.Marshal(convertArrayValuesToRawSlice(values))
+    if err == nil {
+        return string(jsonStr)
+    }
+    return ""
+}
+
+func convertKVListToRawMap(values []otlpcommon.KeyValue) map[string]interface{} {
+    rawMap := make(map[string]interface{})
+    for i := range values {
+        kv := &values[i]
+        switch val := kv.Value.GetValue().(type) {
+        case *otlpcommon.AnyValue_StringValue:
+            rawMap[kv.Key] = val.StringValue
+        case *otlpcommon.AnyValue_IntValue:
+            rawMap[kv.Key] = val.IntValue
+        case *otlpcommon.AnyValue_DoubleValue:
+            rawMap[kv.Key] = val.DoubleValue
+        case *otlpcommon.AnyValue_BoolValue:
+            rawMap[kv.Key] = val.BoolValue
+        case *otlpcommon.AnyValue_KvlistValue:
+            rawMap[kv.Key] = convertKVListToRawMap(val.KvlistValue.Values)
+        case *otlpcommon.AnyValue_ArrayValue:
+            rawMap[kv.Key] = convertArrayValuesToRawSlice(val.ArrayValue.Values)
+        default:
+            rawMap[kv.Key] = val
+        }
+    }
+    return rawMap
+}
+
+func convertArrayValuesToRawSlice(values []otlpcommon.AnyValue) []interface{} {
+    rawSlice := make([]interface{}, 0, len(values))
+    for _, v := range values {
+        switch val := v.GetValue().(type) {
+        case *otlpcommon.AnyValue_StringValue:
+            rawSlice = append(rawSlice, val.StringValue)
+        case *otlpcommon.AnyValue_IntValue:
+            rawSlice = append(rawSlice, val.IntValue)
+        case *otlpcommon.AnyValue_DoubleValue:
+            rawSlice = append(rawSlice, val.DoubleValue)
+        case *otlpcommon.AnyValue_BoolValue:
+            rawSlice = append(rawSlice, val.BoolValue)
+        }
+    }
+    return rawSlice
+}
diff --git a/internal/otel_collector/testbed/tests/.gitignore b/internal/otel_collector/testbed/tests/.gitignore
new file mode 100644
index 00000000000..a61c5ef81e8
--- /dev/null
+++ b/internal/otel_collector/testbed/tests/.gitignore
@@ -0,0 +1,2 @@
+results/*
+!results/BASELINE.md
diff --git a/internal/otel_collector/testbed/tests/e2e_test.go b/internal/otel_collector/testbed/tests/e2e_test.go
new file mode 100644
index 00000000000..5de48f08d6b
--- /dev/null
+++ b/internal/otel_collector/testbed/tests/e2e_test.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tests contains test cases. To run the tests go to the tests directory and run:
+// RUN_TESTBED=1 go test -v
+
+package tests
+
+import (
+    "fmt"
+    "strconv"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/assert"
+
+    "go.opentelemetry.io/collector/testbed/testbed"
+)
+
+func TestIdleMode(t *testing.T) {
+    options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10}
+    dataProvider := testbed.NewPerfTestDataProvider(options)
+    tc := testbed.NewTestCase(
+        t,
+        dataProvider,
+        testbed.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.DefaultJaegerPort),
+        testbed.NewOCDataReceiver(testbed.DefaultOCPort),
+        &testbed.ChildProcess{},
+        &testbed.PerfTestValidator{},
+        performanceResultsSummary,
+    )
+    defer tc.Stop()
+
+    tc.SetResourceLimits(testbed.ResourceSpec{ExpectedMaxCPU: 4, ExpectedMaxRAM: 50})
+    tc.StartAgent()
+
+    tc.Sleep(tc.Duration)
+}
+
+func TestBallastMemory(t *testing.T) {
+    tests := []struct {
+        ballastSize uint32
+        maxRSS      uint32
+    }{
+        {100, 50},
+        {500, 70},
+        {1000, 100},
+    }
+
+    options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10}
+    dataProvider := testbed.NewPerfTestDataProvider(options)
+    for _, test := range tests {
+        tc := testbed.NewTestCase(
+            t,
+            dataProvider,
+            testbed.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.DefaultJaegerPort),
+            testbed.NewOCDataReceiver(testbed.DefaultOCPort),
+            &testbed.ChildProcess{},
+            &testbed.PerfTestValidator{},
+            performanceResultsSummary,
+            testbed.WithSkipResults(),
+        )
+        tc.SetResourceLimits(testbed.ResourceSpec{ExpectedMaxRAM: test.maxRSS})
+
+        tc.StartAgent("--mem-ballast-size-mib", strconv.Itoa(int(test.ballastSize)))
+
+        var rss, vms uint32
+        // It is possible that the process is not ready or the ballast code path
+        // is not hit immediately, so we give the process up to a couple of seconds
+        // to fire up and set up the ballast. Two seconds is a long time for this case,
+        // but it is short enough not to be annoying if the test fails repeatedly.
+        tc.WaitForN(func() bool {
+            rss, vms, _ = tc.AgentMemoryInfo()
+            return vms > test.ballastSize
+        }, time.Second*2, "VMS must be greater than %d", test.ballastSize)
+
+        assert.True(t, rss <= test.maxRSS, fmt.Sprintf("RSS must be less than or equal to %d", test.maxRSS))
+        tc.Stop()
+    }
+}
diff --git a/internal/otel_collector/testbed/tests/log_test.go b/internal/otel_collector/testbed/tests/log_test.go
new file mode 100644
index 00000000000..60b88f913fb
--- /dev/null
+++ b/internal/otel_collector/testbed/tests/log_test.go
@@ -0,0 +1,84 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tests
+
+// This file contains Test functions which initiate the tests. The tests can either be
+// coded in this file or use scenarios from perf_scenarios.go.
+
+import (
+    "testing"
+
+    "go.opentelemetry.io/collector/testbed/testbed"
+)
+
+func TestLog10kDPS(t *testing.T) {
+    flw := testbed.NewFluentBitFileLogWriter(testbed.DefaultHost, testbed.GetAvailablePort(t))
+    tests := []struct {
+        name         string
+        sender       testbed.DataSender
+        receiver     testbed.DataReceiver
+        resourceSpec testbed.ResourceSpec
+        extensions   map[string]string
+    }{
+        {
+            name:     "OTLP",
+            sender:   testbed.NewOTLPLogsDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
+            receiver: testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
+            resourceSpec: testbed.ResourceSpec{
+                ExpectedMaxCPU: 20,
+                ExpectedMaxRAM: 70,
+            },
+        },
+        {
+            name:     "OTLP-HTTP",
+            sender:   testbed.NewOTLPHTTPLogsDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
+            receiver: testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)),
+            resourceSpec: testbed.ResourceSpec{
+                ExpectedMaxCPU: 30,
+                ExpectedMaxRAM: 70,
+            },
+        },
+        {
+            name:     "FluentBitToOTLP",
+            sender:   flw,
+            receiver: testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
+            resourceSpec: testbed.ResourceSpec{
+                ExpectedMaxCPU: 50,
+                ExpectedMaxRAM: 150,
+            },
+            extensions: flw.Extensions(),
+        },
+    }
+
+    processors := map[string]string{
+        "batch": `
+  batch:
+`,
+    }
+
+    for _, test := range tests {
+        t.Run(test.name, func(t *testing.T) {
+            Scenario10kItemsPerSecond(
+                t,
+                test.sender,
+                test.receiver,
+                test.resourceSpec,
+                performanceResultsSummary,
+                processors,
+                test.extensions,
+            )
+        })
+    }
+}
diff --git a/internal/otel_collector/testbed/tests/metric_test.go b/internal/otel_collector/testbed/tests/metric_test.go
new file mode 100644
index 00000000000..c33ff5ba792
--- /dev/null
+++ b/internal/otel_collector/testbed/tests/metric_test.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tests + +// This file contains Test functions which initiate the tests. The tests can be either +// coded in this file or use scenarios from perf_scenarios.go. + +import ( + "testing" + + "go.opentelemetry.io/collector/testbed/testbed" +) + +func TestMetricNoBackend10kDPSOpenCensus(t *testing.T) { + options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10} + dataProvider := testbed.NewPerfTestDataProvider(options) + tc := testbed.NewTestCase( + t, + dataProvider, + testbed.NewOCMetricDataSender(testbed.DefaultHost, 55678), + testbed.NewOCDataReceiver(testbed.DefaultOCPort), + &testbed.ChildProcess{}, + &testbed.PerfTestValidator{}, + performanceResultsSummary, + ) + defer tc.Stop() + + tc.SetResourceLimits(testbed.ResourceSpec{ExpectedMaxCPU: 200, ExpectedMaxRAM: 200}) + tc.StartAgent() + + tc.StartLoad(testbed.LoadOptions{DataItemsPerSecond: 10_000}) + + tc.Sleep(tc.Duration) +} + +func TestMetric10kDPS(t *testing.T) { + tests := []struct { + name string + sender testbed.DataSender + receiver testbed.DataReceiver + resourceSpec testbed.ResourceSpec + }{ + { + "OpenCensus", + testbed.NewOCMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOCDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 85, + ExpectedMaxRAM: 75, + }, + }, + { + "OTLP", + testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 50, + ExpectedMaxRAM: 60, + }, + }, + { + "OTLP-HTTP", + testbed.NewOTLPHTTPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 50, + ExpectedMaxRAM: 60, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + Scenario10kItemsPerSecond( + t, + test.sender, + test.receiver, + test.resourceSpec, + performanceResultsSummary, + nil, + nil, + ) + }) + } + +} diff --git a/internal/otel_collector/testbed/tests/resource_processor_test.go b/internal/otel_collector/testbed/tests/resource_processor_test.go new file mode 100644 index 00000000000..c1c6dfc95b5 --- /dev/null +++ b/internal/otel_collector/testbed/tests/resource_processor_test.go @@ -0,0 +1,291 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
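+
+// The cases in this file exercise the resource processor's attribute actions
+// (insert, update, upsert and delete) against three mocked inputs: a resource
+// that already has attributes, a nil resource, and a resource with an empty
+// attribute list.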
+ +package tests + +import ( + "context" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + otlpcommon "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/common/v1" + otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1" + otlpresource "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/resource/v1" + "go.opentelemetry.io/collector/testbed/testbed" +) + +var ( + mockedConsumedResourceWithType = &otlpmetrics.ResourceMetrics{ + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "opencensus.resourcetype", + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "host", + }, + }, + }, + { + Key: "label-key", + Value: otlpcommon.AnyValue{ + Value: &otlpcommon.AnyValue_StringValue{ + StringValue: "label-value", + }, + }, + }, + }, + }, + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + { + Name: "metric-name", + Description: "metric-description", + Unit: "metric-unit", + Data: &otlpmetrics.Metric_IntGauge{ + IntGauge: &otlpmetrics.IntGauge{ + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Value: 0, + }, + }, + }, + }, + }, + }, + }, + }, + } + + mockedConsumedResourceNil = &otlpmetrics.ResourceMetrics{ + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + { + Name: "metric-name", + Description: "metric-description", + Unit: "metric-unit", + Data: &otlpmetrics.Metric_IntGauge{ + IntGauge: &otlpmetrics.IntGauge{ + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Value: 0, + }, + }, + }, + }, + }, + }, + }, + }, + } + + mockedConsumedResourceWithoutAttributes = &otlpmetrics.ResourceMetrics{ + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{}, + }, + InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ + { + Metrics: []*otlpmetrics.Metric{ + { + Name: "metric-name", + Description: "metric-description", + Unit: "metric-unit", + Data: &otlpmetrics.Metric_IntGauge{ + IntGauge: &otlpmetrics.IntGauge{ + DataPoints: []*otlpmetrics.IntDataPoint{ + { + Value: 0, + }, + }, + }, + }, + }, + }, + }, + }, + } +) + +type resourceProcessorTestCase struct { + name string + resourceProcessorConfig string + mockedConsumedMetricData pdata.Metrics + expectedMetricData pdata.Metrics +} + +func getResourceProcessorTestCases() []resourceProcessorTestCase { + + tests := []resourceProcessorTestCase{ + { + name: "update_and_rename_existing_attributes", + resourceProcessorConfig: ` + resource: + attributes: + - key: label-key + value: new-label-value + action: update + - key: resource-type + from_attribute: opencensus.resourcetype + action: upsert + - key: opencensus.resourcetype + action: delete +`, + mockedConsumedMetricData: getMetricDataFrom(mockedConsumedResourceWithType), + expectedMetricData: getMetricDataFromResourceMetrics(&otlpmetrics.ResourceMetrics{ + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "resource-type", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "host"}}, + }, + { + Key: "label-key", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "new-label-value"}}, + }, + }, + }, + }), + }, + { + name: "set_attribute_on_nil_resource", + resourceProcessorConfig: ` + resource: + attributes: + - key: additional-label-key + 
value: additional-label-value + action: insert + +`, + mockedConsumedMetricData: getMetricDataFrom(mockedConsumedResourceNil), + expectedMetricData: getMetricDataFromResourceMetrics(&otlpmetrics.ResourceMetrics{ + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "additional-label-key", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "additional-label-value"}}, + }, + }, + }, + }), + }, + { + name: "set_attribute_on_empty_resource", + resourceProcessorConfig: ` + resource: + attributes: + - key: additional-label-key + value: additional-label-value + action: insert +`, + mockedConsumedMetricData: getMetricDataFrom(mockedConsumedResourceWithoutAttributes), + expectedMetricData: getMetricDataFromResourceMetrics(&otlpmetrics.ResourceMetrics{ + Resource: otlpresource.Resource{ + Attributes: []otlpcommon.KeyValue{ + { + Key: "additional-label-key", + Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "additional-label-value"}}, + }, + }, + }, + }), + }, + } + + return tests +} + +func getMetricDataFromResourceMetrics(rm *otlpmetrics.ResourceMetrics) pdata.Metrics { + return pdata.MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{rm}) +} + +func getMetricDataFrom(rm *otlpmetrics.ResourceMetrics) pdata.Metrics { + return pdata.MetricsFromOtlp([]*otlpmetrics.ResourceMetrics{rm}) +} + +func TestMetricResourceProcessor(t *testing.T) { + sender := testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)) + receiver := testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)) + + tests := getResourceProcessorTestCases() + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resultDir, err := filepath.Abs(path.Join("results", t.Name())) + require.NoError(t, err) + + agentProc := &testbed.ChildProcess{} + processors := map[string]string{ + "resource": test.resourceProcessorConfig, + } + configStr := createConfigYaml(t, sender, receiver, resultDir, processors, nil) + configCleanup, err := agentProc.PrepareConfig(configStr) + require.NoError(t, err) + defer configCleanup() + + options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10} + dataProvider := testbed.NewPerfTestDataProvider(options) + tc := testbed.NewTestCase( + t, + dataProvider, + sender, + receiver, + agentProc, + &testbed.PerfTestValidator{}, + performanceResultsSummary, + ) + defer tc.Stop() + + tc.StartBackend() + tc.StartAgent() + defer tc.StopAgent() + + tc.EnableRecording() + + require.NoError(t, sender.Start()) + + // Clear previously received metrics. + tc.MockBackend.ClearReceivedItems() + startCounter := tc.MockBackend.DataItemsReceived() + + sender, ok := tc.Sender.(testbed.MetricDataSender) + require.True(t, ok, "unsupported metric sender") + + require.NoError(t, sender.ConsumeMetrics(context.Background(), test.mockedConsumedMetricData)) + + // We bypass the load generator in this test, but make sure to increment the + // counter since it is used in final reports. 
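+			// The WaitFor call just below then blocks until the mock backend
+			// reports exactly one received datapoint (startCounter+1).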
+			tc.LoadGenerator.IncDataItemsSent()
+
+			tc.WaitFor(func() bool { return tc.MockBackend.DataItemsReceived() == startCounter+1 },
+				"datapoints received")
+
+			// Assert Resources
+			m := tc.MockBackend.ReceivedMetrics[0]
+			rm := m.ResourceMetrics()
+			require.Equal(t, 1, rm.Len())
+
+			expectedMD := test.expectedMetricData
+			require.Equal(t,
+				attributesToMap(expectedMD.ResourceMetrics().At(0).Resource().Attributes()),
+				attributesToMap(rm.At(0).Resource().Attributes()),
+			)
+		})
+	}
+} diff --git a/internal/otel_collector/testbed/tests/results/BASELINE.md b/internal/otel_collector/testbed/tests/results/BASELINE.md new file mode 100644 index 00000000000..d8e233c018a --- /dev/null +++ b/internal/otel_collector/testbed/tests/results/BASELINE.md @@ -0,0 +1,25 @@
+# Test Results
+Started: Fri, 13 Dec 2019 09:20:14 -0500
+
+Test |Result|Duration|CPU Avg%|CPU Max%|RAM Avg MiB|RAM Max MiB|Sent Items|Received Items|
+----------------------------------------|------|-------:|-------:|-------:|----------:|----------:|---------:|-------------:|
+IdleMode |PASS | 15s| 1.3| 4.6| 17| 21| 0| 0|
+MetricNoBackend10kDPSOpenCensus |PASS | 15s| 19.9| 22.2| 23| 28| 149940| 0|
+Metric10kDPS/OpenCensus |PASS | 18s| 9.6| 11.3| 26| 33| 149900| 149900|
+Trace10kSPS/JaegerReceiver |PASS | 16s| 28.9| 31.5| 46| 56| 148830| 148830|
+Trace10kSPS/OpenCensusReceiver |PASS | 16s| 27.8| 30.1| 38| 46| 149340| 149340|
+TraceNoBackend10kSPSJaeger |PASS | 15s| 25.7| 28.1| 99| 138| 148690| 0|
+Trace1kSPSWithAttrs/0*0bytes |PASS | 15s| 16.8| 19.3| 22| 27| 15000| 15000|
+Trace1kSPSWithAttrs/100*50bytes |PASS | 15s| 59.9| 65.0| 24| 30| 13920| 13920|
+Trace1kSPSWithAttrs/10*1000bytes |PASS | 15s| 49.0| 59.4| 24| 30| 14370| 14370|
+Trace1kSPSWithAttrs/20*5000bytes |PASS | 15s| 108.2| 114.1| 38| 53| 14730| 14730|
+TraceBallast1kSPSWithAttrs/0*0bytes |PASS | 15s| 16.7| 18.4| 85| 136| 15000| 15000|
+TraceBallast1kSPSWithAttrs/100*50bytes |PASS | 15s| 41.0| 47.6| 628| 975| 13900| 13900|
+TraceBallast1kSPSWithAttrs/10*1000bytes |PASS | 15s| 36.3| 40.3| 448| 757| 14910| 14910|
+TraceBallast1kSPSWithAttrs/20*5000bytes |PASS | 15s| 77.2| 84.5| 879| 1077| 14070| 14070|
+TraceBallast1kSPSAddAttrs/0*0bytes |PASS | 15s| 17.1| 18.2| 90| 147| 15000| 15000|
+TraceBallast1kSPSAddAttrs/100*50bytes |PASS | 15s| 47.1| 49.3| 676| 979| 14820| 14820|
+TraceBallast1kSPSAddAttrs/10*1000bytes |PASS | 15s| 37.6| 40.0| 516| 838| 15000| 15000|
+TraceBallast1kSPSAddAttrs/20*5000bytes |PASS | 15s| 53.8| 69.0| 823| 1049| 11740| 11740|
+
+Total duration: 278s diff --git a/internal/otel_collector/testbed/tests/runtests.sh b/internal/otel_collector/testbed/tests/runtests.sh new file mode 100644 index 00000000000..ecc478acbfd --- /dev/null +++ b/internal/otel_collector/testbed/tests/runtests.sh @@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
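+
+# The script colorizes PASS/FAIL in the "go test" output, keeps the raw log in
+# results/testoutput.log, converts it to JUnit XML with go-junit-report (which
+# is assumed to be available on the PATH), and re-prints results/TESTRESULTS.md
+# with the same colorization. A typical (hypothetical) invocation:
+#
+#   TEST_ARGS="-run TestTrace10kSPS" ./runtests.sh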
+
+set -e
+
+SED="sed"
+
+PASS_COLOR=$(printf "\033[32mPASS\033[0m")
+FAIL_COLOR=$(printf "\033[31mFAIL\033[0m")
+TEST_COLORIZE="${SED} 's/PASS/${PASS_COLOR}/' | ${SED} 's/FAIL/${FAIL_COLOR}/'"
+echo ${TEST_ARGS}
+RUN_TESTBED=1 go test -v ${TEST_ARGS} 2>&1 | tee results/testoutput.log | bash -c "${TEST_COLORIZE}"
+
+testStatus=${PIPESTATUS[0]}
+
+mkdir -p results/junit
+go-junit-report < results/testoutput.log > results/junit/results.xml
+
+bash -c "cat results/TESTRESULTS.md | ${TEST_COLORIZE}"
+
+exit ${testStatus} diff --git a/internal/otel_collector/testbed/tests/scenarios.go b/internal/otel_collector/testbed/tests/scenarios.go new file mode 100644 index 00000000000..cb8f5e87887 --- /dev/null +++ b/internal/otel_collector/testbed/tests/scenarios.go @@ -0,0 +1,333 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tests
+
+// This file defines parametrized test scenarios and makes them public so that they can be
+// also used by tests in custom builds of Collector (e.g. Collector Contrib).
+
+import (
+	"fmt"
+	"math/rand"
+	"path"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/testbed/testbed"
+)
+
+var (
+	performanceResultsSummary testbed.TestResultsSummary = &testbed.PerformanceResults{}
+)
+
+// createConfigYaml creates a collector config that corresponds to the sender
+// and receiver used in the test and returns it as a YAML string. processors
+// maps processor names to their configs; each config is YAML and must be
+// indented by 2 spaces. Processors will be placed between batch and queue for
+// the traces pipeline. For the metrics pipeline they will be the sole
+// processors.
+func createConfigYaml(
+	t *testing.T,
+	sender testbed.DataSender,
+	receiver testbed.DataReceiver,
+	resultDir string,
+	processors map[string]string,
+	extensions map[string]string,
+) string {
+
+	// Create a config. Note that our DataSender is used to generate a config for Collector's
+	// receiver and our DataReceiver is used to generate a config for Collector's exporter.
+	// This is because our DataSender sends to Collector's receiver and our DataReceiver
+	// receives from Collector's exporter.
+
+	// Prepare extra processor config section and comma-separated list of extra processor
+	// names to use in corresponding "processors" settings.
+	processorsSections := ""
+	processorsList := ""
+	if len(processors) > 0 {
+		first := true
+		for name, cfg := range processors {
+			processorsSections += cfg + "\n"
+			if !first {
+				processorsList += ","
+			}
+			processorsList += name
+			first = false
+		}
+	}
+
+	// Prepare extra extension config section and comma-separated list of extra extension
+	// names to use in corresponding "extensions" settings.
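+	// As a sketch (hypothetical input): processors = {"batch": "  batch:"} and
+	// extensions = {"zpages": "  zpages:"} would produce processorsList =
+	// "batch" and extensionsList = "zpages", which are spliced into the
+	// "processors:" and "extensions:" lists of the config template below.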
+ extensionsSections := "" + extensionsList := "" + if len(extensions) > 0 { + first := true + for name, cfg := range extensions { + extensionsSections += cfg + "\n" + if !first { + extensionsList += "," + } + extensionsList += name + first = false + } + } + + // Set pipeline based on DataSender type + var pipeline string + switch sender.(type) { + case testbed.TraceDataSender: + pipeline = "traces" + case testbed.MetricDataSender: + pipeline = "metrics" + case testbed.LogDataSender: + pipeline = "logs" + default: + t.Error("Invalid DataSender type") + } + + format := ` +receivers:%v +exporters:%v +processors: + %s + +extensions: + pprof: + save_to_file: %v/cpu.prof + %s + +service: + extensions: [pprof, %s] + pipelines: + %s: + receivers: [%v] + processors: [%s] + exporters: [%v] +` + + // Put corresponding elements into the config template to generate the final config. + return fmt.Sprintf( + format, + sender.GenConfigYAMLStr(), + receiver.GenConfigYAMLStr(), + processorsSections, + resultDir, + extensionsSections, + extensionsList, + pipeline, + sender.ProtocolName(), + processorsList, + receiver.ProtocolName(), + ) +} + +// Run 10k data items/sec test using specified sender and receiver protocols. +func Scenario10kItemsPerSecond( + t *testing.T, + sender testbed.DataSender, + receiver testbed.DataReceiver, + resourceSpec testbed.ResourceSpec, + resultsSummary testbed.TestResultsSummary, + processors map[string]string, + extensions map[string]string, +) { + resultDir, err := filepath.Abs(path.Join("results", t.Name())) + require.NoError(t, err) + + options := testbed.LoadOptions{ + DataItemsPerSecond: 10_000, + ItemsPerBatch: 100, + Parallel: 1, + } + agentProc := &testbed.ChildProcess{} + + configStr := createConfigYaml(t, sender, receiver, resultDir, processors, extensions) + configCleanup, err := agentProc.PrepareConfig(configStr) + require.NoError(t, err) + defer configCleanup() + + dataProvider := testbed.NewPerfTestDataProvider(options) + tc := testbed.NewTestCase( + t, + dataProvider, + sender, + receiver, + agentProc, + &testbed.PerfTestValidator{}, + resultsSummary, + ) + defer tc.Stop() + + tc.SetResourceLimits(resourceSpec) + tc.StartBackend() + tc.StartAgent("--log-level=debug") + + tc.StartLoad(options) + + tc.Sleep(tc.Duration) + + tc.StopLoad() + + tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() > 0 }, "load generator started") + tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() == tc.MockBackend.DataItemsReceived() }, + "all data items received") + + tc.StopAgent() + + tc.ValidateData() +} + +// TestCase for Scenario1kSPSWithAttrs func. +type TestCase struct { + attrCount int + attrSizeByte int + expectedMaxCPU uint32 + expectedMaxRAM uint32 + resultsSummary testbed.TestResultsSummary +} + +func genRandByteString(len int) string { + b := make([]byte, len) + for i := range b { + b[i] = byte(rand.Intn(128)) + } + return string(b) +} + +// Scenario1kSPSWithAttrs runs a performance test at 1k sps with specified span attributes +// and test options. +func Scenario1kSPSWithAttrs(t *testing.T, args []string, tests []TestCase, processors map[string]string) { + for i := range tests { + test := tests[i] + + t.Run(fmt.Sprintf("%d*%dbytes", test.attrCount, test.attrSizeByte), func(t *testing.T) { + + options := constructLoadOptions(test) + + agentProc := &testbed.ChildProcess{} + + // Prepare results dir. + resultDir, err := filepath.Abs(path.Join("results", t.Name())) + require.NoError(t, err) + + // Create sender and receiver on available ports. 
+ sender := testbed.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)) + receiver := testbed.NewOCDataReceiver(testbed.GetAvailablePort(t)) + + // Prepare config. + configStr := createConfigYaml(t, sender, receiver, resultDir, processors, nil) + configCleanup, err := agentProc.PrepareConfig(configStr) + require.NoError(t, err) + defer configCleanup() + + tc := testbed.NewTestCase( + t, + testbed.NewPerfTestDataProvider(options), + sender, + receiver, + agentProc, + &testbed.PerfTestValidator{}, + test.resultsSummary, + ) + defer tc.Stop() + + tc.SetResourceLimits(testbed.ResourceSpec{ + ExpectedMaxCPU: test.expectedMaxCPU, + ExpectedMaxRAM: test.expectedMaxRAM, + }) + + tc.StartBackend() + tc.StartAgent(args...) + + tc.StartLoad(options) + tc.Sleep(tc.Duration) + tc.StopLoad() + + tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() > 0 }, "load generator started") + tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() == tc.MockBackend.DataItemsReceived() }, + "all spans received") + + tc.StopAgent() + + tc.ValidateData() + }) + } +} + +// Structure used for TestTraceNoBackend10kSPS. +// Defines RAM usage range for defined processor type. +type processorConfig struct { + Name string + // map of processor types to their config YAML to use. + Processor map[string]string + ExpectedMaxRAM uint32 + ExpectedMinFinalRAM uint32 +} + +func ScenarioTestTraceNoBackend10kSPS( + t *testing.T, + sender testbed.DataSender, + receiver testbed.DataReceiver, + resourceSpec testbed.ResourceSpec, + resultsSummary testbed.TestResultsSummary, + configuration processorConfig, +) { + + resultDir, err := filepath.Abs(path.Join("results", t.Name())) + require.NoError(t, err) + + options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10} + agentProc := &testbed.ChildProcess{} + configStr := createConfigYaml(t, sender, receiver, resultDir, configuration.Processor, nil) + configCleanup, err := agentProc.PrepareConfig(configStr) + require.NoError(t, err) + defer configCleanup() + + dataProvider := testbed.NewPerfTestDataProvider(options) + tc := testbed.NewTestCase( + t, + dataProvider, + sender, + receiver, + agentProc, + &testbed.PerfTestValidator{}, + resultsSummary, + ) + + defer tc.Stop() + + tc.SetResourceLimits(resourceSpec) + + tc.StartAgent() + tc.StartLoad(options) + + tc.Sleep(tc.Duration) + + rss, _, _ := tc.AgentMemoryInfo() + assert.Less(t, configuration.ExpectedMinFinalRAM, rss) +} + +func constructLoadOptions(test TestCase) testbed.LoadOptions { + options := testbed.LoadOptions{DataItemsPerSecond: 1000, ItemsPerBatch: 10} + options.Attributes = make(map[string]string) + + // Generate attributes. 
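+	// Key lengths are drawn uniformly from [1, 199] (about 100 bytes on
+	// average) and value lengths from [1, 2*attrSizeByte-1] (about
+	// attrSizeByte on average), so attrSizeByte is an average, not an exact
+	// size. This also assumes attrSizeByte > 0 whenever attrCount > 0, since
+	// rand.Intn panics on a non-positive argument.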
+ for i := 0; i < test.attrCount; i++ { + attrName := genRandByteString(rand.Intn(199) + 1) + options.Attributes[attrName] = genRandByteString(rand.Intn(test.attrSizeByte*2-1) + 1) + } + return options +} diff --git a/internal/otel_collector/testbed/tests/testdata/agent-config.yaml b/internal/otel_collector/testbed/tests/testdata/agent-config.yaml new file mode 100644 index 00000000000..48c9ff2a1b7 --- /dev/null +++ b/internal/otel_collector/testbed/tests/testdata/agent-config.yaml @@ -0,0 +1,27 @@ +receivers: + jaeger: + protocols: + grpc: + endpoint: "localhost:14250" + opencensus: + endpoint: "localhost:55678" + +exporters: + opencensus: + endpoint: "localhost:56565" + insecure: true + logging: + loglevel: info + +processors: + queued_retry: + +service: + pipelines: + traces: + receivers: [jaeger] + processors: [queued_retry] + exporters: [opencensus,logging] + metrics: + receivers: [opencensus] + exporters: [opencensus,logging] diff --git a/internal/otel_collector/testbed/tests/testdata/memory-limiter.yaml b/internal/otel_collector/testbed/tests/testdata/memory-limiter.yaml new file mode 100644 index 00000000000..2449b3548e8 --- /dev/null +++ b/internal/otel_collector/testbed/tests/testdata/memory-limiter.yaml @@ -0,0 +1,30 @@ +receivers: + jaeger: + protocols: + grpc: + endpoint: "localhost:14250" + opencensus: + endpoint: "localhost:55678" + +exporters: + opencensus: + endpoint: "localhost:56565" + insecure: true + logging: + loglevel: info + +processors: + queued_retry: + memory_limiter: + check_interval: 1s + limit_mib: 10 + +service: + pipelines: + traces: + receivers: [jaeger] + processors: [memory_limiter,queued_retry] + exporters: [opencensus,logging] + metrics: + receivers: [opencensus] + exporters: [opencensus,logging] diff --git a/internal/otel_collector/testbed/tests/trace_test.go b/internal/otel_collector/testbed/tests/trace_test.go new file mode 100644 index 00000000000..b65ea31fca5 --- /dev/null +++ b/internal/otel_collector/testbed/tests/trace_test.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tests contains test cases. To run the tests go to tests directory and run: +// RUN_TESTBED=1 go test -v + +package tests + +// This file contains Test functions which initiate the tests. The tests can be either +// coded in this file or use scenarios from perf_scenarios.go. + +import ( + "context" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/testbed/testbed" + "go.opentelemetry.io/collector/translator/conventions" +) + +// TestMain is used to initiate setup, execution and tear down of testbed. 
+func TestMain(m *testing.M) { + testbed.DoTestMain(m, performanceResultsSummary) +} + +func TestTrace10kSPS(t *testing.T) { + tests := []struct { + name string + sender testbed.DataSender + receiver testbed.DataReceiver + resourceSpec testbed.ResourceSpec + }{ + { + "JaegerGRPC", + testbed.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewJaegerDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 40, + ExpectedMaxRAM: 70, + }, + }, + { + "OpenCensus", + testbed.NewOCTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOCDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 39, + ExpectedMaxRAM: 82, + }, + }, + { + "OTLP", + testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 20, + ExpectedMaxRAM: 70, + }, + }, + { + "OTLP-HTTP", + testbed.NewOTLPHTTPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 20, + ExpectedMaxRAM: 100, + }, + }, + { + "Zipkin", + testbed.NewZipkinDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewZipkinDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 80, + ExpectedMaxRAM: 80, + }, + }, + } + + processors := map[string]string{ + "batch": ` + batch: +`, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + Scenario10kItemsPerSecond( + t, + test.sender, + test.receiver, + test.resourceSpec, + performanceResultsSummary, + processors, + nil, + ) + }) + } +} + +func TestTraceNoBackend10kSPS(t *testing.T) { + + limitProcessors := map[string]string{ + "memory_limiter": ` + memory_limiter: + check_interval: 100ms + limit_mib: 20 +`, + } + + noLimitProcessors := map[string]string{} + + var processorsConfig = []processorConfig{ + { + Name: "NoMemoryLimit", + Processor: noLimitProcessors, + ExpectedMaxRAM: 150, + ExpectedMinFinalRAM: 100, + }, + { + Name: "MemoryLimit", + Processor: limitProcessors, + ExpectedMaxRAM: 70, + ExpectedMinFinalRAM: 40, + }, + } + + for _, testConf := range processorsConfig { + t.Run(testConf.Name, func(t *testing.T) { + ScenarioTestTraceNoBackend10kSPS( + t, + testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), + testbed.ResourceSpec{ + ExpectedMaxCPU: 50, + ExpectedMaxRAM: testConf.ExpectedMaxRAM, + }, + performanceResultsSummary, + testConf, + ) + }) + } +} + +func TestTrace1kSPSWithAttrs(t *testing.T) { + Scenario1kSPSWithAttrs(t, []string{}, []TestCase{ + // No attributes. + { + attrCount: 0, + attrSizeByte: 0, + expectedMaxCPU: 30, + expectedMaxRAM: 100, + resultsSummary: performanceResultsSummary, + }, + + // We generate 10 attributes each with average key length of 100 bytes and + // average value length of 50 bytes so total size of attributes values is + // 15000 bytes. + { + attrCount: 100, + attrSizeByte: 50, + expectedMaxCPU: 120, + expectedMaxRAM: 100, + resultsSummary: performanceResultsSummary, + }, + + // Approx 10 KiB attributes. + { + attrCount: 10, + attrSizeByte: 1000, + expectedMaxCPU: 100, + expectedMaxRAM: 100, + resultsSummary: performanceResultsSummary, + }, + + // Approx 100 KiB attributes. 
+ { + attrCount: 20, + attrSizeByte: 5000, + expectedMaxCPU: 250, + expectedMaxRAM: 100, + resultsSummary: performanceResultsSummary, + }, + }, nil) +} + +func TestTraceBallast1kSPSWithAttrs(t *testing.T) { + args := []string{"--mem-ballast-size-mib", "1000"} + Scenario1kSPSWithAttrs(t, args, []TestCase{ + // No attributes. + { + attrCount: 0, + attrSizeByte: 0, + expectedMaxCPU: 30, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + { + attrCount: 100, + attrSizeByte: 50, + expectedMaxCPU: 80, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + { + attrCount: 10, + attrSizeByte: 1000, + expectedMaxCPU: 80, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + { + attrCount: 20, + attrSizeByte: 5000, + expectedMaxCPU: 120, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + }, nil) +} + +func TestTraceBallast1kSPSAddAttrs(t *testing.T) { + args := []string{"--mem-ballast-size-mib", "1000"} + + attrProcCfg := ` + attributes: + actions: + - key: attrib.key00 + value: 123 + action: insert + - key: attrib.key01 + value: "a small string for this attribute" + action: insert + - key: attrib.key02 + value: true + action: insert + - key: region + value: test-region + action: insert + - key: data-center + value: test-datacenter + action: insert` + + Scenario1kSPSWithAttrs( + t, + args, + []TestCase{ + { + attrCount: 0, + attrSizeByte: 0, + expectedMaxCPU: 30, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + { + attrCount: 100, + attrSizeByte: 50, + expectedMaxCPU: 80, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + { + attrCount: 10, + attrSizeByte: 1000, + expectedMaxCPU: 80, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + { + attrCount: 20, + attrSizeByte: 5000, + expectedMaxCPU: 120, + expectedMaxRAM: 2000, + resultsSummary: performanceResultsSummary, + }, + }, + map[string]string{"attributes": attrProcCfg}, + ) +} + +// verifySingleSpan sends a single span to Collector, waits until the span is forwarded +// and received by MockBackend and calls user-supplied verification functions on +// received span. +// Temporarily, we need two verification functions in order to verify spans in +// new and old format received by MockBackend. +func verifySingleSpan( + t *testing.T, + tc *testbed.TestCase, + serviceName string, + spanName string, + verifyReceived func(span pdata.Span), +) { + + // Clear previously received traces. + tc.MockBackend.ClearReceivedItems() + startCounter := tc.MockBackend.DataItemsReceived() + + // Send one span. + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + td.ResourceSpans().At(0).Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{ + conventions.AttributeServiceName: pdata.NewAttributeValueString(serviceName), + }) + td.ResourceSpans().At(0).InstrumentationLibrarySpans().Resize(1) + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + spans.Resize(1) + spans.At(0).SetTraceID(testbed.GenerateSequentialTraceID(1)) + spans.At(0).SetSpanID(testbed.GenerateSequentialSpanID(1)) + spans.At(0).SetName(spanName) + + sender := tc.Sender.(testbed.TraceDataSender) + require.NoError(t, sender.ConsumeTraces(context.Background(), td)) + + // We bypass the load generator in this test, but make sure to increment the + // counter since it is used in final reports. + tc.LoadGenerator.IncDataItemsSent() + + // Wait until span is received. 
+ tc.WaitFor(func() bool { return tc.MockBackend.DataItemsReceived() == startCounter+1 }, + "span received") + + // Verify received span. + count := 0 + for _, td := range tc.MockBackend.ReceivedTraces { + rs := td.ResourceSpans() + for i := 0; i < rs.Len(); i++ { + ils := rs.At(i).InstrumentationLibrarySpans() + for j := 0; j < ils.Len(); j++ { + spans := ils.At(j).Spans() + for k := 0; k < spans.Len(); k++ { + verifyReceived(spans.At(k)) + count++ + } + } + } + } + assert.EqualValues(t, 1, count, "must receive one span") +} + +func TestTraceAttributesProcessor(t *testing.T) { + tests := []struct { + name string + sender testbed.DataSender + receiver testbed.DataReceiver + }{ + { + "JaegerGRPC", + testbed.NewJaegerGRPCDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewJaegerDataReceiver(testbed.GetAvailablePort(t)), + }, + { + "OTLP", + testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), + testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resultDir, err := filepath.Abs(path.Join("results", t.Name())) + require.NoError(t, err) + + // Use processor to add attributes to certain spans. + processors := map[string]string{ + "batch": ` + batch: +`, + "attributes": ` + attributes: + include: + match_type: regexp + services: ["service-to-add.*"] + span_names: ["span-to-add-.*"] + actions: + - action: insert + key: "new_attr" + value: "string value" +`, + } + + agentProc := &testbed.ChildProcess{} + configStr := createConfigYaml(t, test.sender, test.receiver, resultDir, processors, nil) + configCleanup, err := agentProc.PrepareConfig(configStr) + require.NoError(t, err) + defer configCleanup() + + options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10} + dataProvider := testbed.NewPerfTestDataProvider(options) + tc := testbed.NewTestCase( + t, + dataProvider, + test.sender, + test.receiver, + agentProc, + &testbed.PerfTestValidator{}, + performanceResultsSummary, + ) + defer tc.Stop() + + tc.StartBackend() + tc.StartAgent() + defer tc.StopAgent() + + tc.EnableRecording() + + require.NoError(t, test.sender.Start()) + + // Create a span that matches "include" filter. + spanToInclude := "span-to-add-attr" + // Create a service name that matches "include" filter. + nodeToInclude := "service-to-add-attr" + + // verifySpan verifies that attributes was added to the internal data span. + verifySpan := func(span pdata.Span) { + require.NotNil(t, span) + require.Equal(t, span.Attributes().Len(), 1) + attrVal, ok := span.Attributes().Get("new_attr") + assert.True(t, ok) + assert.EqualValues(t, "string value", attrVal.StringVal()) + } + + verifySingleSpan(t, tc, nodeToInclude, spanToInclude, verifySpan) + + // Create a service name that does not match "include" filter. + nodeToExclude := "service-not-to-add-attr" + + verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span pdata.Span) { + // Verify attributes was not added to the new internal data span. + assert.Equal(t, span.Attributes().Len(), 0) + }) + + // Create another span that does not match "include" filter. + spanToExclude := "span-not-to-add-attr" + verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span pdata.Span) { + // Verify attributes was not added to the new internal data span. 
+ assert.Equal(t, span.Attributes().Len(), 0) + }) + }) + } +} diff --git a/internal/otel_collector/testbed/tests/utils.go b/internal/otel_collector/testbed/tests/utils.go new file mode 100644 index 00000000000..472659fdd5e --- /dev/null +++ b/internal/otel_collector/testbed/tests/utils.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tests + +import "go.opentelemetry.io/collector/consumer/pdata" + +func attributesToMap(attributes pdata.AttributeMap) map[string]pdata.AttributeValue { + out := map[string]pdata.AttributeValue{} + attributes.ForEach(func(k string, v pdata.AttributeValue) { + out[k] = v + }) + return out +} diff --git a/internal/otel_collector/testutil/logstest/logs.go b/internal/otel_collector/testutil/logstest/logs.go new file mode 100644 index 00000000000..962e2e6214e --- /dev/null +++ b/internal/otel_collector/testutil/logstest/logs.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logstest + +import ( + "go.opentelemetry.io/collector/consumer/pdata" +) + +type Log struct { + Timestamp int64 + Body pdata.AttributeValue + Attributes map[string]pdata.AttributeValue +} + +// A convenience function for constructing logs for tests in a way that is +// relatively easy to read and write declaratively compared to the highly +// imperative and verbose method of using pdata directly. +// Attributes are sorted by key name. +func Logs(recs ...Log) pdata.Logs { + out := pdata.NewLogs() + + logs := out.ResourceLogs() + + logs.Resize(1) + rls := logs.At(0) + + rls.InstrumentationLibraryLogs().Resize(1) + logSlice := rls.InstrumentationLibraryLogs().At(0).Logs() + + logSlice.Resize(len(recs)) + for i := range recs { + l := logSlice.At(i) + recs[i].Body.CopyTo(l.Body()) + l.SetTimestamp(pdata.TimestampUnixNano(recs[i].Timestamp)) + l.Attributes().InitFromMap(recs[i].Attributes) + l.Attributes().Sort() + } + + return out +} diff --git a/internal/otel_collector/testutil/logstest/logs_test.go b/internal/otel_collector/testutil/logstest/logs_test.go new file mode 100644 index 00000000000..5c20b398efa --- /dev/null +++ b/internal/otel_collector/testutil/logstest/logs_test.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logstest + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestLogs(t *testing.T) { + logs := Logs(Log{ + Timestamp: 1, + Body: pdata.NewAttributeValueString("asdf"), + Attributes: map[string]pdata.AttributeValue{ + "a": pdata.NewAttributeValueString("b"), + }, + }) + + require.Equal(t, 1, logs.LogRecordCount()) +} diff --git a/internal/otel_collector/testutil/metricstestutil/metricsutil.go b/internal/otel_collector/testutil/metricstestutil/metricsutil.go new file mode 100644 index 00000000000..5b9c3354915 --- /dev/null +++ b/internal/otel_collector/testutil/metricstestutil/metricsutil.go @@ -0,0 +1,204 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricstestutil + +import ( + "time" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// Gauge creates a gauge metric. +func Gauge(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric { + return metric(metricspb.MetricDescriptor_GAUGE_DOUBLE, name, keys, timeseries) +} + +// GaugeInt creates a gauge metric of type int64. +func GaugeInt(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric { + return metric(metricspb.MetricDescriptor_GAUGE_INT64, name, keys, timeseries) +} + +// GaugeDist creates a gauge distribution metric. +func GaugeDist(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric { + return metric(metricspb.MetricDescriptor_GAUGE_DISTRIBUTION, name, keys, timeseries) +} + +// Cumulative creates a cumulative metric. +func Cumulative(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric { + return metric(metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, name, keys, timeseries) +} + +// CumulativeInt creates a cumulative metric of type int64. +func CumulativeInt(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric { + return metric(metricspb.MetricDescriptor_CUMULATIVE_INT64, name, keys, timeseries) +} + +// CumulativeDist creates a cumulative distribution metric. +func CumulativeDist(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric { + return metric(metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, name, keys, timeseries) +} + +// Summary creates a summary metric. 
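+// A summary point carries a count, a sum and a snapshot of percentile values;
+// see SummPt below for how those fields are populated.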
+func Summary(name string, keys []string, timeseries ...*metricspb.TimeSeries) *metricspb.Metric {
+	return metric(metricspb.MetricDescriptor_SUMMARY, name, keys, timeseries)
+}
+
+// Timeseries creates a timeseries. It takes the start time stamp, a sequence of label values (associated
+// with the label keys in the overall metric), and the value of the timeseries.
+func Timeseries(sts time.Time, vals []string, point *metricspb.Point) *metricspb.TimeSeries {
+	return &metricspb.TimeSeries{
+		StartTimestamp: Timestamp(sts),
+		Points:         []*metricspb.Point{point},
+		LabelValues:    toVals(vals),
+	}
+}
+
+// Double creates a double point.
+func Double(ts time.Time, value float64) *metricspb.Point {
+	return &metricspb.Point{Timestamp: Timestamp(ts), Value: &metricspb.Point_DoubleValue{DoubleValue: value}}
+}
+
+// DistPt creates a distribution point. It takes the time stamp, the bucket boundaries for the distribution,
+// and the counts for the individual buckets as input.
+func DistPt(ts time.Time, bounds []float64, counts []int64) *metricspb.Point {
+	var count int64
+	var sum float64
+	buckets := make([]*metricspb.DistributionValue_Bucket, len(counts))
+
+	for i, bcount := range counts {
+		count += bcount
+		buckets[i] = &metricspb.DistributionValue_Bucket{Count: bcount}
+		// create a sum based on lower bucket bounds
+		// e.g. for bounds = {0.1, 0.2, 0.4} and counts = {2, 3, 7, 9}
+		// sum = 0*2 + 0.1*3 + 0.2*7 + 0.4*9
+		if i > 0 {
+			sum += float64(bcount) * bounds[i-1]
+		}
+	}
+	distrValue := &metricspb.DistributionValue{
+		BucketOptions: &metricspb.DistributionValue_BucketOptions{
+			Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+				Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+					Bounds: bounds,
+				},
+			},
+		},
+		Count:   count,
+		Sum:     sum,
+		Buckets: buckets,
+		// There's no way to compute SumOfSquaredDeviation from prometheus data
+	}
+	return &metricspb.Point{Timestamp: Timestamp(ts), Value: &metricspb.Point_DistributionValue{DistributionValue: distrValue}}
+}
+
+// SummPt creates a summary point.
+func SummPt(ts time.Time, count int64, sum float64, percent, vals []float64) *metricspb.Point {
+	percentiles := make([]*metricspb.SummaryValue_Snapshot_ValueAtPercentile, len(percent))
+	for i := 0; i < len(percent); i++ {
+		percentiles[i] = &metricspb.SummaryValue_Snapshot_ValueAtPercentile{Percentile: percent[i], Value: vals[i]}
+	}
+	summaryValue := &metricspb.SummaryValue{
+		Sum:   &wrapperspb.DoubleValue{Value: sum},
+		Count: &wrapperspb.Int64Value{Value: count},
+		Snapshot: &metricspb.SummaryValue_Snapshot{
+			PercentileValues: percentiles,
+		},
+	}
+	return &metricspb.Point{Timestamp: Timestamp(ts), Value: &metricspb.Point_SummaryValue{SummaryValue: summaryValue}}
+}
+
+// Timestamp creates a timestamp.
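+// For example, Timestamp(time.Unix(0, 3000000)) yields a protobuf timestamp
+// with Seconds: 0 and Nanos: 3000000, matching the fixtures in the test file
+// below.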
+func Timestamp(ts time.Time) *timestamppb.Timestamp {
+	return &timestamppb.Timestamp{
+		Seconds: ts.Unix(),
+		Nanos:   int32(ts.Nanosecond()),
+	}
+}
+
+func metric(ty metricspb.MetricDescriptor_Type, name string, keys []string, timeseries []*metricspb.TimeSeries) *metricspb.Metric {
+	return &metricspb.Metric{
+		MetricDescriptor: &metricspb.MetricDescriptor{
+			Name:        name,
+			Description: "metrics description",
+			Unit:        "",
+			Type:        ty,
+			LabelKeys:   toKeys(keys),
+		},
+		Timeseries: timeseries,
+	}
+}
+
+func toKeys(keys []string) []*metricspb.LabelKey {
+	res := make([]*metricspb.LabelKey, 0, len(keys))
+	for _, key := range keys {
+		res = append(res, &metricspb.LabelKey{Key: key, Description: "description: " + key})
+	}
+	return res
+}
+
+func toVals(vals []string) []*metricspb.LabelValue {
+	res := make([]*metricspb.LabelValue, 0, len(vals))
+	for _, val := range vals {
+		res = append(res, &metricspb.LabelValue{Value: val, HasValue: true})
+	}
+	return res
+}
+
+// SortedMetrics is mainly useful for tests. It gets all of the attributes and
+// labels in sorted order so they can be consistently tested.
+func SortedMetrics(metrics pdata.Metrics) pdata.Metrics {
+	for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+		rm := metrics.ResourceMetrics().At(i)
+		rm.Resource().Attributes().Sort()
+
+		for j := 0; j < rm.InstrumentationLibraryMetrics().Len(); j++ {
+			ilm := rm.InstrumentationLibraryMetrics().At(j)
+			for k := 0; k < ilm.Metrics().Len(); k++ {
+				m := ilm.Metrics().At(k)
+				switch m.DataType() {
+				case pdata.MetricDataTypeIntGauge:
+					for l := 0; l < m.IntGauge().DataPoints().Len(); l++ {
+						m.IntGauge().DataPoints().At(l).LabelsMap().Sort()
+					}
+				case pdata.MetricDataTypeIntSum:
+					for l := 0; l < m.IntSum().DataPoints().Len(); l++ {
+						m.IntSum().DataPoints().At(l).LabelsMap().Sort()
+					}
+				case pdata.MetricDataTypeDoubleGauge:
+					for l := 0; l < m.DoubleGauge().DataPoints().Len(); l++ {
+						m.DoubleGauge().DataPoints().At(l).LabelsMap().Sort()
+					}
+				case pdata.MetricDataTypeDoubleSum:
+					for l := 0; l < m.DoubleSum().DataPoints().Len(); l++ {
+						m.DoubleSum().DataPoints().At(l).LabelsMap().Sort()
+					}
+				case pdata.MetricDataTypeIntHistogram:
+					for l := 0; l < m.IntHistogram().DataPoints().Len(); l++ {
+						m.IntHistogram().DataPoints().At(l).LabelsMap().Sort()
+					}
+				case pdata.MetricDataTypeDoubleHistogram:
+					for l := 0; l < m.DoubleHistogram().DataPoints().Len(); l++ {
+						m.DoubleHistogram().DataPoints().At(l).LabelsMap().Sort()
+					}
+				}
+			}
+		}
+	}
+	return metrics
+} diff --git a/internal/otel_collector/testutil/metricstestutil/metricsutil_test.go b/internal/otel_collector/testutil/metricstestutil/metricsutil_test.go new file mode 100644 index 00000000000..abe7a0cedb2 --- /dev/null +++ b/internal/otel_collector/testutil/metricstestutil/metricsutil_test.go @@ -0,0 +1,180 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricstestutil
+
+import (
+	"testing"
+	"time"
+
+	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+	"github.com/stretchr/testify/assert"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+func TestResourceProcessor(t *testing.T) {
+	op1 := "op1"
+	op2 := "op2"
+	k1k2 := []string{"k1", "k2"}
+	v1v2 := []string{"v1", "v2"}
+	v10v20 := []string{"v10", "v20"}
+	bounds0 := []float64{1}
+	percent0 := []float64{10}
+	t1Ms := time.Unix(0, 1000000)
+	t3Ms := time.Unix(0, 3000000)
+	t5Ms := time.Unix(0, 5000000)
+
+	k1k2Labels := []*metricspb.LabelKey{
+		{Key: "k1", Description: "description: k1"},
+		{Key: "k2", Description: "description: k2"},
+	}
+
+	v1v2Values := []*metricspb.LabelValue{
+		{Value: "v1", HasValue: true},
+		{Value: "v2", HasValue: true},
+	}
+
+	v10v20Values := []*metricspb.LabelValue{
+		{Value: "v10", HasValue: true},
+		{Value: "v20", HasValue: true},
+	}
+
+	ts1Ms := &timestamppb.Timestamp{Seconds: 0, Nanos: 1000000}
+	ts3Ms := &timestamppb.Timestamp{Seconds: 0, Nanos: 3000000}
+	ts5Ms := &timestamppb.Timestamp{Seconds: 0, Nanos: 5000000}
+
+	d44 := &metricspb.Point_DoubleValue{DoubleValue: 44}
+	d65 := &metricspb.Point_DoubleValue{DoubleValue: 65}
+	d90 := &metricspb.Point_DoubleValue{DoubleValue: 90}
+
+	dist := &metricspb.Point_DistributionValue{
+		DistributionValue: &metricspb.DistributionValue{
+			BucketOptions: &metricspb.DistributionValue_BucketOptions{
+				Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+					Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+						Bounds: []float64{1},
+					},
+				},
+			},
+			Count:   2,
+			Sum:     0,
+			Buckets: []*metricspb.DistributionValue_Bucket{{Count: 2}},
+		},
+	}
+
+	summ := &metricspb.Point_SummaryValue{
+		SummaryValue: &metricspb.SummaryValue{
+			Sum:   &wrapperspb.DoubleValue{Value: 40},
+			Count: &wrapperspb.Int64Value{Value: 10},
+			Snapshot: &metricspb.SummaryValue_Snapshot{
+				PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{
+					{Percentile: 10, Value: 1},
+				},
+			},
+		},
+	}
+
+	got := []*metricspb.Metric{
+		Gauge(op1, k1k2, Timeseries(t1Ms, v1v2, Double(t1Ms, 44))),
+		GaugeDist(op2, k1k2, Timeseries(t3Ms, v1v2, DistPt(t1Ms, bounds0, []int64{2}))),
+		Cumulative(op1, k1k2, Timeseries(t5Ms, v1v2, Double(t5Ms, 90)), Timeseries(t5Ms, v10v20, Double(t5Ms, 65))),
+		CumulativeDist(op2, k1k2, Timeseries(t1Ms, v1v2, DistPt(t1Ms, bounds0, []int64{2}))),
+		Summary(op1, k1k2, Timeseries(t1Ms, v1v2, SummPt(t1Ms, 10, 40, percent0, []float64{1, 5}))),
+	}
+
+	want := []*metricspb.Metric{
+		{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        op1,
+				Description: "metrics description",
+				Type:        metricspb.MetricDescriptor_GAUGE_DOUBLE,
+				LabelKeys:   k1k2Labels,
+			},
+			Timeseries: []*metricspb.TimeSeries{
+				{
+					StartTimestamp: ts1Ms,
+					LabelValues:    v1v2Values,
+					Points:         []*metricspb.Point{{Timestamp: ts1Ms, Value: d44}},
+				},
+			},
+		},
+		{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        op2,
+				Description: "metrics description",
+				Type:        metricspb.MetricDescriptor_GAUGE_DISTRIBUTION,
+				LabelKeys:   k1k2Labels,
+			},
+			Timeseries: []*metricspb.TimeSeries{
+				{
+					StartTimestamp: ts3Ms,
+					LabelValues:    v1v2Values,
+					Points:         []*metricspb.Point{{Timestamp: ts1Ms, Value: dist}},
+				},
+			},
+		},
+		{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        op1,
+				Description: "metrics description",
+				Type:        metricspb.MetricDescriptor_CUMULATIVE_DOUBLE,
+				LabelKeys:   k1k2Labels,
+			},
+			Timeseries: []*metricspb.TimeSeries{
+				{
StartTimestamp: ts5Ms, + LabelValues: v1v2Values, + Points: []*metricspb.Point{{Timestamp: ts5Ms, Value: d90}}, + }, + { + StartTimestamp: ts5Ms, + LabelValues: v10v20Values, + Points: []*metricspb.Point{{Timestamp: ts5Ms, Value: d65}}, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: op2, + Description: "metrics description", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: k1k2Labels, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1Ms, + LabelValues: v1v2Values, + Points: []*metricspb.Point{{Timestamp: ts1Ms, Value: dist}}, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: op1, + Description: "metrics description", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: k1k2Labels, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1Ms, + LabelValues: v1v2Values, + Points: []*metricspb.Point{{Timestamp: ts1Ms, Value: summ}}, + }, + }, + }, + } + assert.Equalf(t, want, got, "got %v, want %v", got, want) +} diff --git a/internal/otel_collector/testutil/testutil.go b/internal/otel_collector/testutil/testutil.go new file mode 100644 index 00000000000..b2f5f33ba62 --- /dev/null +++ b/internal/otel_collector/testutil/testutil.go @@ -0,0 +1,235 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type portpair struct { + first string + last string +} + +// GenerateNormalizedJSON generates a normalized JSON from the string +// given to the function. Useful to compare JSON contents that +// may have differences due to formatting. It returns nil in case of +// invalid JSON. +func GenerateNormalizedJSON(t *testing.T, jsonStr string) string { + var i interface{} + + err := json.Unmarshal([]byte(jsonStr), &i) + require.NoError(t, err) + + n, err := json.Marshal(i) + require.NoError(t, err) + + return string(n) +} + +func TempSocketName(t *testing.T) string { + tmpfile, err := ioutil.TempFile("", "sock") + require.NoError(t, err) + require.NoError(t, tmpfile.Close()) + socket := tmpfile.Name() + require.NoError(t, os.Remove(socket)) + return socket +} + +// GetAvailableLocalAddress finds an available local port and returns an endpoint +// describing it. The port is available for opening when this function returns +// provided that there is no race by some other code to grab the same port +// immediately. +func GetAvailableLocalAddress(t *testing.T) string { + ln, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err, "Failed to get a free local port") + // There is a possible race if something else takes this same port before + // the test uses it, however, that is unlikely in practice. + defer ln.Close() + return ln.Addr().String() +} + +// GetAvailablePort finds an available local port and returns it. 
+// GetAvailablePort finds an available local port and returns it. The port is
+// available for opening when this function returns provided that there is no
+// race by some other code to grab the same port immediately.
+func GetAvailablePort(t *testing.T) uint16 {
+	// Retry has been added for Windows as net.Listen can return a port that is not actually available. Details can be
+	// found in https://github.com/docker/for-win/issues/3171 but to summarize Hyper-V will reserve ranges of ports
+	// which do not show up under "netstat -ano" but can only be found by
+	// "netsh interface ipv4 show excludedportrange protocol=tcp". We'll use the exclusions slice to hold those
+	// ranges and retry if the port returned by GetAvailableLocalAddress falls in one of them.
+	var exclusions []portpair
+	portFound := false
+	var port string
+	var err error
+	if runtime.GOOS == "windows" {
+		exclusions = getExclusionsList(t)
+	}
+
+	for !portFound {
+		endpoint := GetAvailableLocalAddress(t)
+		_, port, err = net.SplitHostPort(endpoint)
+		require.NoError(t, err)
+		portFound = true
+		if runtime.GOOS == "windows" {
+			for _, pair := range exclusions {
+				if port >= pair.first && port <= pair.last {
+					portFound = false
+					break
+				}
+			}
+		}
+	}
+
+	portInt, err := strconv.Atoi(port)
+	require.NoError(t, err)
+
+	return uint16(portInt)
+}
+
+// getExclusionsList gets the excluded ports on Windows from the command:
+// netsh interface ipv4 show excludedportrange protocol=tcp
+func getExclusionsList(t *testing.T) []portpair {
+	cmd := exec.Command("netsh", "interface", "ipv4", "show", "excludedportrange", "protocol=tcp")
+	output, err := cmd.CombinedOutput()
+	require.NoError(t, err)
+
+	exclusions := createExclusionsList(string(output), t)
+	return exclusions
+}
+
+func createExclusionsList(exclusionsText string, t *testing.T) []portpair {
+	exclusions := []portpair{}
+
+	parts := strings.Split(exclusionsText, "--------")
+	require.Equal(t, len(parts), 3)
+	portsText := strings.Split(parts[2], "*")
+	require.Equal(t, len(portsText), 2)
+	lines := strings.Split(portsText[0], "\n")
+	for _, line := range lines {
+		if strings.TrimSpace(line) != "" {
+			entries := strings.Fields(strings.TrimSpace(line))
+			require.Equal(t, len(entries), 2)
+			pair := portpair{entries[0], entries[1]}
+			exclusions = append(exclusions, pair)
+		}
+	}
+	return exclusions
+}
+
+// WaitForPort repeatedly attempts to open a local port until it either succeeds
+// or 5 seconds pass. It is useful if you need to asynchronously start a service
+// and wait for it to start.
+func WaitForPort(t *testing.T, port uint16) error {
+	t.Helper()
+
+	totalDuration := 5 * time.Second
+	wait := 100 * time.Millisecond
+	address := fmt.Sprintf("localhost:%d", port)
+
+	ticker := time.NewTicker(wait)
+	defer ticker.Stop()
+
+	timeout := time.After(totalDuration)
+
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := net.Dial("tcp", address)
+			if err == nil && conn != nil {
+				conn.Close()
+				return nil
+			}
+
+		case <-timeout:
+			return fmt.Errorf("failed to wait for port %d", port)
+		}
+	}
+}
+
+// HostPortFromAddr extracts host and port from a network address.
+func HostPortFromAddr(addr net.Addr) (host string, port int, err error) {
+	addrStr := addr.String()
+	sepIndex := strings.LastIndex(addrStr, ":")
+	if sepIndex < 0 {
+		return "", -1, errors.New("failed to parse host:port")
+	}
+	host, portStr := addrStr[:sepIndex], addrStr[sepIndex+1:]
+	port, err = strconv.Atoi(portStr)
+	return host, port, err
+}
+
+// WaitFor waits for the specified condition for up to 10 seconds, recording a
+// test error if the condition does not become true.
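+//
+// A minimal usage sketch (the readiness flag is hypothetical and only
+// illustrates the shape of the condition callback):
+//
+//	var ready int32 // set to 1 by the service under test
+//	ok := WaitFor(t, func() bool { return atomic.LoadInt32(&ready) == 1 }, "service never became ready")
+//	if !ok {
+//		return // a test error has already been recorded
+//	}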
+func WaitFor(t *testing.T, cond func() bool, errMsg ...interface{}) bool { + t.Helper() + + startTime := time.Now() + + // Start with 5 ms waiting interval between condition re-evaluation. + waitInterval := time.Millisecond * 5 + + for { + time.Sleep(waitInterval) + + // Increase waiting interval exponentially up to 500 ms. + if waitInterval < time.Millisecond*500 { + waitInterval *= 2 + } + + if cond() { + return true + } + + if time.Since(startTime) > time.Second*10 { + // Waited too long + t.Error("Time out waiting for", errMsg) + return false + } + } +} + +// LimitedWriter is an io.Writer that will return an EOF error after MaxLen has +// been reached. If MaxLen is 0, Writes will always succeed. +type LimitedWriter struct { + bytes.Buffer + MaxLen int +} + +var _ io.Writer = new(LimitedWriter) + +func (lw *LimitedWriter) Write(p []byte) (n int, err error) { + if lw.MaxLen != 0 && len(p)+lw.Len() > lw.MaxLen { + return 0, io.EOF + } + return lw.Buffer.Write(p) +} + +func (lw *LimitedWriter) Close() error { + return nil +} diff --git a/internal/otel_collector/testutil/testutil_test.go b/internal/otel_collector/testutil/testutil_test.go new file mode 100644 index 00000000000..08f9514eb1f --- /dev/null +++ b/internal/otel_collector/testutil/testutil_test.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + "net" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetAvailableLocalAddress(t *testing.T) { + testEndpointAvailable(t, GetAvailableLocalAddress(t)) +} + +func TestGetAvailablePort(t *testing.T) { + portStr := strconv.Itoa(int(GetAvailablePort(t))) + require.NotEqual(t, "", portStr) + + testEndpointAvailable(t, "localhost:"+portStr) +} + +func TestWaitForPort(t *testing.T) { + port := GetAvailablePort(t) + err := WaitForPort(t, port) + require.Error(t, err) + + port = GetAvailablePort(t) + l, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + require.NoError(t, err) + + err = WaitForPort(t, port) + require.NoError(t, err) + + err = l.Close() + require.NoError(t, err) +} + +func testEndpointAvailable(t *testing.T, endpoint string) { + // Endpoint should be free. + ln0, err := net.Listen("tcp", endpoint) + require.NoError(t, err) + require.NotNil(t, ln0) + defer ln0.Close() + + // Ensure that the endpoint wasn't something like ":0" by checking that a + // second listener will fail. + ln1, err := net.Listen("tcp", endpoint) + require.Error(t, err) + require.Nil(t, ln1) +} + +func TestCreateExclusionsList(t *testing.T) { + // Test two examples of typical output from "netsh interface ipv4 show excludedportrange protocol=tcp" + emptyExclusionsText := ` + +Protocol tcp Port Exclusion Ranges + +Start Port End Port +---------- -------- + +* - Administered port exclusions.` + + exclusionsText := ` + +Start Port End Port +---------- -------- + 49697 49796 + 49797 49896 + +* - Administered port exclusions. 
+` + exclusions := createExclusionsList(exclusionsText, t) + require.Equal(t, len(exclusions), 2) + + emptyExclusions := createExclusionsList(emptyExclusionsText, t) + require.Equal(t, len(emptyExclusions), 0) +} diff --git a/internal/otel_collector/translator/conventions/opencensus.go b/internal/otel_collector/translator/conventions/opencensus.go new file mode 100644 index 00000000000..727af4625fd --- /dev/null +++ b/internal/otel_collector/translator/conventions/opencensus.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package conventions + +// OTLP attributes to map certain OpenCensus proto fields. These fields don't have +// corresponding fields in OTLP, nor are defined in OTLP semantic conventions. +// TODO: decide if any of these must be in OTLP semantic conventions. +const ( + OCAttributeProcessStartTime = "opencensus.starttime" + OCAttributeProcessID = "opencensus.pid" + OCAttributeExporterVersion = "opencensus.exporterversion" + OCAttributeResourceType = "opencensus.resourcetype" + OCAttributeSameProcessAsParentSpan = "opencensus.same_process_as_parent_span" + OCTimeEventMessageEventType = "opencensus.timeevent.messageevent.type" + OCTimeEventMessageEventID = "opencensus.timeevent.messageevent.id" + OCTimeEventMessageEventUSize = "opencensus.timeevent.messageevent.usize" + OCTimeEventMessageEventCSize = "opencensus.timeevent.messageevent.csize" +) diff --git a/internal/otel_collector/translator/conventions/opentelemetry.go b/internal/otel_collector/translator/conventions/opentelemetry.go new file mode 100644 index 00000000000..3f3b81cdf55 --- /dev/null +++ b/internal/otel_collector/translator/conventions/opentelemetry.go @@ -0,0 +1,278 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package conventions + +// OpenTelemetry Semantic Convention values for Resource attribute names. 
+// See: https://github.com/open-telemetry/opentelemetry-specification/tree/master/specification/resource/semantic_conventions/README.md +const ( + AttributeCloudAccount = "cloud.account.id" + AttributeCloudProvider = "cloud.provider" + AttributeCloudRegion = "cloud.region" + AttributeCloudZone = "cloud.zone" + AttributeContainerID = "container.id" + AttributeContainerImage = "container.image.name" + AttributeContainerName = "container.name" + AttributeContainerTag = "container.image.tag" + AttributeDeploymentEnvironment = "deployment.environment" + AttributeFaasID = "faas.id" + AttributeFaasInstance = "faas.instance" + AttributeFaasName = "faas.name" + AttributeFaasVersion = "faas.version" + AttributeHostID = "host.id" + AttributeHostImageID = "host.image.id" + AttributeHostImageName = "host.image.name" + AttributeHostImageVersion = "host.image.version" + AttributeHostName = "host.name" + AttributeHostType = "host.type" + AttributeK8sCluster = "k8s.cluster.name" + AttributeK8sContainer = "k8s.container.name" + AttributeK8sCronJob = "k8s.cronjob.name" + AttributeK8sCronJobUID = "k8s.cronjob.uid" + AttributeK8sDaemonSet = "k8s.daemonset.name" + AttributeK8sDaemonSetUID = "k8s.daemonset.uid" + AttributeK8sDeployment = "k8s.deployment.name" + AttributeK8sDeploymentUID = "k8s.deployment.uid" + AttributeK8sJob = "k8s.job.name" + AttributeK8sJobUID = "k8s.job.uid" + AttributeK8sNamespace = "k8s.namespace.name" + AttributeK8sPod = "k8s.pod.name" + AttributeK8sPodUID = "k8s.pod.uid" + AttributeK8sReplicaSet = "k8s.replicaset.name" + AttributeK8sReplicaSetUID = "k8s.replicaset.uid" + AttributeK8sStatefulSet = "k8s.statefulset.name" + AttributeK8sStatefulSetUID = "k8s.statefulset.uid" + AttributeOSType = "os.type" + AttributeOSDescription = "os.description" + AttributeProcessCommand = "process.command" + AttributeProcessCommandLine = "process.command_line" + AttributeProcessExecutableName = "process.executable.name" + AttributeProcessExecutablePath = "process.executable.path" + AttributeProcessID = "process.pid" + AttributeProcessOwner = "process.owner" + AttributeServiceInstance = "service.instance.id" + AttributeServiceName = "service.name" + AttributeServiceNamespace = "service.namespace" + AttributeServiceVersion = "service.version" + AttributeTelemetryAutoVersion = "telemetry.auto.version" + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + AttributeTelemetrySDKName = "telemetry.sdk.name" + AttributeTelemetrySDKVersion = "telemetry.sdk.version" +) + +// OpenTelemetry Semantic Convention values for Resource attribute "telemetry.sdk.language" values. +// See: https://github.com/open-telemetry/opentelemetry-specification/tree/master/specification/resource/semantic_conventions/README.md +const ( + AttributeSDKLangValueCPP = "cpp" + AttributeSDKLangValueDotNET = "dotnet" + AttributeSDKLangValueErlang = "erlang" + AttributeSDKLangValueGo = "go" + AttributeSDKLangValueJava = "java" + AttributeSDKLangValueNodeJS = "nodejs" + AttributeSDKLangValuePHP = "php" + AttributeSDKLangValuePython = "python" + AttributeSDKLangValueRuby = "ruby" + AttributeSDKLangValueWebJS = "webjs" +) + +// OpenTelemetry Semantic Convention values for Resource attribute "cloud.provider" values. 
+// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/cloud.md
+const (
+	AttributeCloudProviderAWS   = "aws"
+	AttributeCloudProviderAzure = "azure"
+	AttributeCloudProviderGCP   = "gcp"
+)
+
+// GetResourceSemanticConventionAttributeNames returns a slice with all the Resource Semantic Conventions attribute names.
+func GetResourceSemanticConventionAttributeNames() []string {
+	return []string{
+		AttributeCloudAccount,
+		AttributeCloudProvider,
+		AttributeCloudRegion,
+		AttributeCloudZone,
+		AttributeContainerID,
+		AttributeContainerImage,
+		AttributeContainerName,
+		AttributeContainerTag,
+		AttributeDeploymentEnvironment,
+		AttributeFaasID,
+		AttributeFaasInstance,
+		AttributeFaasName,
+		AttributeFaasVersion,
+		AttributeHostID,
+		AttributeHostImageID,
+		AttributeHostImageName,
+		AttributeHostImageVersion,
+		AttributeHostName,
+		AttributeHostType,
+		AttributeK8sCluster,
+		AttributeK8sContainer,
+		AttributeK8sCronJob,
+		AttributeK8sCronJobUID,
+		AttributeK8sDaemonSet,
+		AttributeK8sDaemonSetUID,
+		AttributeK8sDeployment,
+		AttributeK8sDeploymentUID,
+		AttributeK8sJob,
+		AttributeK8sJobUID,
+		AttributeK8sNamespace,
+		AttributeK8sPod,
+		AttributeK8sPodUID,
+		AttributeK8sReplicaSet,
+		AttributeK8sReplicaSetUID,
+		AttributeK8sStatefulSet,
+		AttributeK8sStatefulSetUID,
+		AttributeOSType,
+		AttributeOSDescription,
+		AttributeProcessCommand,
+		AttributeProcessCommandLine,
+		AttributeProcessExecutableName,
+		AttributeProcessExecutablePath,
+		AttributeProcessID,
+		AttributeProcessOwner,
+		AttributeServiceInstance,
+		AttributeServiceName,
+		AttributeServiceNamespace,
+		AttributeServiceVersion,
+		AttributeTelemetryAutoVersion,
+		AttributeTelemetrySDKLanguage,
+		AttributeTelemetrySDKName,
+		AttributeTelemetrySDKVersion,
+	}
+}
+
+// OpenTelemetry Semantic Convention values for general Span attribute names.
+// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/span-general.md
+const (
+	AttributeComponent    = "component"
+	AttributeEnduserID    = "enduser.id"
+	AttributeEnduserRole  = "enduser.role"
+	AttributeEnduserScope = "enduser.scope"
+	AttributeNetHostIP    = "net.host.ip"
+	AttributeNetHostName  = "net.host.name"
+	AttributeNetHostPort  = "net.host.port"
+	AttributeNetPeerIP    = "net.peer.ip"
+	AttributeNetPeerName  = "net.peer.name"
+	AttributeNetPeerPort  = "net.peer.port"
+	AttributeNetTransport = "net.transport"
+	AttributePeerService  = "peer.service"
+)
+
+// OpenTelemetry Semantic Convention values for component attribute values.
+// Possibly being removed due to issue #336 +const ( + ComponentTypeHTTP = "http" + ComponentTypeGRPC = "grpc" +) + +// OpenTelemetry Semantic Convention attribute names for HTTP related attributes +// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md +const ( + AttributeHTTPClientIP = "http.client_ip" + AttributeHTTPFlavor = "http.flavor" + AttributeHTTPHost = "http.host" + AttributeHTTPHostName = "host.name" + AttributeHTTPHostPort = "host.port" + AttributeHTTPMethod = "http.method" + AttributeHTTPRequestContentLength = "http.request_content_length" + AttributeHTTPRequestContentLengthUncompressed = "http.request_content_length_uncompressed" + AttributeHTTPResponseContentLength = "http.response_content_length" + AttributeHTTPResponseContentLengthUncompressed = "http.response_content_length_uncompressed" + AttributeHTTPRoute = "http.route" + AttributeHTTPScheme = "http.scheme" + AttributeHTTPServerName = "http.server_name" + AttributeHTTPStatusCode = "http.status_code" + AttributeHTTPStatusText = "http.status_text" + AttributeHTTPTarget = "http.target" + AttributeHTTPURL = "http.url" + AttributeHTTPUserAgent = "http.user_agent" +) + +// OpenTelemetry Semantic Convention attribute names for database related attributes +// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/database.md +const ( + AttributeDBConnectionString = "db.connection_string" + + AttributeDBCassandraKeyspace = "db.cassandra.keyspace" + AttributeDBHBaseNamespace = "db.hbase.namespace" + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + AttributeDBMongoDBCollection = "db.mongodb.collection" + AttributeDBMsSQLInstanceName = "db.mssql.instance_name" + + AttributeDBName = "db.name" + AttributeDBOperation = "db.operation" + AttributeDBRedisDatabaseIndex = "db.redis.database_index" + AttributeDBStatement = "db.statement" + AttributeDBSystem = "db.system" + AttributeDBUser = "db.user" +) + +// OpenTelemetry Semantic Convention attribute names for gRPC related attributes +// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/rpc.md +const ( + AttributeMessageCompressedSize = "message.compressed_size" + AttributeMessageID = "message.id" + AttributeMessageType = "message.type" + AttributeMessageUncompressedSize = "message.uncompressed_size" + AttributeRPCMethod = "rpc.method" + AttributeRPCService = "rpc.service" + AttributeRPCSystem = "rpc.system" + EventTypeMessage = "message" + MessageTypeReceived = "RECEIVED" + MessageTypeSent = "SENT" +) + +// OpenTelemetry Semantic Convention attribute names for FaaS related attributes +// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/faas.md +const ( + AttributeFaaSCron = "faas.cron" + AttributeFaaSDocumentCollection = "faas.document.collection" + AttributeFaaSDocumentName = "faas.document.name" + AttributeFaaSDocumentOperation = "faas.document.operation" + AttributeFaaSDocumentTime = "faas.document.time" + AttributeFaaSExecution = "faas.execution" + AttributeFaaSTime = "faas.time" + AttributeFaaSTrigger = "faas.trigger" + FaaSTriggerDataSource = "datasource" + FaaSTriggerHTTP = "http" + FaaSTriggerOther = "other" + FaaSTriggerPubSub = "pubsub" + FaaSTriggerTimer = "timer" +) + +// OpenTelemetry Semantic Convention attribute names for messaging system related attributes +// See: 
https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/messaging.md +const ( + AttributeMessagingConversationID = "messaging.conversation_id" + AttributeMessagingDestination = "messaging.destination" + AttributeMessagingDestinationKind = "messaging.destination_kind" + AttributeMessagingMessageID = "messaging.message_id" + AttributeMessagingOperation = "messaging.operation" + AttributeMessagingPayloadCompressedSize = "messaging.message_payload_compressed_size_bytes" + AttributeMessagingPayloadSize = "messaging.message_payload_size_bytes" + AttributeMessagingProtocol = "messaging.protocol" + AttributeMessagingProtocolVersion = "messaging.protocol_version" + AttributeMessagingSystem = "messaging.system" + AttributeMessagingTempDestination = "messaging.temp_destination" + AttributeMessagingURL = "messaging.url" +) + +// OpenTelemetry Semantic Convention attribute names for exceptions +// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/exceptions.md +const ( + AttributeExceptionEventName = "exception" + AttributeExceptionMessage = "exception.message" + AttributeExceptionStacktrace = "exception.stacktrace" + AttributeExceptionType = "exception.type" +) diff --git a/internal/otel_collector/translator/internaldata/metrics_to_oc.go b/internal/otel_collector/translator/internaldata/metrics_to_oc.go new file mode 100644 index 00000000000..612d5de048b --- /dev/null +++ b/internal/otel_collector/translator/internaldata/metrics_to_oc.go @@ -0,0 +1,523 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "sort" + + ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + "github.com/golang/protobuf/ptypes/wrappers" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" +) + +type labelKeys struct { + // ordered OC label keys + keys []*ocmetrics.LabelKey + // map from a label key literal + // to its index in the slice above + keyIndices map[string]int +} + +// MetricsToOC may be used only by OpenCensus receiver and exporter implementations. +// TODO: move this function to OpenCensus package. 
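+//
+// A minimal round-trip sketch (md is assumed to be an already-populated
+// pdata.Metrics value; variable names are illustrative only):
+//
+//	ocmds := MetricsToOC(md)        // pdata -> OpenCensus, one entry per ResourceMetrics
+//	back := OCSliceToMetrics(ocmds) // OpenCensus -> pdata, see oc_to_metrics.go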
+func MetricsToOC(md pdata.Metrics) []consumerdata.MetricsData { + resourceMetrics := md.ResourceMetrics() + + if resourceMetrics.Len() == 0 { + return nil + } + + ocResourceMetricsList := make([]consumerdata.MetricsData, 0, resourceMetrics.Len()) + for i := 0; i < resourceMetrics.Len(); i++ { + ocResourceMetricsList = append(ocResourceMetricsList, resourceMetricsToOC(resourceMetrics.At(i))) + } + + return ocResourceMetricsList +} + +func resourceMetricsToOC(rm pdata.ResourceMetrics) consumerdata.MetricsData { + ocMetricsData := consumerdata.MetricsData{} + ocMetricsData.Node, ocMetricsData.Resource = internalResourceToOC(rm.Resource()) + ilms := rm.InstrumentationLibraryMetrics() + if ilms.Len() == 0 { + return ocMetricsData + } + // Approximate the number of the metrics as the number of the metrics in the first + // instrumentation library info. + ocMetrics := make([]*ocmetrics.Metric, 0, ilms.At(0).Metrics().Len()) + for i := 0; i < ilms.Len(); i++ { + ilm := ilms.At(i) + // TODO: Handle instrumentation library name and version. + metrics := ilm.Metrics() + for j := 0; j < metrics.Len(); j++ { + ocMetrics = append(ocMetrics, metricToOC(metrics.At(j))) + } + } + if len(ocMetrics) != 0 { + ocMetricsData.Metrics = ocMetrics + } + return ocMetricsData +} + +func metricToOC(metric pdata.Metric) *ocmetrics.Metric { + labelKeys := collectLabelKeys(metric) + return &ocmetrics.Metric{ + MetricDescriptor: descriptorToOC(metric, labelKeys), + Timeseries: dataPointsToTimeseries(metric, labelKeys), + Resource: nil, + } +} + +func collectLabelKeys(metric pdata.Metric) *labelKeys { + // NOTE: Internal data structure and OpenCensus have different representations of labels: + // - OC has a single "global" ordered list of label keys per metric in the MetricDescriptor; + // then, every data point has an ordered list of label values matching the key index. + // - Internally labels are stored independently as key-value storage for each point. + // + // So what we do in this translator: + // - Scan all points and their labels to find all label keys used across the metric, + // sort them and set in the MetricDescriptor. + // - For each point we generate an ordered list of label values, + // matching the order of label keys returned here (see `labelValuesToOC` function). + // - If the value for particular label key is missing in the point, we set it to default + // to preserve 1:1 matching between label keys and values. 
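+	//
+	// For example (values are illustrative): two points labeled {k1: v1} and
+	// {k2: v2} yield the sorted descriptor keys [k1, k2]; the first point is
+	// assigned the label values [v1, <default>] and the second [<default>, v2],
+	// with HasValue left false on the padded defaults (see labelValuesToOC).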
+ + // First, collect a set of all labels present in the metric + keySet := make(map[string]struct{}) + + switch metric.DataType() { + case pdata.MetricDataTypeIntGauge: + collectLabelKeysIntDataPoints(metric.IntGauge().DataPoints(), keySet) + case pdata.MetricDataTypeDoubleGauge: + collectLabelKeysDoubleDataPoints(metric.DoubleGauge().DataPoints(), keySet) + case pdata.MetricDataTypeIntSum: + collectLabelKeysIntDataPoints(metric.IntSum().DataPoints(), keySet) + case pdata.MetricDataTypeDoubleSum: + collectLabelKeysDoubleDataPoints(metric.DoubleSum().DataPoints(), keySet) + case pdata.MetricDataTypeIntHistogram: + collectLabelKeysIntHistogramDataPoints(metric.IntHistogram().DataPoints(), keySet) + case pdata.MetricDataTypeDoubleHistogram: + collectLabelKeysDoubleHistogramDataPoints(metric.DoubleHistogram().DataPoints(), keySet) + case pdata.MetricDataTypeDoubleSummary: + collectLabelKeysDoubleSummaryDataPoints(metric.DoubleSummary().DataPoints(), keySet) + } + + if len(keySet) == 0 { + return &labelKeys{} + } + + // Sort keys: while not mandatory, this helps to make the + // output OC metric deterministic and easy to test, i.e. + // the same set of labels will always produce + // OC labels in the alphabetically sorted order. + sortedKeys := make([]string, 0, len(keySet)) + for key := range keySet { + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + + // Construct a resulting list of label keys + keys := make([]*ocmetrics.LabelKey, 0, len(sortedKeys)) + // Label values will have to match keys by index + // so this map will help with fast lookups. + indices := make(map[string]int, len(sortedKeys)) + for i, key := range sortedKeys { + keys = append(keys, &ocmetrics.LabelKey{ + Key: key, + }) + indices[key] = i + } + + return &labelKeys{ + keys: keys, + keyIndices: indices, + } +} + +func collectLabelKeysIntDataPoints(ips pdata.IntDataPointSlice, keySet map[string]struct{}) { + for i := 0; i < ips.Len(); i++ { + addLabelKeys(keySet, ips.At(i).LabelsMap()) + } +} + +func collectLabelKeysDoubleDataPoints(dps pdata.DoubleDataPointSlice, keySet map[string]struct{}) { + for i := 0; i < dps.Len(); i++ { + addLabelKeys(keySet, dps.At(i).LabelsMap()) + } +} + +func collectLabelKeysIntHistogramDataPoints(ihdp pdata.IntHistogramDataPointSlice, keySet map[string]struct{}) { + for i := 0; i < ihdp.Len(); i++ { + addLabelKeys(keySet, ihdp.At(i).LabelsMap()) + } +} + +func collectLabelKeysDoubleHistogramDataPoints(dhdp pdata.DoubleHistogramDataPointSlice, keySet map[string]struct{}) { + for i := 0; i < dhdp.Len(); i++ { + addLabelKeys(keySet, dhdp.At(i).LabelsMap()) + } +} + +func collectLabelKeysDoubleSummaryDataPoints(dhdp pdata.DoubleSummaryDataPointSlice, keySet map[string]struct{}) { + for i := 0; i < dhdp.Len(); i++ { + addLabelKeys(keySet, dhdp.At(i).LabelsMap()) + } +} + +func addLabelKeys(keySet map[string]struct{}, labels pdata.StringMap) { + labels.ForEach(func(k string, v string) { + keySet[k] = struct{}{} + }) +} + +func descriptorToOC(metric pdata.Metric, labelKeys *labelKeys) *ocmetrics.MetricDescriptor { + return &ocmetrics.MetricDescriptor{ + Name: metric.Name(), + Description: metric.Description(), + Unit: metric.Unit(), + Type: descriptorTypeToOC(metric), + LabelKeys: labelKeys.keys, + } +} + +func descriptorTypeToOC(metric pdata.Metric) ocmetrics.MetricDescriptor_Type { + switch metric.DataType() { + case pdata.MetricDataTypeIntGauge: + return ocmetrics.MetricDescriptor_GAUGE_INT64 + case pdata.MetricDataTypeDoubleGauge: + return 
ocmetrics.MetricDescriptor_GAUGE_DOUBLE + case pdata.MetricDataTypeIntSum: + sd := metric.IntSum() + if sd.IsMonotonic() || sd.AggregationTemporality() == pdata.AggregationTemporalityCumulative { + return ocmetrics.MetricDescriptor_CUMULATIVE_INT64 + } + return ocmetrics.MetricDescriptor_GAUGE_INT64 + case pdata.MetricDataTypeDoubleSum: + sd := metric.DoubleSum() + if sd.IsMonotonic() || sd.AggregationTemporality() == pdata.AggregationTemporalityCumulative { + return ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE + } + return ocmetrics.MetricDescriptor_GAUGE_DOUBLE + case pdata.MetricDataTypeDoubleHistogram: + hd := metric.DoubleHistogram() + if hd.AggregationTemporality() == pdata.AggregationTemporalityCumulative { + return ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION + } + return ocmetrics.MetricDescriptor_GAUGE_DISTRIBUTION + case pdata.MetricDataTypeIntHistogram: + hd := metric.IntHistogram() + if hd.AggregationTemporality() == pdata.AggregationTemporalityCumulative { + return ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION + } + return ocmetrics.MetricDescriptor_GAUGE_DISTRIBUTION + case pdata.MetricDataTypeDoubleSummary: + return ocmetrics.MetricDescriptor_SUMMARY + } + return ocmetrics.MetricDescriptor_UNSPECIFIED +} + +func dataPointsToTimeseries(metric pdata.Metric, labelKeys *labelKeys) []*ocmetrics.TimeSeries { + switch metric.DataType() { + case pdata.MetricDataTypeIntGauge: + return intPointsToOC(metric.IntGauge().DataPoints(), labelKeys) + case pdata.MetricDataTypeDoubleGauge: + return doublePointToOC(metric.DoubleGauge().DataPoints(), labelKeys) + case pdata.MetricDataTypeIntSum: + return intPointsToOC(metric.IntSum().DataPoints(), labelKeys) + case pdata.MetricDataTypeDoubleSum: + return doublePointToOC(metric.DoubleSum().DataPoints(), labelKeys) + case pdata.MetricDataTypeIntHistogram: + return intHistogramPointToOC(metric.IntHistogram().DataPoints(), labelKeys) + case pdata.MetricDataTypeDoubleHistogram: + return doubleHistogramPointToOC(metric.DoubleHistogram().DataPoints(), labelKeys) + case pdata.MetricDataTypeDoubleSummary: + return doubleSummaryPointToOC(metric.DoubleSummary().DataPoints(), labelKeys) + } + + return nil +} + +func intPointsToOC(dps pdata.IntDataPointSlice, labelKeys *labelKeys) []*ocmetrics.TimeSeries { + if dps.Len() == 0 { + return nil + } + timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) + for i := 0; i < dps.Len(); i++ { + ip := dps.At(i) + ts := &ocmetrics.TimeSeries{ + StartTimestamp: pdata.UnixNanoToTimestamp(ip.StartTime()), + LabelValues: labelValuesToOC(ip.LabelsMap(), labelKeys), + Points: []*ocmetrics.Point{ + { + Timestamp: pdata.UnixNanoToTimestamp(ip.Timestamp()), + Value: &ocmetrics.Point_Int64Value{ + Int64Value: ip.Value(), + }, + }, + }, + } + timeseries = append(timeseries, ts) + } + return timeseries +} + +func doublePointToOC(dps pdata.DoubleDataPointSlice, labelKeys *labelKeys) []*ocmetrics.TimeSeries { + if dps.Len() == 0 { + return nil + } + timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + ts := &ocmetrics.TimeSeries{ + StartTimestamp: pdata.UnixNanoToTimestamp(dp.StartTime()), + LabelValues: labelValuesToOC(dp.LabelsMap(), labelKeys), + Points: []*ocmetrics.Point{ + { + Timestamp: pdata.UnixNanoToTimestamp(dp.Timestamp()), + Value: &ocmetrics.Point_DoubleValue{ + DoubleValue: dp.Value(), + }, + }, + }, + } + timeseries = append(timeseries, ts) + } + return timeseries +} + +func doubleHistogramPointToOC(dps pdata.DoubleHistogramDataPointSlice, 
labelKeys *labelKeys) []*ocmetrics.TimeSeries { + if dps.Len() == 0 { + return nil + } + timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + buckets := histogramBucketsToOC(dp.BucketCounts()) + doubleExemplarsToOC(dp.ExplicitBounds(), buckets, dp.Exemplars()) + + ts := &ocmetrics.TimeSeries{ + StartTimestamp: pdata.UnixNanoToTimestamp(dp.StartTime()), + LabelValues: labelValuesToOC(dp.LabelsMap(), labelKeys), + Points: []*ocmetrics.Point{ + { + Timestamp: pdata.UnixNanoToTimestamp(dp.Timestamp()), + Value: &ocmetrics.Point_DistributionValue{ + DistributionValue: &ocmetrics.DistributionValue{ + Count: int64(dp.Count()), + Sum: dp.Sum(), + SumOfSquaredDeviation: 0, + BucketOptions: histogramExplicitBoundsToOC(dp.ExplicitBounds()), + Buckets: buckets, + }, + }, + }, + }, + } + timeseries = append(timeseries, ts) + } + return timeseries +} + +func intHistogramPointToOC(dps pdata.IntHistogramDataPointSlice, labelKeys *labelKeys) []*ocmetrics.TimeSeries { + if dps.Len() == 0 { + return nil + } + timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + buckets := histogramBucketsToOC(dp.BucketCounts()) + intExemplarsToOC(dp.ExplicitBounds(), buckets, dp.Exemplars()) + + ts := &ocmetrics.TimeSeries{ + StartTimestamp: pdata.UnixNanoToTimestamp(dp.StartTime()), + LabelValues: labelValuesToOC(dp.LabelsMap(), labelKeys), + Points: []*ocmetrics.Point{ + { + Timestamp: pdata.UnixNanoToTimestamp(dp.Timestamp()), + Value: &ocmetrics.Point_DistributionValue{ + DistributionValue: &ocmetrics.DistributionValue{ + Count: int64(dp.Count()), + Sum: float64(dp.Sum()), + SumOfSquaredDeviation: 0, + BucketOptions: histogramExplicitBoundsToOC(dp.ExplicitBounds()), + Buckets: buckets, + }, + }, + }, + }, + } + timeseries = append(timeseries, ts) + } + return timeseries +} + +func histogramExplicitBoundsToOC(bounds []float64) *ocmetrics.DistributionValue_BucketOptions { + if len(bounds) == 0 { + return nil + } + + return &ocmetrics.DistributionValue_BucketOptions{ + Type: &ocmetrics.DistributionValue_BucketOptions_Explicit_{ + Explicit: &ocmetrics.DistributionValue_BucketOptions_Explicit{ + Bounds: bounds, + }, + }, + } +} + +func histogramBucketsToOC(bcts []uint64) []*ocmetrics.DistributionValue_Bucket { + if len(bcts) == 0 { + return nil + } + + ocBuckets := make([]*ocmetrics.DistributionValue_Bucket, 0, len(bcts)) + for _, bucket := range bcts { + ocBuckets = append(ocBuckets, &ocmetrics.DistributionValue_Bucket{ + Count: int64(bucket), + }) + } + return ocBuckets +} + +func doubleSummaryPointToOC(dps pdata.DoubleSummaryDataPointSlice, labelKeys *labelKeys) []*ocmetrics.TimeSeries { + if dps.Len() == 0 { + return nil + } + timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + percentileValues := summaryPercentilesToOC(dp.QuantileValues()) + + ts := &ocmetrics.TimeSeries{ + StartTimestamp: pdata.UnixNanoToTimestamp(dp.StartTime()), + LabelValues: labelValuesToOC(dp.LabelsMap(), labelKeys), + Points: []*ocmetrics.Point{ + { + Timestamp: pdata.UnixNanoToTimestamp(dp.Timestamp()), + Value: &ocmetrics.Point_SummaryValue{ + SummaryValue: &ocmetrics.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: dp.Sum()}, + Count: &wrappers.Int64Value{Value: int64(dp.Count())}, + Snapshot: &ocmetrics.SummaryValue_Snapshot{ + PercentileValues: percentileValues, + }, + }, + }, + }, + }, + } + timeseries = append(timeseries, ts) + } + return timeseries +} + +func 
summaryPercentilesToOC(qtls pdata.ValueAtQuantileSlice) []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile { + if qtls.Len() == 0 { + return nil + } + + ocPercentiles := make([]*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile, 0, qtls.Len()) + for i := 0; i < qtls.Len(); i++ { + quantile := qtls.At(i) + ocPercentiles = append(ocPercentiles, &ocmetrics.SummaryValue_Snapshot_ValueAtPercentile{ + Percentile: quantile.Quantile() * 100, + Value: quantile.Value(), + }) + } + return ocPercentiles +} + +func doubleExemplarsToOC(bounds []float64, ocBuckets []*ocmetrics.DistributionValue_Bucket, exemplars pdata.DoubleExemplarSlice) { + if exemplars.Len() == 0 { + return + } + + for i := 0; i < exemplars.Len(); i++ { + exemplar := exemplars.At(i) + val := exemplar.Value() + pos := 0 + for ; pos < len(bounds); pos++ { + if val > bounds[pos] { + continue + } + break + } + ocBuckets[pos].Exemplar = exemplarToOC(exemplar.FilteredLabels(), val, exemplar.Timestamp()) + } +} + +func intExemplarsToOC(bounds []float64, ocBuckets []*ocmetrics.DistributionValue_Bucket, exemplars pdata.IntExemplarSlice) { + if exemplars.Len() == 0 { + return + } + + for i := 0; i < exemplars.Len(); i++ { + exemplar := exemplars.At(i) + val := float64(exemplar.Value()) + pos := 0 + for ; pos < len(bounds); pos++ { + if val > bounds[pos] { + continue + } + break + } + ocBuckets[pos].Exemplar = exemplarToOC(exemplar.FilteredLabels(), val, exemplar.Timestamp()) + } +} + +func exemplarToOC(filteredLabels pdata.StringMap, value float64, timestamp pdata.TimestampUnixNano) *ocmetrics.DistributionValue_Exemplar { + var labels map[string]string + if filteredLabels.Len() != 0 { + labels = make(map[string]string, filteredLabels.Len()) + filteredLabels.ForEach(func(k string, v string) { + labels[k] = v + }) + } + + return &ocmetrics.DistributionValue_Exemplar{ + Value: value, + Timestamp: pdata.UnixNanoToTimestamp(timestamp), + Attachments: labels, + } +} + +func labelValuesToOC(labels pdata.StringMap, labelKeys *labelKeys) []*ocmetrics.LabelValue { + if len(labelKeys.keys) == 0 { + return nil + } + + // Initialize label values with defaults + // (The order matches key indices) + labelValuesOrig := make([]ocmetrics.LabelValue, len(labelKeys.keys)) + labelValues := make([]*ocmetrics.LabelValue, len(labelKeys.keys)) + for i := 0; i < len(labelKeys.keys); i++ { + labelValues[i] = &labelValuesOrig[i] + } + + // Visit all defined labels in the point and override defaults with actual values + labels.ForEach(func(k string, v string) { + // Find the appropriate label value that we need to update + keyIndex := labelKeys.keyIndices[k] + labelValue := labelValues[keyIndex] + + // Update label value + labelValue.Value = v + labelValue.HasValue = true + }) + + return labelValues +} diff --git a/internal/otel_collector/translator/internaldata/metrics_to_oc_test.go b/internal/otel_collector/translator/internaldata/metrics_to_oc_test.go new file mode 100644 index 00000000000..e6488e9666c --- /dev/null +++ b/internal/otel_collector/translator/internaldata/metrics_to_oc_test.go @@ -0,0 +1,184 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "testing" + "time" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +func TestMetricsToOC(t *testing.T) { + sampleMetricData := testdata.GeneratMetricsAllTypesWithSampleDatapoints() + attrs := sampleMetricData.ResourceMetrics().At(0).Resource().Attributes() + attrs.Upsert(conventions.AttributeHostName, pdata.NewAttributeValueString("host1")) + attrs.Upsert(conventions.OCAttributeProcessID, pdata.NewAttributeValueInt(123)) + attrs.Upsert(conventions.OCAttributeProcessStartTime, pdata.NewAttributeValueString("2020-02-11T20:26:00Z")) + attrs.Upsert(conventions.AttributeTelemetrySDKLanguage, pdata.NewAttributeValueString("cpp")) + attrs.Upsert(conventions.AttributeTelemetrySDKVersion, pdata.NewAttributeValueString("v2.0.1")) + attrs.Upsert(conventions.OCAttributeExporterVersion, pdata.NewAttributeValueString("v1.2.0")) + + tests := []struct { + name string + internal pdata.Metrics + oc []consumerdata.MetricsData + }{ + { + name: "empty", + internal: testdata.GenerateMetricsEmpty(), + oc: []consumerdata.MetricsData(nil), + }, + + { + name: "one-empty-resource-metrics", + internal: testdata.GenerateMetricsOneEmptyResourceMetrics(), + oc: []consumerdata.MetricsData{ + {}, + }, + }, + + { + name: "no-libraries", + internal: testdata.GenerateMetricsNoLibraries(), + oc: []consumerdata.MetricsData{ + generateOCTestDataNoMetrics(), + }, + }, + + { + name: "one-empty-instrumentation-library", + internal: testdata.GenerateMetricsOneEmptyInstrumentationLibrary(), + oc: []consumerdata.MetricsData{ + generateOCTestDataNoMetrics(), + }, + }, + + { + name: "one-metric-no-resource", + internal: testdata.GenerateMetricsOneMetricNoResource(), + oc: []consumerdata.MetricsData{ + { + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricInt(), + }, + }, + }, + }, + + { + name: "one-metric", + internal: testdata.GenerateMetricsOneMetric(), + oc: []consumerdata.MetricsData{ + generateOCTestDataMetricsOneMetric(), + }, + }, + + { + name: "one-metric-no-labels", + internal: testdata.GenerateMetricsOneMetricNoLabels(), + oc: []consumerdata.MetricsData{ + generateOCTestDataNoLabels(), + }, + }, + + { + name: "all-types-no-data-points", + internal: testdata.GenerateMetricsAllTypesNoDataPoints(), + oc: []consumerdata.MetricsData{ + generateOCTestDataNoPoints(), + }, + }, + + { + name: "sample-metric", + internal: sampleMetricData, + oc: []consumerdata.MetricsData{ + generateOCTestData(), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MetricsToOC(test.internal) + assert.EqualValues(t, test.oc, got) + }) + } 
+} + +func TestMetricsToOC_InvalidDataType(t *testing.T) { + internal := testdata.GenerateMetricsMetricTypeInvalid() + want := []consumerdata.MetricsData{ + { + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{ + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestCounterIntMetricName, + Unit: "1", + Type: ocmetrics.MetricDescriptor_UNSPECIFIED, + LabelKeys: nil, + }, + }, + }, + }, + } + got := MetricsToOC(internal) + assert.EqualValues(t, want, got) +} + +func generateOCTestData() consumerdata.MetricsData { + ts := timestamppb.New(time.Date(2020, 2, 11, 20, 26, 0, 0, time.UTC)) + + return consumerdata.MetricsData{ + Node: &occommon.Node{ + Identifier: &occommon.ProcessIdentifier{ + HostName: "host1", + Pid: 123, + StartTimestamp: ts, + }, + LibraryInfo: &occommon.LibraryInfo{ + Language: occommon.LibraryInfo_CPP, + ExporterVersion: "v1.2.0", + CoreLibraryVersion: "v2.0.1", + }, + }, + Resource: &ocresource.Resource{ + Labels: map[string]string{ + "resource-attr": "resource-attr-val-1", + }, + }, + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricInt(), + generateOCTestMetricDouble(), + generateOCTestMetricDoubleHistogram(), + generateOCTestMetricIntHistogram(), + generateOCTestMetricDoubleSummary(), + }, + } +} diff --git a/internal/otel_collector/translator/internaldata/oc_testdata_test.go b/internal/otel_collector/translator/internaldata/oc_testdata_test.go new file mode 100644 index 00000000000..1fffff2471f --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_testdata_test.go @@ -0,0 +1,542 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
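+
+// oc_testdata_test.go holds the golden OpenCensus structures that the metrics
+// translator tests compare against.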
+ +package internaldata + +import ( + "time" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +func generateOCTestDataNoMetrics() consumerdata.MetricsData { + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + } +} + +func generateOCTestDataNoPoints() consumerdata.MetricsData { + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{ + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestGaugeDoubleMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_GAUGE_DOUBLE, + }, + }, + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestGaugeIntMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_GAUGE_INT64, + }, + }, + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestCounterDoubleMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE, + }, + }, + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestCounterIntMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_CUMULATIVE_INT64, + }, + }, + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestDoubleHistogramMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + }, + }, + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestIntHistogramMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + }, + }, + { + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestDoubleSummaryMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_SUMMARY, + }, + }, + }, + } +} + +func generateOCTestDataNoLabels() consumerdata.MetricsData { + m := generateOCTestMetricInt() + m.MetricDescriptor.LabelKeys = nil + m.Timeseries[0].LabelValues = nil + m.Timeseries[1].LabelValues = nil + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{m}, + } +} + +func generateOCTestDataMetricsOneMetric() consumerdata.MetricsData { + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{generateOCTestMetricInt()}, + } +} + +func generateOCTestDataMetricsOneMetricOneNil() consumerdata.MetricsData { + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{generateOCTestMetricInt(), nil}, + } +} + +func 
generateOCTestDataMetricsOneMetricOneNilTimeseries() consumerdata.MetricsData { + m := generateOCTestMetricInt() + m.Timeseries = append(m.Timeseries, nil) + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{m}, + } +} + +func generateOCTestDataMetricsOneMetricOneNilPoint() consumerdata.MetricsData { + m := generateOCTestMetricInt() + m.Timeseries[0].Points = append(m.Timeseries[0].Points, nil) + return consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{ + Labels: map[string]string{"resource-attr": "resource-attr-val-1"}, + }, + Metrics: []*ocmetrics.Metric{m}, + } +} + +func generateOCTestMetricInt() *ocmetrics.Metric { + return &ocmetrics.Metric{ + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestCounterIntMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_CUMULATIVE_INT64, + LabelKeys: []*ocmetrics.LabelKey{ + {Key: testdata.TestLabelKey1}, + {Key: testdata.TestLabelKey2}, + }, + }, + Timeseries: []*ocmetrics.TimeSeries{ + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + Value: testdata.TestLabelValue1, + HasValue: true, + }, + { + // key2 + HasValue: false, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_Int64Value{ + Int64Value: 123, + }, + }, + }, + }, + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + HasValue: false, + }, + { + // key2 + Value: testdata.TestLabelValue2, + HasValue: true, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_Int64Value{ + Int64Value: 456, + }, + }, + }, + }, + }, + } +} + +func generateOCTestMetricDouble() *ocmetrics.Metric { + return &ocmetrics.Metric{ + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: "counter-double", + Unit: "1", + Type: ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*ocmetrics.LabelKey{ + {Key: testdata.TestLabelKey1}, + {Key: testdata.TestLabelKey2}, + {Key: testdata.TestLabelKey3}, + }, + }, + Timeseries: []*ocmetrics.TimeSeries{ + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + Value: testdata.TestLabelValue1, + HasValue: true, + }, + { + // key2 + Value: testdata.TestLabelValue2, + HasValue: true, + }, + { + // key3 + HasValue: false, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_DoubleValue{ + DoubleValue: 1.23, + }, + }, + }, + }, + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + Value: testdata.TestLabelValue1, + HasValue: true, + }, + { + // key2 + HasValue: false, + }, + { + // key3 + Value: testdata.TestLabelValue3, + HasValue: true, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_DoubleValue{ + DoubleValue: 4.56, + }, + }, + }, + }, + }, + } +} + +func generateOCTestMetricDoubleHistogram() *ocmetrics.Metric { + return &ocmetrics.Metric{ + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestDoubleHistogramMetricName, + Description: "", + Unit: "1", + Type: 
ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*ocmetrics.LabelKey{ + {Key: testdata.TestLabelKey1}, + {Key: testdata.TestLabelKey2}, + {Key: testdata.TestLabelKey3}, + }, + }, + Timeseries: []*ocmetrics.TimeSeries{ + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + Value: testdata.TestLabelValue1, + HasValue: true, + }, + { + // key2 + HasValue: false, + }, + { + // key3 + Value: testdata.TestLabelValue3, + HasValue: true, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_DistributionValue{ + DistributionValue: &ocmetrics.DistributionValue{ + Count: 1, + Sum: 15, + }, + }, + }, + }, + }, + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + HasValue: false, + }, + { + // key2 + Value: testdata.TestLabelValue2, + HasValue: true, + }, + { + // key3 + HasValue: false, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_DistributionValue{ + DistributionValue: &ocmetrics.DistributionValue{ + Count: 1, + Sum: 15, + BucketOptions: &ocmetrics.DistributionValue_BucketOptions{ + Type: &ocmetrics.DistributionValue_BucketOptions_Explicit_{ + Explicit: &ocmetrics.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{1}, + }, + }, + }, + Buckets: []*ocmetrics.DistributionValue_Bucket{ + { + Count: 0, + }, + { + Count: 1, + Exemplar: &ocmetrics.DistributionValue_Exemplar{ + Timestamp: timestamppb.New(testdata.TestMetricExemplarTime), + Value: 15, + Attachments: map[string]string{testdata.TestAttachmentKey: testdata.TestAttachmentValue}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func generateOCTestMetricIntHistogram() *ocmetrics.Metric { + m := generateOCTestMetricDoubleHistogram() + m.MetricDescriptor.Name = testdata.TestIntHistogramMetricName + return m +} + +func generateOCTestMetricDoubleSummary() *ocmetrics.Metric { + return &ocmetrics.Metric{ + MetricDescriptor: &ocmetrics.MetricDescriptor{ + Name: testdata.TestDoubleSummaryMetricName, + Description: "", + Unit: "1", + Type: ocmetrics.MetricDescriptor_SUMMARY, + LabelKeys: []*ocmetrics.LabelKey{ + {Key: testdata.TestLabelKey1}, + {Key: testdata.TestLabelKey2}, + {Key: testdata.TestLabelKey3}, + }, + }, + Timeseries: []*ocmetrics.TimeSeries{ + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + Value: testdata.TestLabelValue1, + HasValue: true, + }, + { + // key2 + HasValue: false, + }, + { + // key3 + Value: testdata.TestLabelValue3, + HasValue: true, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_SummaryValue{ + SummaryValue: &ocmetrics.SummaryValue{ + Count: &wrapperspb.Int64Value{ + Value: 1, + }, + Sum: &wrapperspb.DoubleValue{ + Value: 15, + }, + Snapshot: &ocmetrics.SummaryValue_Snapshot{ + PercentileValues: nil, + }, + }, + }, + }, + }, + }, + { + StartTimestamp: timestamppb.New(testdata.TestMetricStartTime), + LabelValues: []*ocmetrics.LabelValue{ + { + // key1 + HasValue: false, + }, + { + // key2 + Value: testdata.TestLabelValue2, + HasValue: true, + }, + { + // key3 + HasValue: false, + }, + }, + Points: []*ocmetrics.Point{ + { + Timestamp: timestamppb.New(testdata.TestMetricTime), + Value: &ocmetrics.Point_SummaryValue{ + SummaryValue: &ocmetrics.SummaryValue{ + 
Count: &wrapperspb.Int64Value{ + Value: 1, + }, + Sum: &wrapperspb.DoubleValue{ + Value: 15, + }, + Snapshot: &ocmetrics.SummaryValue_Snapshot{ + PercentileValues: []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile{ + { + Percentile: 1, + Value: 15, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func generateResourceWithOcNodeAndResource() pdata.Resource { + resource := pdata.NewResource() + resource.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + conventions.OCAttributeProcessStartTime: pdata.NewAttributeValueString("2020-02-11T20:26:00Z"), + conventions.AttributeHostName: pdata.NewAttributeValueString("host1"), + conventions.OCAttributeProcessID: pdata.NewAttributeValueInt(123), + conventions.AttributeTelemetrySDKVersion: pdata.NewAttributeValueString("v2.0.1"), + conventions.OCAttributeExporterVersion: pdata.NewAttributeValueString("v1.2.0"), + conventions.AttributeTelemetrySDKLanguage: pdata.NewAttributeValueString("cpp"), + conventions.OCAttributeResourceType: pdata.NewAttributeValueString("good-resource"), + "node-str-attr": pdata.NewAttributeValueString("node-str-attr-val"), + "resource-str-attr": pdata.NewAttributeValueString("resource-str-attr-val"), + "resource-int-attr": pdata.NewAttributeValueInt(123), + }) + return resource +} + +func generateOcNode() *occommon.Node { + ts := timestamppb.New(time.Date(2020, 2, 11, 20, 26, 0, 0, time.UTC)) + + return &occommon.Node{ + Identifier: &occommon.ProcessIdentifier{ + HostName: "host1", + Pid: 123, + StartTimestamp: ts, + }, + LibraryInfo: &occommon.LibraryInfo{ + Language: occommon.LibraryInfo_CPP, + ExporterVersion: "v1.2.0", + CoreLibraryVersion: "v2.0.1", + }, + Attributes: map[string]string{ + "node-str-attr": "node-str-attr-val", + }, + } +} + +func generateOcResource() *ocresource.Resource { + return &ocresource.Resource{ + Type: "good-resource", + Labels: map[string]string{ + "resource-str-attr": "resource-str-attr-val", + "resource-int-attr": "123", + }, + } +} diff --git a/internal/otel_collector/translator/internaldata/oc_to_metrics.go b/internal/otel_collector/translator/internaldata/oc_to_metrics.go new file mode 100644 index 00000000000..3233f4d4e59 --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_to_metrics.go @@ -0,0 +1,434 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// OCSliceToMetricData converts a slice of OC data format to data.MetricData. +// Deprecated: use pdata.Metrics instead. 
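+//
+// Each non-empty input MetricsData contributes one or more ResourceMetrics
+// entries to the returned pdata.Metrics; completely empty entries (nil Node,
+// nil Resource, no metrics) are skipped, so an empty or all-empty slice
+// yields an empty pdata.Metrics.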
+func OCSliceToMetrics(ocmds []consumerdata.MetricsData) pdata.Metrics { + metricData := pdata.NewMetrics() + if len(ocmds) == 0 { + return metricData + } + for _, ocmd := range ocmds { + appendOcToMetrics(ocmd, metricData) + } + return metricData +} + +// OCToMetrics converts OC data format to pdata.Metrics. +// Deprecated: use pdata.Metrics instead. OCToMetrics may be used only by OpenCensus +// receiver and exporter implementations. +func OCToMetrics(md consumerdata.MetricsData) pdata.Metrics { + metricData := pdata.NewMetrics() + appendOcToMetrics(md, metricData) + return metricData +} + +func appendOcToMetrics(md consumerdata.MetricsData, dest pdata.Metrics) { + if md.Node == nil && md.Resource == nil && len(md.Metrics) == 0 { + return + } + + rms := dest.ResourceMetrics() + initialRmsLen := rms.Len() + + if len(md.Metrics) == 0 { + // At least one of the md.Node or md.Resource is not nil. Set the resource and return. + rms.Resize(initialRmsLen + 1) + ocNodeResourceToInternal(md.Node, md.Resource, rms.At(initialRmsLen).Resource()) + return + } + + // We may need to split OC metrics into several ResourceMetrics. OC metrics can have a + // Resource field inside them set to nil which indicates they use the Resource + // specified in "md.Resource", or they can have the Resource field inside them set + // to non-nil which indicates they have overridden the Resource field and "md.Resource" + // does not apply to those metrics. + // + // Each OC metric that has its own Resource field set to non-nil must be placed in a + // separate ResourceMetrics instance, containing only that metric. All other OC Metrics + // that have nil Resource field must be placed in one other ResourceMetrics instance, + // which will get its Resource field from "md.Resource". + // + // We will end up with one or more ResourceMetrics like this: + // + // ResourceMetrics ResourceMetrics ResourceMetrics + // +-------+-------+---+-------+ +--------------+ +--------------+ + // |Metric1|Metric2|...|MetricM| |Metric | |Metric | ... + // +-------+-------+---+-------+ +--------------+ +--------------+ + + // Count the number of metrics that have nil Resource and need to be combined + // in one slice. + combinedMetricCount := 0 + distinctResourceCount := 0 + for _, ocMetric := range md.Metrics { + if ocMetric == nil { + // Skip nil metrics. + continue + } + if ocMetric.Resource == nil { + combinedMetricCount++ + } else { + distinctResourceCount++ + } + } + // Total number of resources is equal to: + // initial + numMetricsWithResource + (optional) 1 + resourceCount := initialRmsLen + distinctResourceCount + if combinedMetricCount > 0 { + // +1 for all metrics with nil resource + resourceCount++ + } + rms.Resize(resourceCount) + + // Translate "combinedMetrics" first + + if combinedMetricCount > 0 { + rm0 := rms.At(initialRmsLen) + ocNodeResourceToInternal(md.Node, md.Resource, rm0.Resource()) + + // Allocate a slice for metrics that need to be combined into first ResourceMetrics. + ilms := rm0.InstrumentationLibraryMetrics() + ilms.Resize(1) + combinedMetrics := ilms.At(0).Metrics() + combinedMetrics.Resize(combinedMetricCount) + + // Index to next available slot in "combinedMetrics" slice. + combinedMetricIdx := 0 + + for _, ocMetric := range md.Metrics { + if ocMetric == nil { + // Skip nil metrics. + continue + } + + if ocMetric.Resource != nil { + continue // Those are processed separately below. + } + + // Add the metric to the "combinedMetrics". combinedMetrics length is equal + // to combinedMetricCount.
The loop above that calculates combinedMetricCount + // uses exactly the same conditions as this loop. + ocMetricToMetrics(ocMetric, combinedMetrics.At(combinedMetricIdx)) + combinedMetricIdx++ + } + } + + // Translate distinct metrics + + resourceMetricIdx := 0 + if combinedMetricCount > 0 { + // The first ResourceMetrics is used for the default resource, so start at 1. + resourceMetricIdx = 1 + } + for _, ocMetric := range md.Metrics { + if ocMetric == nil { + // Skip nil metrics. + continue + } + + if ocMetric.Resource == nil { + continue // Already processed above. + } + + // This metric has a different Resource and must be placed in a different + // ResourceMetrics instance. Create a separate ResourceMetrics item just for this metric + // and store at resourceMetricIdx. + ocMetricToResourceMetrics(ocMetric, md.Node, rms.At(initialRmsLen+resourceMetricIdx)) + resourceMetricIdx++ + } +} + +func ocMetricToResourceMetrics(ocMetric *ocmetrics.Metric, node *occommon.Node, out pdata.ResourceMetrics) { + ocNodeResourceToInternal(node, ocMetric.Resource, out.Resource()) + ilms := out.InstrumentationLibraryMetrics() + ilms.Resize(1) + metrics := ilms.At(0).Metrics() + metrics.Resize(1) + ocMetricToMetrics(ocMetric, metrics.At(0)) +} + +func ocMetricToMetrics(ocMetric *ocmetrics.Metric, metric pdata.Metric) { + ocDescriptor := ocMetric.GetMetricDescriptor() + if ocDescriptor == nil { + pdata.NewMetric().CopyTo(metric) + return + } + + descriptorType := descriptorTypeToMetrics(ocDescriptor.Type, metric) + if descriptorType == pdata.MetricDataTypeNone { + pdata.NewMetric().CopyTo(metric) + return + } + + metric.SetDescription(ocDescriptor.GetDescription()) + metric.SetName(ocDescriptor.GetName()) + metric.SetUnit(ocDescriptor.GetUnit()) + + setDataPoints(ocMetric, metric) +} + +func descriptorTypeToMetrics(t ocmetrics.MetricDescriptor_Type, metric pdata.Metric) pdata.MetricDataType { + switch t { + case ocmetrics.MetricDescriptor_GAUGE_INT64: + metric.SetDataType(pdata.MetricDataTypeIntGauge) + return pdata.MetricDataTypeIntGauge + case ocmetrics.MetricDescriptor_GAUGE_DOUBLE: + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + return pdata.MetricDataTypeDoubleGauge + case ocmetrics.MetricDescriptor_CUMULATIVE_INT64: + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return pdata.MetricDataTypeIntSum + case ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE: + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return pdata.MetricDataTypeDoubleSum + case ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION: + metric.SetDataType(pdata.MetricDataTypeDoubleHistogram) + histo := metric.DoubleHistogram() + histo.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return pdata.MetricDataTypeDoubleHistogram + case ocmetrics.MetricDescriptor_SUMMARY: + metric.SetDataType(pdata.MetricDataTypeDoubleSummary) + // no temporality specified for summary metric + return pdata.MetricDataTypeDoubleSummary + } + return pdata.MetricDataTypeNone +} + +// setDataPoints converts OC timeseries to internal datapoints based on metric type +func setDataPoints(ocMetric *ocmetrics.Metric, metric pdata.Metric) { + switch metric.DataType() { + case pdata.MetricDataTypeIntGauge: + fillIntDataPoint(ocMetric, metric.IntGauge().DataPoints()) + case 
pdata.MetricDataTypeDoubleGauge: + fillDoubleDataPoint(ocMetric, metric.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeIntSum: + fillIntDataPoint(ocMetric, metric.IntSum().DataPoints()) + case pdata.MetricDataTypeDoubleSum: + fillDoubleDataPoint(ocMetric, metric.DoubleSum().DataPoints()) + case pdata.MetricDataTypeDoubleHistogram: + fillDoubleHistogramDataPoint(ocMetric, metric.DoubleHistogram().DataPoints()) + case pdata.MetricDataTypeDoubleSummary: + fillDoubleSummaryDataPoint(ocMetric, metric.DoubleSummary().DataPoints()) + } +} + +func setLabelsMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetrics.LabelValue, labelsMap pdata.StringMap) { + if len(ocLabelsKeys) == 0 || len(ocLabelValues) == 0 { + return + } + + labelsCount := len(ocLabelsKeys) + + // Handle invalid length of OC label values list + if len(ocLabelValues) < labelsCount { + labelsCount = len(ocLabelValues) + } + + labelsMap.InitEmptyWithCapacity(labelsCount) + for i := 0; i < labelsCount; i++ { + if !ocLabelValues[i].GetHasValue() { + continue + } + labelsMap.Insert(ocLabelsKeys[i].Key, ocLabelValues[i].Value) + } +} + +func fillIntDataPoint(ocMetric *ocmetrics.Metric, dps pdata.IntDataPointSlice) { + ocPointsCount := getPointsCount(ocMetric) + dps.Resize(ocPointsCount) + ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() + pos := 0 + for _, timeseries := range ocMetric.GetTimeseries() { + if timeseries == nil { + continue + } + startTimestamp := pdata.TimestampToUnixNano(timeseries.GetStartTimestamp()) + + for _, point := range timeseries.GetPoints() { + if point == nil { + continue + } + + dp := dps.At(pos) + pos++ + + dp.SetStartTime(startTimestamp) + dp.SetTimestamp(pdata.TimestampToUnixNano(point.GetTimestamp())) + setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) + dp.SetValue(point.GetInt64Value()) + } + } + dps.Resize(pos) +} + +func fillDoubleDataPoint(ocMetric *ocmetrics.Metric, dps pdata.DoubleDataPointSlice) { + ocPointsCount := getPointsCount(ocMetric) + dps.Resize(ocPointsCount) + ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() + pos := 0 + for _, timeseries := range ocMetric.GetTimeseries() { + if timeseries == nil { + continue + } + startTimestamp := pdata.TimestampToUnixNano(timeseries.GetStartTimestamp()) + + for _, point := range timeseries.GetPoints() { + if point == nil { + continue + } + + dp := dps.At(pos) + pos++ + + dp.SetStartTime(startTimestamp) + dp.SetTimestamp(pdata.TimestampToUnixNano(point.GetTimestamp())) + setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) + dp.SetValue(point.GetDoubleValue()) + } + } + dps.Resize(pos) +} + +func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.DoubleHistogramDataPointSlice) { + ocPointsCount := getPointsCount(ocMetric) + dps.Resize(ocPointsCount) + ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() + pos := 0 + for _, timeseries := range ocMetric.GetTimeseries() { + if timeseries == nil { + continue + } + startTimestamp := pdata.TimestampToUnixNano(timeseries.GetStartTimestamp()) + + for _, point := range timeseries.GetPoints() { + if point == nil { + continue + } + + dp := dps.At(pos) + pos++ + + dp.SetStartTime(startTimestamp) + dp.SetTimestamp(pdata.TimestampToUnixNano(point.GetTimestamp())) + setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) + distributionValue := point.GetDistributionValue() + dp.SetSum(distributionValue.GetSum()) + dp.SetCount(uint64(distributionValue.GetCount())) + 
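// Translate the bucket counts (attaching any bucket exemplars), then set the explicit bounds. +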
ocHistogramBucketsToMetrics(distributionValue.GetBuckets(), dp) + dp.SetExplicitBounds(distributionValue.GetBucketOptions().GetExplicit().GetBounds()) + } + } + dps.Resize(pos) +} + +func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.DoubleSummaryDataPointSlice) { + ocPointsCount := getPointsCount(ocMetric) + dps.Resize(ocPointsCount) + ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() + pos := 0 + for _, timeseries := range ocMetric.GetTimeseries() { + if timeseries == nil { + continue + } + startTimestamp := pdata.TimestampToUnixNano(timeseries.GetStartTimestamp()) + + for _, point := range timeseries.GetPoints() { + if point == nil { + continue + } + + dp := dps.At(pos) + pos++ + + dp.SetStartTime(startTimestamp) + dp.SetTimestamp(pdata.TimestampToUnixNano(point.GetTimestamp())) + setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) + summaryValue := point.GetSummaryValue() + dp.SetSum(summaryValue.GetSum().GetValue()) + dp.SetCount(uint64(summaryValue.GetCount().GetValue())) + ocSummaryPercentilesToMetrics(summaryValue.GetSnapshot().GetPercentileValues(), dp) + } + } + dps.Resize(pos) +} + +func ocHistogramBucketsToMetrics(ocBuckets []*ocmetrics.DistributionValue_Bucket, dp pdata.DoubleHistogramDataPoint) { + if len(ocBuckets) == 0 { + return + } + buckets := make([]uint64, len(ocBuckets)) + for i := range buckets { + buckets[i] = uint64(ocBuckets[i].GetCount()) + if ocBuckets[i].GetExemplar() != nil { + exemplar := pdata.NewDoubleExemplar() + exemplarToMetrics(ocBuckets[i].GetExemplar(), exemplar) + dp.Exemplars().Append(exemplar) + } + } + dp.SetBucketCounts(buckets) +} + +func ocSummaryPercentilesToMetrics(ocPercentiles []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile, dp pdata.DoubleSummaryDataPoint) { + if len(ocPercentiles) == 0 { + return + } + + quantiles := pdata.NewValueAtQuantileSlice() + quantiles.Resize(len(ocPercentiles)) + + for i, percentile := range ocPercentiles { + quantiles.At(i).SetQuantile(percentile.GetPercentile() / 100) + quantiles.At(i).SetValue(percentile.GetValue()) + } + + quantiles.CopyTo(dp.QuantileValues()) +} + +func exemplarToMetrics(ocExemplar *ocmetrics.DistributionValue_Exemplar, exemplar pdata.DoubleExemplar) { + if ocExemplar.GetTimestamp() != nil { + exemplar.SetTimestamp(pdata.TimestampToUnixNano(ocExemplar.GetTimestamp())) + } + exemplar.SetValue(ocExemplar.GetValue()) + attachments := exemplar.FilteredLabels() + ocAttachments := ocExemplar.GetAttachments() + attachments.InitEmptyWithCapacity(len(ocAttachments)) + for k, v := range ocAttachments { + attachments.Upsert(k, v) + } +} + +func getPointsCount(ocMetric *ocmetrics.Metric) int { + timeseriesSlice := ocMetric.GetTimeseries() + var count int + for _, timeseries := range timeseriesSlice { + count += len(timeseries.GetPoints()) + } + return count +} diff --git a/internal/otel_collector/translator/internaldata/oc_to_metrics_test.go b/internal/otel_collector/translator/internaldata/oc_to_metrics_test.go new file mode 100644 index 00000000000..b4d989659ba --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_to_metrics_test.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "testing" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestOCToMetrics(t *testing.T) { + // From OC we never generate Int Histograms; we always generate Double Histograms. + allTypesNoDataPoints := testdata.GenerateMetricsAllTypesNoDataPoints() + dh := allTypesNoDataPoints.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(4) + ih := allTypesNoDataPoints.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(5) + ih.SetDataType(pdata.MetricDataTypeDoubleHistogram) + dh.DoubleHistogram().CopyTo(ih.DoubleHistogram()) + + sampleMetricData := testdata.GeneratMetricsAllTypesWithSampleDatapoints() + dh = sampleMetricData.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(2) + ih = sampleMetricData.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(3) + ih.SetDataType(pdata.MetricDataTypeDoubleHistogram) + dh.DoubleHistogram().CopyTo(ih.DoubleHistogram()) + + tests := []struct { + name string + oc consumerdata.MetricsData + internal pdata.Metrics + }{ + { + name: "empty", + oc: consumerdata.MetricsData{}, + internal: testdata.GenerateMetricsEmpty(), + }, + + { + name: "one-empty-resource-metrics", + oc: consumerdata.MetricsData{ + Node: &occommon.Node{}, + Resource: &ocresource.Resource{}, + }, + internal: testdata.GenerateMetricsOneEmptyResourceMetrics(), + }, + + { + name: "no-libraries", + oc: generateOCTestDataNoMetrics(), + internal: testdata.GenerateMetricsNoLibraries(), + }, + + { + name: "all-types-no-data-points", + oc: generateOCTestDataNoPoints(), + internal: allTypesNoDataPoints, + }, + + { + name: "one-metric-no-labels", + oc: generateOCTestDataNoLabels(), + internal: testdata.GenerateMetricsOneMetricNoLabels(), + }, + + { + name: "one-metric", + oc: generateOCTestDataMetricsOneMetric(), + internal: testdata.GenerateMetricsOneMetric(), + }, + + { + name: "one-metric-one-summary", + oc: consumerdata.MetricsData{ + Resource: generateOCTestResource(), + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricInt(), + generateOCTestMetricDoubleSummary(), + }, + }, + internal: testdata.GenerateMetricsOneCounterOneSummaryMetrics(), + }, + + { + name: "one-metric-one-nil", + oc: generateOCTestDataMetricsOneMetricOneNil(), + internal: testdata.GenerateMetricsOneMetric(), + }, + + { + name: "one-metric-one-nil-timeseries", + oc: generateOCTestDataMetricsOneMetricOneNilTimeseries(), + internal: testdata.GenerateMetricsOneMetric(), + }, + + { + name: "one-metric-one-nil-point", + oc: generateOCTestDataMetricsOneMetricOneNilPoint(), + internal: testdata.GenerateMetricsOneMetric(), + }, + + { + name: "sample-metric", + oc: consumerdata.MetricsData{ + Resource: generateOCTestResource(), + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricInt(), + generateOCTestMetricDouble(), + generateOCTestMetricDoubleHistogram(), + generateOCTestMetricIntHistogram(), + generateOCTestMetricDoubleSummary(), + }, + }, + internal: sampleMetricData, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := OCToMetrics(test.oc) + assert.EqualValues(t, test.internal, got) + + ocslice := []consumerdata.MetricsData{ + test.oc, + test.oc, + } + wantSlice := pdata.NewMetrics() + // Double the ResourceMetrics only if not empty. + if test.internal.ResourceMetrics().Len() != 0 { + test.internal.Clone().ResourceMetrics().MoveAndAppendTo(wantSlice.ResourceMetrics()) + test.internal.Clone().ResourceMetrics().MoveAndAppendTo(wantSlice.ResourceMetrics()) + } + gotSlice := OCSliceToMetrics(ocslice) + assert.EqualValues(t, wantSlice, gotSlice) + }) + } +} + +func TestOCToMetrics_ResourceInMetric(t *testing.T) { + internal := testdata.GenerateMetricsOneMetric() + want := pdata.NewMetrics() + internal.Clone().ResourceMetrics().MoveAndAppendTo(want.ResourceMetrics()) + internal.Clone().ResourceMetrics().MoveAndAppendTo(want.ResourceMetrics()) + want.ResourceMetrics().At(1).Resource().Attributes().UpsertString("resource-attr", "another-value") + oc := generateOCTestDataMetricsOneMetric() + oc2 := generateOCTestDataMetricsOneMetric() + oc.Metrics = append(oc.Metrics, oc2.Metrics...) + oc.Metrics[1].Resource = oc2.Resource + oc.Metrics[1].Resource.Labels["resource-attr"] = "another-value" + got := OCToMetrics(oc) + assert.EqualValues(t, want, got) +} + +func TestOCToMetrics_ResourceInMetricOnly(t *testing.T) { + internal := testdata.GenerateMetricsOneMetric() + want := pdata.NewMetrics() + internal.Clone().ResourceMetrics().MoveAndAppendTo(want.ResourceMetrics()) + oc := generateOCTestDataMetricsOneMetric() + // Move resource to metric level.
+ // We shouldn't have a "combined" resource after conversion + oc.Metrics[0].Resource = oc.Resource + oc.Resource = nil + got := OCToMetrics(oc) + assert.EqualValues(t, want, got) +} + +func BenchmarkMetricIntOCToMetrics(b *testing.B) { + ocMetric := consumerdata.MetricsData{ + Resource: generateOCTestResource(), + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricInt(), + generateOCTestMetricInt(), + generateOCTestMetricInt(), + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + OCToMetrics(ocMetric) + } +} + +func BenchmarkMetricDoubleOCToMetrics(b *testing.B) { + ocMetric := consumerdata.MetricsData{ + Resource: generateOCTestResource(), + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricDouble(), + generateOCTestMetricDouble(), + generateOCTestMetricDouble(), + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + OCToMetrics(ocMetric) + } +} + +func BenchmarkMetricHistogramOCToMetrics(b *testing.B) { + ocMetric := consumerdata.MetricsData{ + Resource: generateOCTestResource(), + Metrics: []*ocmetrics.Metric{ + generateOCTestMetricDoubleHistogram(), + generateOCTestMetricDoubleHistogram(), + generateOCTestMetricDoubleHistogram(), + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + OCToMetrics(ocMetric) + } +} + +func generateOCTestResource() *ocresource.Resource { + return &ocresource.Resource{ + Labels: map[string]string{ + "resource-attr": "resource-attr-val-1", + }, + } +} diff --git a/internal/otel_collector/translator/internaldata/oc_to_resource.go b/internal/otel_collector/translator/internaldata/oc_to_resource.go new file mode 100644 index 00000000000..310b2ca48eb --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_to_resource.go @@ -0,0 +1,132 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internaldata + +import ( + "time" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +var ocLangCodeToLangMap = getOCLangCodeToLangMap() + +func getOCLangCodeToLangMap() map[occommon.LibraryInfo_Language]string { + mappings := make(map[occommon.LibraryInfo_Language]string) + mappings[1] = conventions.AttributeSDKLangValueCPP + mappings[2] = conventions.AttributeSDKLangValueDotNET + mappings[3] = conventions.AttributeSDKLangValueErlang + mappings[4] = conventions.AttributeSDKLangValueGo + mappings[5] = conventions.AttributeSDKLangValueJava + mappings[6] = conventions.AttributeSDKLangValueNodeJS + mappings[7] = conventions.AttributeSDKLangValuePHP + mappings[8] = conventions.AttributeSDKLangValuePython + mappings[9] = conventions.AttributeSDKLangValueRuby + mappings[10] = conventions.AttributeSDKLangValueWebJS + return mappings +} + +func ocNodeResourceToInternal(ocNode *occommon.Node, ocResource *ocresource.Resource, dest pdata.Resource) { + if ocNode == nil && ocResource == nil { + return + } + + // Number of special fields in OC that will be translated to Attributes + const serviceInfoAttrCount = 1 // Number of Node.ServiceInfo fields. + const nodeIdentifierAttrCount = 3 // Number of Node.Identifier fields. + const libraryInfoAttrCount = 3 // Number of Node.LibraryInfo fields. + const specialResourceAttrCount = 1 // Number of Resource fields. + + // Calculate maximum total number of attributes for capacity reservation. + maxTotalAttrCount := 0 + if ocNode != nil { + maxTotalAttrCount += len(ocNode.Attributes) + if ocNode.ServiceInfo != nil { + maxTotalAttrCount += serviceInfoAttrCount + } + if ocNode.Identifier != nil { + maxTotalAttrCount += nodeIdentifierAttrCount + } + if ocNode.LibraryInfo != nil { + maxTotalAttrCount += libraryInfoAttrCount + } + } + if ocResource != nil { + maxTotalAttrCount += len(ocResource.Labels) + if ocResource.Type != "" { + maxTotalAttrCount += specialResourceAttrCount + } + } + + // There are no attributes to be set. + if maxTotalAttrCount == 0 { + return + } + + attrs := dest.Attributes() + attrs.InitEmptyWithCapacity(maxTotalAttrCount) + + if ocNode != nil { + // Copy all Attributes. + for k, v := range ocNode.Attributes { + attrs.InsertString(k, v) + } + + // Add all special fields. 
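+	// These fields use Upsert so they override any same-name keys copied from ocNode.Attributes above.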
+ if ocNode.ServiceInfo != nil { + if ocNode.ServiceInfo.Name != "" { + attrs.UpsertString(conventions.AttributeServiceName, ocNode.ServiceInfo.Name) + } + } + if ocNode.Identifier != nil { + if ocNode.Identifier.StartTimestamp != nil { + attrs.UpsertString(conventions.OCAttributeProcessStartTime, ocNode.Identifier.StartTimestamp.AsTime().Format(time.RFC3339Nano)) + } + if ocNode.Identifier.HostName != "" { + attrs.UpsertString(conventions.AttributeHostName, ocNode.Identifier.HostName) + } + if ocNode.Identifier.Pid != 0 { + attrs.UpsertInt(conventions.OCAttributeProcessID, int64(ocNode.Identifier.Pid)) + } + } + if ocNode.LibraryInfo != nil { + if ocNode.LibraryInfo.CoreLibraryVersion != "" { + attrs.UpsertString(conventions.AttributeTelemetrySDKVersion, ocNode.LibraryInfo.CoreLibraryVersion) + } + if ocNode.LibraryInfo.ExporterVersion != "" { + attrs.UpsertString(conventions.OCAttributeExporterVersion, ocNode.LibraryInfo.ExporterVersion) + } + if ocNode.LibraryInfo.Language != occommon.LibraryInfo_LANGUAGE_UNSPECIFIED { + if str, ok := ocLangCodeToLangMap[ocNode.LibraryInfo.Language]; ok { + attrs.UpsertString(conventions.AttributeTelemetrySDKLanguage, str) + } + } + } + } + + if ocResource != nil { + // Copy resource Labels. + for k, v := range ocResource.Labels { + attrs.InsertString(k, v) + } + // Add special fields. + if ocResource.Type != "" { + attrs.UpsertString(conventions.OCAttributeResourceType, ocResource.Type) + } + } +} diff --git a/internal/otel_collector/translator/internaldata/oc_to_resource_test.go b/internal/otel_collector/translator/internaldata/oc_to_resource_test.go new file mode 100644 index 00000000000..37d85fabe90 --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_to_resource_test.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internaldata + +import ( + "strings" + "testing" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +func TestOcNodeResourceToInternal(t *testing.T) { + resource := pdata.NewResource() + ocNodeResourceToInternal(nil, nil, resource) + assert.Equal(t, 0, resource.Attributes().Len()) + + ocNode := &occommon.Node{} + ocResource := &ocresource.Resource{} + ocNodeResourceToInternal(ocNode, ocResource, resource) + assert.Equal(t, 0, resource.Attributes().Len()) + + ocNode = generateOcNode() + ocResource = generateOcResource() + expectedAttrs := generateResourceWithOcNodeAndResource().Attributes() + // We don't have type information in ocResource, so need to make int attr string + expectedAttrs.Upsert("resource-int-attr", pdata.NewAttributeValueString("123")) + ocNodeResourceToInternal(ocNode, ocResource, resource) + assert.EqualValues(t, expectedAttrs.Sort(), resource.Attributes().Sort()) + + // Make sure hard-coded fields override same-name values in Attributes. + // To do that add Attributes with same-name. + expectedAttrs.ForEach(func(k string, v pdata.AttributeValue) { + // Set all except "attr1" which is not a hard-coded field to some bogus values. + if !strings.Contains(k, "-attr") { + ocNode.Attributes[k] = "this will be overridden 1" + } + }) + ocResource.Labels[conventions.OCAttributeResourceType] = "this will be overridden 2" + + // Convert again. + resource = pdata.NewResource() + ocNodeResourceToInternal(ocNode, ocResource, resource) + // And verify that same-name attributes were ignored. + assert.EqualValues(t, expectedAttrs.Sort(), resource.Attributes().Sort()) +} + +func BenchmarkOcNodeResourceToInternal(b *testing.B) { + ocNode := generateOcNode() + ocResource := generateOcResource() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + resource := pdata.NewResource() + ocNodeResourceToInternal(ocNode, ocResource, resource) + if ocNode.Identifier.Pid != 123 { + b.Fail() + } + } +} + +func BenchmarkOcResourceNodeUnmarshal(b *testing.B) { + oc := &agenttracepb.ExportTraceServiceRequest{ + Node: generateOcNode(), + Spans: nil, + Resource: generateOcResource(), + } + + bytes, err := proto.Marshal(oc) + if err != nil { + b.Fail() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + unmarshalOc := &agenttracepb.ExportTraceServiceRequest{} + if err := proto.Unmarshal(bytes, unmarshalOc); err != nil { + b.Fail() + } + if unmarshalOc.Node.Identifier.Pid != 123 { + b.Fail() + } + } +} diff --git a/internal/otel_collector/translator/internaldata/oc_to_traces.go b/internal/otel_collector/translator/internaldata/oc_to_traces.go new file mode 100644 index 00000000000..8e83611b646 --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_to_traces.go @@ -0,0 +1,395 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "strings" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "go.opencensus.io/trace" + "google.golang.org/protobuf/types/known/wrapperspb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +// OCToTraceData converts OC data format to Traces. +// Deprecated: use pdata.Traces instead. OCToTraceData may be used only by OpenCensus +// receiver and exporter implementations. +func OCToTraceData(td consumerdata.TraceData) pdata.Traces { + traceData := pdata.NewTraces() + if td.Node == nil && td.Resource == nil && len(td.Spans) == 0 { + return traceData + } + + if len(td.Spans) == 0 { + // At least one of the td.Node or td.Resource is not nil. Set the resource and return. + rss := traceData.ResourceSpans() + rss.Resize(1) + ocNodeResourceToInternal(td.Node, td.Resource, rss.At(0).Resource()) + return traceData + } + + // We may need to split OC spans into several ResourceSpans. OC Spans can have a + // Resource field inside them set to nil which indicates they use the Resource + // specified in "td.Resource", or they can have the Resource field inside them set + // to non-nil which indicates they have overridden the Resource field and "td.Resource" + // does not apply to those spans. + // + // Each OC Span that has its own Resource field set to non-nil must be placed in a + // separate ResourceSpans instance, containing only that span. All other OC Spans + // that have nil Resource field must be placed in one other ResourceSpans instance, + // which will get its Resource field from "td.Resource". + // + // We will end up with one or more ResourceSpans like this: + // + // ResourceSpans ResourceSpans ResourceSpans + // +-----+-----+---+-----+ +------------+ +------------+ + // |Span1|Span2|...|SpanM| |Span | |Span | ... + // +-----+-----+---+-----+ +------------+ +------------+ + + // Count the number of spans that have nil Resource and need to be combined + // in one slice. + combinedSpanCount := 0 + distinctResourceCount := 0 + for _, ocSpan := range td.Spans { + if ocSpan == nil { + // Skip nil spans. + continue + } + if ocSpan.Resource == nil { + combinedSpanCount++ + } else { + distinctResourceCount++ + } + } + // Total number of resources is equal to: + // 1 (for all spans with nil resource) + numSpansWithResource (distinctResourceCount). + + rss := traceData.ResourceSpans() + rss.Resize(distinctResourceCount + 1) + rs0 := rss.At(0) + ocNodeResourceToInternal(td.Node, td.Resource, rs0.Resource()) + + // Allocate a slice for spans that need to be combined into first ResourceSpans. + ilss := rs0.InstrumentationLibrarySpans() + ilss.Resize(1) + ils0 := ilss.At(0) + combinedSpans := ils0.Spans() + combinedSpans.Resize(combinedSpanCount) + + // Now do the span translation and place them in appropriate ResourceSpans + // instances.
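+	// Spans with a nil Resource are appended to rs0; each span carrying its own Resource gets a separate ResourceSpans.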
+ + // Index to next available slot in "combinedSpans" slice. + combinedSpanIdx := 0 + // The first ResourceSpans is used for the default resource, so start at 1. + resourceSpanIdx := 1 + for _, ocSpan := range td.Spans { + if ocSpan == nil { + // Skip nil spans. + continue + } + + if ocSpan.Resource == nil { + // Add the span to the "combinedSpans". combinedSpans length is equal + // to combinedSpanCount. The loop above that calculates combinedSpanCount + // uses exactly the same conditions as this loop. + ocSpanToInternal(ocSpan, combinedSpans.At(combinedSpanIdx)) + combinedSpanIdx++ + } else { + // This span has a different Resource and must be placed in a different + // ResourceSpans instance. Create a separate ResourceSpans item just for this span. + ocSpanToResourceSpans(ocSpan, td.Node, traceData.ResourceSpans().At(resourceSpanIdx)) + resourceSpanIdx++ + } + } + + return traceData +} + +func ocSpanToResourceSpans(ocSpan *octrace.Span, node *occommon.Node, dest pdata.ResourceSpans) { + ocNodeResourceToInternal(node, ocSpan.Resource, dest.Resource()) + ilss := dest.InstrumentationLibrarySpans() + ilss.Resize(1) + ils0 := ilss.At(0) + spans := ils0.Spans() + spans.Resize(1) + ocSpanToInternal(ocSpan, spans.At(0)) +} + +func ocSpanToInternal(src *octrace.Span, dest pdata.Span) { + // Note that ocSpanKindToInternal must be called before initAttributeMapFromOC + // since it may modify src.Attributes (remove the attribute which represents the + // span kind). + dest.SetKind(ocSpanKindToInternal(src.Kind, src.Attributes)) + + dest.SetTraceID(traceIDToInternal(src.TraceId)) + dest.SetSpanID(spanIDToInternal(src.SpanId)) + dest.SetTraceState(ocTraceStateToInternal(src.Tracestate)) + dest.SetParentSpanID(spanIDToInternal(src.ParentSpanId)) + + dest.SetName(src.Name.GetValue()) + dest.SetStartTime(pdata.TimestampToUnixNano(src.StartTime)) + dest.SetEndTime(pdata.TimestampToUnixNano(src.EndTime)) + + ocStatusToInternal(src.Status, src.Attributes, dest.Status()) + + initAttributeMapFromOC(src.Attributes, dest.Attributes()) + dest.SetDroppedAttributesCount(ocAttrsToDroppedAttributes(src.Attributes)) + ocEventsToInternal(src.TimeEvents, dest) + ocLinksToInternal(src.Links, dest) + ocSameProcessAsParentSpanToInternal(src.SameProcessAsParentSpan, dest) +} + +// Transforms the byte slice trace ID into a [16]byte internal pdata.TraceID. +// If the input is larger, it is truncated to 16 bytes. +func traceIDToInternal(traceID []byte) pdata.TraceID { + tid := [16]byte{} + copy(tid[:], traceID) + return pdata.NewTraceID(tid) +} + +// Transforms the byte slice span ID into a [8]byte internal pdata.SpanID. +// If the input is larger, it is truncated to 8 bytes. +func spanIDToInternal(spanID []byte) pdata.SpanID { + sid := [8]byte{} + copy(sid[:], spanID) + return pdata.NewSpanID(sid) +} + +func ocStatusToInternal(ocStatus *octrace.Status, ocAttrs *octrace.Span_Attributes, dest pdata.SpanStatus) { + if ocStatus == nil { + return + } + + var code pdata.StatusCode + switch ocStatus.Code { + case trace.StatusCodeOK: + code = pdata.StatusCodeUnset + default: + // all other OC status codes are errors. + code = pdata.StatusCodeError + } + + if ocAttrs != nil { + // If tracetranslator.TagStatusCode is set, it must override the status code value. + // See the reverse translation in traces_to_oc.go:statusToOC().
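+		// The attribute is deleted after being read so the status-code hint does not also appear among the span's attributes.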
+ if attr, ok := ocAttrs.AttributeMap[tracetranslator.TagStatusCode]; ok { + code = pdata.StatusCode(attr.GetIntValue()) + delete(ocAttrs.AttributeMap, tracetranslator.TagStatusCode) + } + } + + dest.SetCode(code) + dest.SetMessage(ocStatus.Message) +} + +// Convert tracestate to the W3C format. See https://w3c.github.io/trace-context/ +func ocTraceStateToInternal(ocTracestate *octrace.Span_Tracestate) pdata.TraceState { + if ocTracestate == nil { + return "" + } + var sb strings.Builder + for i, entry := range ocTracestate.Entries { + sb.Grow(1 + len(entry.Key) + 1 + len(entry.Value)) + if i > 0 { + sb.WriteString(",") + } + sb.WriteString(entry.Key) + sb.WriteString("=") + sb.WriteString(entry.Value) + } + return pdata.TraceState(sb.String()) +} + +func ocAttrsToDroppedAttributes(ocAttrs *octrace.Span_Attributes) uint32 { + if ocAttrs == nil { + return 0 + } + return uint32(ocAttrs.DroppedAttributesCount) +} + +// initAttributeMapFromOC initializes the AttributeMap from OC attributes +func initAttributeMapFromOC(ocAttrs *octrace.Span_Attributes, dest pdata.AttributeMap) { + if ocAttrs == nil { + return + } + + if len(ocAttrs.AttributeMap) > 0 { + dest.InitEmptyWithCapacity(len(ocAttrs.AttributeMap)) + for key, ocAttr := range ocAttrs.AttributeMap { + switch attribValue := ocAttr.Value.(type) { + case *octrace.AttributeValue_StringValue: + dest.UpsertString(key, attribValue.StringValue.GetValue()) + + case *octrace.AttributeValue_IntValue: + dest.UpsertInt(key, attribValue.IntValue) + + case *octrace.AttributeValue_BoolValue: + dest.UpsertBool(key, attribValue.BoolValue) + + case *octrace.AttributeValue_DoubleValue: + dest.UpsertDouble(key, attribValue.DoubleValue) + + default: + dest.UpsertString(key, "") + } + } + } +} + +func ocSpanKindToInternal(ocKind octrace.Span_SpanKind, ocAttrs *octrace.Span_Attributes) pdata.SpanKind { + switch ocKind { + case octrace.Span_SERVER: + return pdata.SpanKindSERVER + + case octrace.Span_CLIENT: + return pdata.SpanKindCLIENT + + case octrace.Span_SPAN_KIND_UNSPECIFIED: + // Span kind field is unspecified, check if TagSpanKind attribute is set. + // This can happen if the span kind had no equivalent in OC, so it could not be represented in + // the OC SpanKind field. In that case the span kind may be carried in the special attribute TagSpanKind. + if ocAttrs != nil { + kindAttr := ocAttrs.AttributeMap[tracetranslator.TagSpanKind] + if kindAttr != nil { + strVal, ok := kindAttr.Value.(*octrace.AttributeValue_StringValue) + if ok && strVal != nil { + var otlpKind pdata.SpanKind + switch tracetranslator.OpenTracingSpanKind(strVal.StringValue.GetValue()) { + case tracetranslator.OpenTracingSpanKindConsumer: + otlpKind = pdata.SpanKindCONSUMER + case tracetranslator.OpenTracingSpanKindProducer: + otlpKind = pdata.SpanKindPRODUCER + case tracetranslator.OpenTracingSpanKindInternal: + otlpKind = pdata.SpanKindINTERNAL + default: + return pdata.SpanKindUNSPECIFIED + } + delete(ocAttrs.AttributeMap, tracetranslator.TagSpanKind) + return otlpKind + } + } + } + return pdata.SpanKindUNSPECIFIED + + default: + return pdata.SpanKindUNSPECIFIED + } +} + +func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest pdata.Span) { + if ocEvents == nil { + return + } + + dest.SetDroppedEventsCount(uint32(ocEvents.DroppedMessageEventsCount + ocEvents.DroppedAnnotationsCount)) + + if len(ocEvents.TimeEvent) == 0 { + return + } + + events := dest.Events() + events.Resize(len(ocEvents.TimeEvent)) + + i := 0 + for _, ocEvent := range ocEvents.TimeEvent { + if ocEvent == nil { + // Skip nil source events.
+ continue + } + + event := events.At(i) + i++ + + event.SetTimestamp(pdata.TimestampToUnixNano(ocEvent.Time)) + + switch teValue := ocEvent.Value.(type) { + case *octrace.Span_TimeEvent_Annotation_: + if teValue.Annotation != nil { + event.SetName(teValue.Annotation.Description.GetValue()) + initAttributeMapFromOC(teValue.Annotation.Attributes, event.Attributes()) + event.SetDroppedAttributesCount(ocAttrsToDroppedAttributes(teValue.Annotation.Attributes)) + } + + case *octrace.Span_TimeEvent_MessageEvent_: + ocMessageEventToInternalAttrs(teValue.MessageEvent, event.Attributes()) + // No dropped attributes for this case. + event.SetDroppedAttributesCount(0) + + default: + event.SetName("An unknown OpenCensus TimeEvent type was detected when translating") + } + } + + // Truncate the slice to only include populated items. + events.Resize(i) +} + +func ocLinksToInternal(ocLinks *octrace.Span_Links, dest pdata.Span) { + if ocLinks == nil { + return + } + + dest.SetDroppedLinksCount(uint32(ocLinks.DroppedLinksCount)) + + if len(ocLinks.Link) == 0 { + return + } + + links := dest.Links() + links.Resize(len(ocLinks.Link)) + + i := 0 + for _, ocLink := range ocLinks.Link { + if ocLink == nil { + continue + } + + link := links.At(i) + i++ + + link.SetTraceID(traceIDToInternal(ocLink.TraceId)) + link.SetSpanID(spanIDToInternal(ocLink.SpanId)) + link.SetTraceState(ocTraceStateToInternal(ocLink.Tracestate)) + initAttributeMapFromOC(ocLink.Attributes, link.Attributes()) + link.SetDroppedAttributesCount(ocAttrsToDroppedAttributes(ocLink.Attributes)) + } + + // Truncate the slice to only include populated items. + links.Resize(i) +} + +func ocMessageEventToInternalAttrs(msgEvent *octrace.Span_TimeEvent_MessageEvent, dest pdata.AttributeMap) { + if msgEvent == nil { + return + } + + dest.UpsertString(conventions.OCTimeEventMessageEventType, msgEvent.Type.String()) + dest.UpsertInt(conventions.OCTimeEventMessageEventID, int64(msgEvent.Id)) + dest.UpsertInt(conventions.OCTimeEventMessageEventUSize, int64(msgEvent.UncompressedSize)) + dest.UpsertInt(conventions.OCTimeEventMessageEventCSize, int64(msgEvent.CompressedSize)) +} + +func ocSameProcessAsParentSpanToInternal(spaps *wrapperspb.BoolValue, dest pdata.Span) { + if spaps == nil { + return + } + dest.Attributes().UpsertBool(conventions.OCAttributeSameProcessAsParentSpan, spaps.Value) +} diff --git a/internal/otel_collector/translator/internaldata/oc_to_traces_test.go b/internal/otel_collector/translator/internaldata/oc_to_traces_test.go new file mode 100644 index 00000000000..29bcd3b5ebb --- /dev/null +++ b/internal/otel_collector/translator/internaldata/oc_to_traces_test.go @@ -0,0 +1,485 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internaldata + +import ( + "strconv" + "testing" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +func TestOcTraceStateToInternal(t *testing.T) { + assert.EqualValues(t, "", ocTraceStateToInternal(nil)) + + tracestate := &octrace.Span_Tracestate{ + Entries: []*octrace.Span_Tracestate_Entry{ + { + Key: "abc", + Value: "def", + }, + }, + } + assert.EqualValues(t, "abc=def", ocTraceStateToInternal(tracestate)) + + tracestate.Entries = append(tracestate.Entries, + &octrace.Span_Tracestate_Entry{ + Key: "123", + Value: "4567", + }) + assert.EqualValues(t, "abc=def,123=4567", ocTraceStateToInternal(tracestate)) +} + +func TestInitAttributeMapFromOC(t *testing.T) { + attrs := pdata.NewAttributeMap() + initAttributeMapFromOC(nil, attrs) + assert.EqualValues(t, pdata.NewAttributeMap(), attrs) + assert.EqualValues(t, 0, ocAttrsToDroppedAttributes(nil)) + + ocAttrs := &octrace.Span_Attributes{} + attrs = pdata.NewAttributeMap() + initAttributeMapFromOC(ocAttrs, attrs) + assert.EqualValues(t, pdata.NewAttributeMap(), attrs) + assert.EqualValues(t, 0, ocAttrsToDroppedAttributes(ocAttrs)) + + ocAttrs = &octrace.Span_Attributes{ + DroppedAttributesCount: 123, + } + attrs = pdata.NewAttributeMap() + initAttributeMapFromOC(ocAttrs, attrs) + assert.EqualValues(t, pdata.NewAttributeMap(), attrs) + assert.EqualValues(t, 123, ocAttrsToDroppedAttributes(ocAttrs)) + + ocAttrs = &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{}, + DroppedAttributesCount: 234, + } + attrs = pdata.NewAttributeMap() + initAttributeMapFromOC(ocAttrs, attrs) + assert.EqualValues(t, pdata.NewAttributeMap(), attrs) + assert.EqualValues(t, 234, ocAttrsToDroppedAttributes(ocAttrs)) + + ocAttrs = &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "abc": { + Value: &octrace.AttributeValue_StringValue{StringValue: &octrace.TruncatableString{Value: "def"}}, + }, + }, + DroppedAttributesCount: 234, + } + attrs = pdata.NewAttributeMap() + initAttributeMapFromOC(ocAttrs, attrs) + assert.EqualValues(t, + pdata.NewAttributeMap().InitFromMap( + map[string]pdata.AttributeValue{ + "abc": pdata.NewAttributeValueString("def"), + }), + attrs) + assert.EqualValues(t, 234, ocAttrsToDroppedAttributes(ocAttrs)) + + ocAttrs.AttributeMap["intval"] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_IntValue{IntValue: 345}, + } + ocAttrs.AttributeMap["boolval"] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_BoolValue{BoolValue: true}, + } + ocAttrs.AttributeMap["doubleval"] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_DoubleValue{DoubleValue: 4.5}, + } + attrs = pdata.NewAttributeMap() + initAttributeMapFromOC(ocAttrs, attrs) + + expectedAttr := pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + "abc": pdata.NewAttributeValueString("def"), + 
"intval": pdata.NewAttributeValueInt(345), + "boolval": pdata.NewAttributeValueBool(true), + "doubleval": pdata.NewAttributeValueDouble(4.5), + }) + assert.EqualValues(t, expectedAttr.Sort(), attrs.Sort()) + assert.EqualValues(t, 234, ocAttrsToDroppedAttributes(ocAttrs)) +} + +func TestOcSpanKindToInternal(t *testing.T) { + tests := []struct { + ocAttrs *octrace.Span_Attributes + ocKind octrace.Span_SpanKind + otlpKind otlptrace.Span_SpanKind + }{ + { + ocKind: octrace.Span_CLIENT, + otlpKind: otlptrace.Span_SPAN_KIND_CLIENT, + }, + { + ocKind: octrace.Span_SERVER, + otlpKind: otlptrace.Span_SPAN_KIND_SERVER, + }, + { + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + otlpKind: otlptrace.Span_SPAN_KIND_UNSPECIFIED, + }, + { + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + ocAttrs: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span.kind": {Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "consumer"}}}, + }, + }, + otlpKind: otlptrace.Span_SPAN_KIND_CONSUMER, + }, + { + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + ocAttrs: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span.kind": {Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "producer"}}}, + }, + }, + otlpKind: otlptrace.Span_SPAN_KIND_PRODUCER, + }, + { + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + ocAttrs: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span.kind": {Value: &octrace.AttributeValue_IntValue{ + IntValue: 123}}, + }, + }, + otlpKind: otlptrace.Span_SPAN_KIND_UNSPECIFIED, + }, + { + ocKind: octrace.Span_CLIENT, + ocAttrs: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span.kind": {Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "consumer"}}}, + }, + }, + otlpKind: otlptrace.Span_SPAN_KIND_CLIENT, + }, + { + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + ocAttrs: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span.kind": {Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "internal"}}}, + }, + }, + otlpKind: otlptrace.Span_SPAN_KIND_INTERNAL, + }, + } + + for _, test := range tests { + t.Run(test.otlpKind.String(), func(t *testing.T) { + got := ocSpanKindToInternal(test.ocKind, test.ocAttrs) + assert.EqualValues(t, test.otlpKind, got, "Expected "+test.otlpKind.String()+", got "+got.String()) + }) + } +} + +func TestOcToInternal(t *testing.T) { + ocNode := &occommon.Node{} + ocResource1 := &ocresource.Resource{Labels: map[string]string{"resource-attr": "resource-attr-val-1"}} + ocResource2 := &ocresource.Resource{Labels: map[string]string{"resource-attr": "resource-attr-val-2"}} + + startTime := timestamppb.New(testdata.TestSpanStartTime) + eventTime := timestamppb.New(testdata.TestSpanEventTime) + endTime := timestamppb.New(testdata.TestSpanEndTime) + + ocSpan1 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationA"}, + StartTime: startTime, + EndTime: endTime, + TimeEvents: &octrace.Span_TimeEvents{ + TimeEvent: []*octrace.Span_TimeEvent{ + { + Time: eventTime, + Value: &octrace.Span_TimeEvent_Annotation_{ + Annotation: &octrace.Span_TimeEvent_Annotation{ + Description: &octrace.TruncatableString{Value: "event-with-attr"}, + Attributes: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span-event-attr": { + Value: 
&octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-event-attr-val"}, + }, + }, + }, + DroppedAttributesCount: 2, + }, + }, + }, + }, + { + Time: eventTime, + Value: &octrace.Span_TimeEvent_Annotation_{ + Annotation: &octrace.Span_TimeEvent_Annotation{ + Description: &octrace.TruncatableString{Value: "event"}, + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 2, + }, + }, + }, + }, + }, + DroppedAnnotationsCount: 1, + }, + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 1, + }, + Status: &octrace.Status{Message: "status-cancelled", Code: 1}, + } + + // TODO: Create another unit test fully covering ocSpanToInternal + ocSpanZeroedParentID := proto.Clone(ocSpan1).(*octrace.Span) + ocSpanZeroedParentID.ParentSpanId = []byte{0, 0, 0, 0, 0, 0, 0, 0} + + ocSpan2 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationB"}, + StartTime: startTime, + EndTime: endTime, + Links: &octrace.Span_Links{ + Link: []*octrace.Span_Link{ + { + Attributes: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span-link-attr": { + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-link-attr-val"}, + }, + }, + }, + DroppedAttributesCount: 4, + }, + }, + { + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 4, + }, + }, + }, + DroppedLinksCount: 3, + }, + } + + ocSpan3 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationC"}, + StartTime: startTime, + EndTime: endTime, + Resource: ocResource2, + Attributes: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span-attr": { + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-attr-val"}, + }, + }, + }, + DroppedAttributesCount: 5, + }, + } + + tests := []struct { + name string + td pdata.Traces + oc consumerdata.TraceData + }{ + { + name: "empty", + td: testdata.GenerateTraceDataEmpty(), + oc: consumerdata.TraceData{}, + }, + + { + name: "one-empty-resource-spans", + td: testdata.GenerateTraceDataOneEmptyResourceSpans(), + oc: consumerdata.TraceData{Node: ocNode}, + }, + + { + name: "no-libraries", + td: testdata.GenerateTraceDataNoLibraries(), + oc: consumerdata.TraceData{Resource: ocResource1}, + }, + + { + name: "one-span-no-resource", + td: testdata.GenerateTraceDataOneSpanNoResource(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: &ocresource.Resource{}, + Spans: []*octrace.Span{ocSpan1}, + }, + }, + + { + name: "one-span", + td: testdata.GenerateTraceDataOneSpan(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1}, + }, + }, + + { + name: "one-span-zeroed-parent-id", + td: testdata.GenerateTraceDataOneSpan(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpanZeroedParentID}, + }, + }, + + { + name: "one-span-one-nil", + td: testdata.GenerateTraceDataOneSpan(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1, nil}, + }, + }, + + { + name: "two-spans-same-resource", + td: testdata.GenerateTraceDataTwoSpansSameResource(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1, nil, ocSpan2}, + }, + }, + + { + name: "two-spans-same-resource-one-different", + td: testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: 
ocResource1, + Spans: []*octrace.Span{ocSpan1, ocSpan2, ocSpan3}, + }, + }, + + { + name: "two-spans-and-separate-in-the-middle", + td: testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent(), + oc: consumerdata.TraceData{ + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1, ocSpan3, ocSpan2}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.EqualValues(t, test.td, OCToTraceData(test.oc)) + }) + } +} + +func TestOcSameProcessAsParentSpanToInternal(t *testing.T) { + span := pdata.NewSpan() + ocSameProcessAsParentSpanToInternal(nil, span) + assert.Equal(t, 0, span.Attributes().Len()) + + ocSameProcessAsParentSpanToInternal(wrapperspb.Bool(false), span) + assert.Equal(t, 1, span.Attributes().Len()) + v, ok := span.Attributes().Get(conventions.OCAttributeSameProcessAsParentSpan) + assert.True(t, ok) + assert.EqualValues(t, pdata.AttributeValueBOOL, v.Type()) + assert.False(t, v.BoolVal()) + + ocSameProcessAsParentSpanToInternal(wrapperspb.Bool(true), span) + assert.Equal(t, 1, span.Attributes().Len()) + v, ok = span.Attributes().Get(conventions.OCAttributeSameProcessAsParentSpan) + assert.True(t, ok) + assert.EqualValues(t, pdata.AttributeValueBOOL, v.Type()) + assert.True(t, v.BoolVal()) +} + +func BenchmarkSpansWithAttributesOCToInternal(b *testing.B) { + ocSpan := generateSpanWithAttributes(15) + + ocTraceData := consumerdata.TraceData{ + Resource: generateOCTestResource(), + Spans: []*octrace.Span{ + ocSpan, + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + OCToTraceData(ocTraceData) + } +} + +func BenchmarkSpansWithAttributesUnmarshal(b *testing.B) { + ocSpan := generateSpanWithAttributes(15) + + bytes, err := proto.Marshal(ocSpan) + if err != nil { + b.Fail() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + unmarshalOc := &octrace.Span{} + if err := proto.Unmarshal(bytes, unmarshalOc); err != nil { + b.Fail() + } + if len(unmarshalOc.Attributes.AttributeMap) != 15 { + b.Fail() + } + } +} + +func generateSpanWithAttributes(len int) *octrace.Span { + startTime := timestamppb.New(testdata.TestSpanStartTime) + endTime := timestamppb.New(testdata.TestSpanEndTime) + ocSpan2 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationB"}, + StartTime: startTime, + EndTime: endTime, + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 3, + }, + } + + ocSpan2.Attributes.AttributeMap = make(map[string]*octrace.AttributeValue, len) + ocAttr := ocSpan2.Attributes.AttributeMap + for i := 0; i < len; i++ { + ocAttr["span-link-attr_"+strconv.Itoa(i)] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-link-attr-val"}, + }, + } + } + return ocSpan2 +} diff --git a/internal/otel_collector/translator/internaldata/resource_to_oc.go b/internal/otel_collector/translator/internaldata/resource_to_oc.go new file mode 100644 index 00000000000..6c99f2b623c --- /dev/null +++ b/internal/otel_collector/translator/internaldata/resource_to_oc.go @@ -0,0 +1,172 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "strconv" + "time" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "go.opencensus.io/resource/resourcekeys" + "google.golang.org/protobuf/types/known/timestamppb" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +type ocInferredResourceType struct { + // label presence to check against + labelKeyPresent string + // inferred resource type + resourceType string +} + +// mapping of label presence to inferred OC resource type +// NOTE: defined in the priority order (first match wins) +var labelPresenceToResourceType = []ocInferredResourceType{ + { + // See https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/container.md + labelKeyPresent: conventions.AttributeContainerName, + resourceType: resourcekeys.ContainerType, + }, + { + // See https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/k8s.md#pod + labelKeyPresent: conventions.AttributeK8sPod, + // NOTE: OpenCensus is using "k8s" rather than "k8s.pod" for Pod + resourceType: resourcekeys.K8SType, + }, + { + // See https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/host.md + labelKeyPresent: conventions.AttributeHostName, + resourceType: resourcekeys.HostType, + }, + { + // See https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/cloud.md + labelKeyPresent: conventions.AttributeCloudProvider, + resourceType: resourcekeys.CloudType, + }, +} + +var langToOCLangCodeMap = getSDKLangToOCLangCodeMap() + +func getSDKLangToOCLangCodeMap() map[string]int32 { + mappings := make(map[string]int32) + mappings[conventions.AttributeSDKLangValueCPP] = 1 + mappings[conventions.AttributeSDKLangValueDotNET] = 2 + mappings[conventions.AttributeSDKLangValueErlang] = 3 + mappings[conventions.AttributeSDKLangValueGo] = 4 + mappings[conventions.AttributeSDKLangValueJava] = 5 + mappings[conventions.AttributeSDKLangValueNodeJS] = 6 + mappings[conventions.AttributeSDKLangValuePHP] = 7 + mappings[conventions.AttributeSDKLangValuePython] = 8 + mappings[conventions.AttributeSDKLangValueRuby] = 9 + mappings[conventions.AttributeSDKLangValueWebJS] = 10 + return mappings +} + +func internalResourceToOC(resource pdata.Resource) (*occommon.Node, *ocresource.Resource) { + attrs := resource.Attributes() + if attrs.Len() == 0 { + return nil, nil + } + + ocNode := &occommon.Node{} + ocResource := &ocresource.Resource{} + labels := make(map[string]string, attrs.Len()) + attrs.ForEach(func(k string, v pdata.AttributeValue) { + val := tracetranslator.AttributeValueToString(v, false) + + switch k { + case conventions.OCAttributeResourceType: + ocResource.Type = val + case conventions.AttributeServiceName: + 
getServiceInfo(ocNode).Name = val + case conventions.OCAttributeProcessStartTime: + t, err := time.Parse(time.RFC3339Nano, val) + if err != nil { + return + } + ts := timestamppb.New(t) + getProcessIdentifier(ocNode).StartTimestamp = ts + case conventions.AttributeHostName: + getProcessIdentifier(ocNode).HostName = val + case conventions.OCAttributeProcessID: + pid, err := strconv.Atoi(val) + if err != nil { + pid = defaultProcessID + } + getProcessIdentifier(ocNode).Pid = uint32(pid) + case conventions.AttributeTelemetrySDKVersion: + getLibraryInfo(ocNode).CoreLibraryVersion = val + case conventions.OCAttributeExporterVersion: + getLibraryInfo(ocNode).ExporterVersion = val + case conventions.AttributeTelemetrySDKLanguage: + if code, ok := langToOCLangCodeMap[val]; ok { + getLibraryInfo(ocNode).Language = occommon.LibraryInfo_Language(code) + } + default: + // Not a special attribute, put it into resource labels + labels[k] = val + } + }) + ocResource.Labels = labels + + // If resource type is missing, try to infer it + // based on the presence of resource labels (semantic conventions) + if ocResource.Type == "" { + if resType, ok := inferResourceType(ocResource.Labels); ok { + ocResource.Type = resType + } + } + + return ocNode, ocResource +} + +func getProcessIdentifier(ocNode *occommon.Node) *occommon.ProcessIdentifier { + if ocNode.Identifier == nil { + ocNode.Identifier = &occommon.ProcessIdentifier{} + } + return ocNode.Identifier +} + +func getLibraryInfo(ocNode *occommon.Node) *occommon.LibraryInfo { + if ocNode.LibraryInfo == nil { + ocNode.LibraryInfo = &occommon.LibraryInfo{} + } + return ocNode.LibraryInfo +} + +func getServiceInfo(ocNode *occommon.Node) *occommon.ServiceInfo { + if ocNode.ServiceInfo == nil { + ocNode.ServiceInfo = &occommon.ServiceInfo{} + } + return ocNode.ServiceInfo +} + +func inferResourceType(labels map[string]string) (string, bool) { + if labels == nil { + return "", false + } + + for _, mapping := range labelPresenceToResourceType { + if _, ok := labels[mapping.labelKeyPresent]; ok { + return mapping.resourceType, true + } + } + + return "", false +} diff --git a/internal/otel_collector/translator/internaldata/resource_to_oc_test.go b/internal/otel_collector/translator/internaldata/resource_to_oc_test.go new file mode 100644 index 00000000000..d2e5ccd6b60 --- /dev/null +++ b/internal/otel_collector/translator/internaldata/resource_to_oc_test.go @@ -0,0 +1,288 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internaldata + +import ( + "strconv" + "testing" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "go.opencensus.io/resource/resourcekeys" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func TestResourceToOC(t *testing.T) { + emptyResource := pdata.NewResource() + + ocNode := generateOcNode() + ocResource := generateOcResource() + // We don't differentiate between Node.Attributes and Resource when converting, + // and put everything in Resource. + ocResource.Labels["node-str-attr"] = "node-str-attr-val" + ocNode.Attributes = nil + + tests := []struct { + name string + resource pdata.Resource + ocNode *occommon.Node + ocResource *ocresource.Resource + }{ + { + name: "nil", + resource: pdata.NewResource(), + ocNode: nil, + ocResource: nil, + }, + + { + name: "empty", + resource: emptyResource, + ocNode: nil, + ocResource: nil, + }, + + { + name: "with-attributes", + resource: generateResourceWithOcNodeAndResource(), + ocNode: ocNode, + ocResource: ocResource, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ocNode, ocResource := internalResourceToOC(test.resource) + assert.EqualValues(t, test.ocNode, ocNode) + assert.EqualValues(t, test.ocResource, ocResource) + }) + } +} + +func TestContainerResourceToOC(t *testing.T) { + resource := pdata.NewResource() + resource.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + conventions.AttributeK8sCluster: pdata.NewAttributeValueString("cluster1"), + conventions.AttributeK8sPod: pdata.NewAttributeValueString("pod1"), + conventions.AttributeK8sNamespace: pdata.NewAttributeValueString("namespace1"), + conventions.AttributeContainerName: pdata.NewAttributeValueString("container-name1"), + conventions.AttributeCloudAccount: pdata.NewAttributeValueString("proj1"), + conventions.AttributeCloudZone: pdata.NewAttributeValueString("zone1"), + }) + + want := &ocresource.Resource{ + Type: resourcekeys.ContainerType, // Inferred type + Labels: map[string]string{ + resourcekeys.K8SKeyClusterName: "cluster1", + resourcekeys.K8SKeyPodName: "pod1", + resourcekeys.K8SKeyNamespaceName: "namespace1", + resourcekeys.ContainerKeyName: "container-name1", + resourcekeys.CloudKeyAccountID: "proj1", + resourcekeys.CloudKeyZone: "zone1", + }, + } + + _, ocResource := internalResourceToOC(resource) + if diff := cmp.Diff(want, ocResource, protocmp.Transform()); diff != "" { + t.Errorf("Unexpected difference:\n%v", diff) + } + + // Also test that the explicit resource type is preserved if present + resource.Attributes().InsertString(conventions.OCAttributeResourceType, "other-type") + want.Type = "other-type" + + _, ocResource = internalResourceToOC(resource) + if diff := cmp.Diff(want, ocResource, protocmp.Transform()); diff != "" { + t.Errorf("Unexpected difference:\n%v", diff) + } +} + +func TestAttributeValueToString(t *testing.T) { + assert.EqualValues(t, "", 
tracetranslator.AttributeValueToString(pdata.NewAttributeValueNull(), false)) + assert.EqualValues(t, "abc", tracetranslator.AttributeValueToString(pdata.NewAttributeValueString("abc"), false)) + assert.EqualValues(t, `"abc"`, tracetranslator.AttributeValueToString(pdata.NewAttributeValueString("abc"), true)) + assert.EqualValues(t, "123", tracetranslator.AttributeValueToString(pdata.NewAttributeValueInt(123), false)) + assert.EqualValues(t, "1.23", tracetranslator.AttributeValueToString(pdata.NewAttributeValueDouble(1.23), false)) + assert.EqualValues(t, "true", tracetranslator.AttributeValueToString(pdata.NewAttributeValueBool(true), false)) + + v := pdata.NewAttributeValueMap() + v.MapVal().InsertString(`a"\`, `b"\`) + v.MapVal().InsertInt("c", 123) + v.MapVal().Insert("d", pdata.NewAttributeValueNull()) + v.MapVal().Insert("e", v) + assert.EqualValues(t, `{"a\"\\":"b\"\\","c":123,"d":null,"e":{"a\"\\":"b\"\\","c":123,"d":null}}`, tracetranslator.AttributeValueToString(v, false)) + + v = pdata.NewAttributeValueArray() + av := pdata.NewAttributeValueString(`b"\`) + v.ArrayVal().Append(av) + av = pdata.NewAttributeValueInt(123) + v.ArrayVal().Append(av) + av = pdata.NewAttributeValueNull() + v.ArrayVal().Append(av) + av = pdata.NewAttributeValueArray() + v.ArrayVal().Append(av) + assert.EqualValues(t, `["b\"\\",123,null,"\u003cInvalid array value\u003e"]`, tracetranslator.AttributeValueToString(v, false)) +} + +func TestInferResourceType(t *testing.T) { + tests := []struct { + name string + labels map[string]string + wantResourceType string + wantOk bool + }{ + { + name: "empty labels", + labels: nil, + wantOk: false, + }, + { + name: "container", + labels: map[string]string{ + conventions.AttributeK8sCluster: "cluster1", + conventions.AttributeK8sPod: "pod1", + conventions.AttributeK8sNamespace: "namespace1", + conventions.AttributeContainerName: "container-name1", + conventions.AttributeCloudAccount: "proj1", + conventions.AttributeCloudZone: "zone1", + }, + wantResourceType: resourcekeys.ContainerType, + wantOk: true, + }, + { + name: "pod", + labels: map[string]string{ + conventions.AttributeK8sCluster: "cluster1", + conventions.AttributeK8sPod: "pod1", + conventions.AttributeK8sNamespace: "namespace1", + conventions.AttributeCloudZone: "zone1", + }, + wantResourceType: resourcekeys.K8SType, + wantOk: true, + }, + { + name: "host", + labels: map[string]string{ + conventions.AttributeK8sCluster: "cluster1", + conventions.AttributeCloudZone: "zone1", + conventions.AttributeHostName: "node1", + }, + wantResourceType: resourcekeys.HostType, + wantOk: true, + }, + { + name: "gce", + labels: map[string]string{ + conventions.AttributeCloudProvider: "gcp", + conventions.AttributeHostID: "inst1", + conventions.AttributeCloudZone: "zone1", + }, + wantResourceType: resourcekeys.CloudType, + wantOk: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + resourceType, ok := inferResourceType(tc.labels) + if tc.wantOk { + assert.True(t, ok) + assert.Equal(t, tc.wantResourceType, resourceType) + } else { + assert.False(t, ok) + assert.Equal(t, "", resourceType) + } + }) + } +} + +func TestResourceToOCAndBack(t *testing.T) { + tests := []goldendataset.PICTInputResource{ + goldendataset.ResourceNil, + goldendataset.ResourceEmpty, + goldendataset.ResourceVMOnPrem, + goldendataset.ResourceVMCloud, + goldendataset.ResourceK8sOnPrem, + goldendataset.ResourceK8sCloud, + goldendataset.ResourceFaas, + goldendataset.ResourceExec, + } + for _, test := range tests { + 
t.Run(string(test), func(t *testing.T) { + rSpans := make([]*otlptrace.ResourceSpans, 1) + rSpans[0] = &otlptrace.ResourceSpans{ + Resource: goldendataset.GenerateResource(test), + InstrumentationLibrarySpans: nil, + } + traces := pdata.TracesFromOtlp(rSpans) + expected := traces.ResourceSpans().At(0).Resource() + ocNode, ocResource := internalResourceToOC(expected) + actual := pdata.NewResource() + ocNodeResourceToInternal(ocNode, ocResource, actual) + // Remove opencensus resource type from actual. This will be added during translation. + actual.Attributes().Delete(conventions.OCAttributeResourceType) + assert.Equal(t, expected.Attributes().Len(), actual.Attributes().Len()) + expected.Attributes().ForEach(func(k string, v pdata.AttributeValue) { + a, ok := actual.Attributes().Get(k) + assert.True(t, ok) + switch v.Type() { + case pdata.AttributeValueINT: + assert.Equal(t, strconv.FormatInt(v.IntVal(), 10), a.StringVal()) + case pdata.AttributeValueMAP, pdata.AttributeValueARRAY: + assert.Equal(t, a, a) + default: + assert.Equal(t, v, a) + } + }) + }) + } +} + +func BenchmarkInternalResourceToOC(b *testing.B) { + resource := generateResourceWithOcNodeAndResource() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + ocNode, _ := internalResourceToOC(resource) + if ocNode.Identifier.Pid != 123 { + b.Fail() + } + } +} + +func BenchmarkOcResourceNodeMarshal(b *testing.B) { + oc := &agenttracepb.ExportTraceServiceRequest{ + Node: generateOcNode(), + Spans: nil, + Resource: generateOcResource(), + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + if _, err := proto.Marshal(oc); err != nil { + b.Fail() + } + } +} diff --git a/internal/otel_collector/translator/internaldata/traces_to_oc.go b/internal/otel_collector/translator/internaldata/traces_to_oc.go new file mode 100644 index 00000000000..518792f7403 --- /dev/null +++ b/internal/otel_collector/translator/internaldata/traces_to_oc.go @@ -0,0 +1,414 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "fmt" + "strings" + + octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "go.opencensus.io/trace" + "google.golang.org/protobuf/types/known/wrapperspb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +const sourceFormat = "otlp_trace" + +var ( + defaultProcessID = 0 +) + +// TraceDataToOC may be used only by OpenCensus receiver and exporter implementations. +// TODO: move this function to OpenCensus package. 
+func TraceDataToOC(td pdata.Traces) []consumerdata.TraceData { + resourceSpans := td.ResourceSpans() + + if resourceSpans.Len() == 0 { + return nil + } + + ocResourceSpansList := make([]consumerdata.TraceData, 0, resourceSpans.Len()) + + for i := 0; i < resourceSpans.Len(); i++ { + ocResourceSpansList = append(ocResourceSpansList, resourceSpansToOC(resourceSpans.At(i))) + } + + return ocResourceSpansList +} + +func resourceSpansToOC(rs pdata.ResourceSpans) consumerdata.TraceData { + ocTraceData := consumerdata.TraceData{ + SourceFormat: sourceFormat, + } + ocTraceData.Node, ocTraceData.Resource = internalResourceToOC(rs.Resource()) + ilss := rs.InstrumentationLibrarySpans() + if ilss.Len() == 0 { + return ocTraceData + } + // Approximate the number of the spans as the number of the spans in the first + // instrumentation library info. + ocSpans := make([]*octrace.Span, 0, ilss.At(0).Spans().Len()) + for i := 0; i < ilss.Len(); i++ { + ils := ilss.At(i) + // TODO: Handle instrumentation library name and version. + spans := ils.Spans() + for j := 0; j < spans.Len(); j++ { + ocSpans = append(ocSpans, spanToOC(spans.At(j))) + } + } + ocTraceData.Spans = ocSpans + return ocTraceData +} + +func spanToOC(span pdata.Span) *octrace.Span { + spaps := attributesMapToOCSameProcessAsParentSpan(span.Attributes()) + attributes := attributesMapToOCSpanAttributes(span.Attributes(), span.DroppedAttributesCount()) + if kindAttr := spanKindToOCAttribute(span.Kind()); kindAttr != nil { + if attributes == nil { + attributes = &octrace.Span_Attributes{ + AttributeMap: make(map[string]*octrace.AttributeValue, 1), + DroppedAttributesCount: 0, + } + } + attributes.AttributeMap[tracetranslator.TagSpanKind] = kindAttr + } + + ocStatus, statusAttr := statusToOC(span.Status()) + if statusAttr != nil { + if attributes == nil { + attributes = &octrace.Span_Attributes{ + AttributeMap: make(map[string]*octrace.AttributeValue, 1), + DroppedAttributesCount: 0, + } + } + attributes.AttributeMap[tracetranslator.TagStatusCode] = statusAttr + } + + return &octrace.Span{ + TraceId: traceIDToOC(span.TraceID()), + SpanId: spanIDToOC(span.SpanID()), + Tracestate: traceStateToOC(span.TraceState()), + ParentSpanId: spanIDToOC(span.ParentSpanID()), + Name: stringToTruncatableString(span.Name()), + Kind: spanKindToOC(span.Kind()), + StartTime: pdata.UnixNanoToTimestamp(span.StartTime()), + EndTime: pdata.UnixNanoToTimestamp(span.EndTime()), + Attributes: attributes, + TimeEvents: eventsToOC(span.Events(), span.DroppedEventsCount()), + Links: linksToOC(span.Links(), span.DroppedLinksCount()), + Status: ocStatus, + ChildSpanCount: nil, // TODO(dmitryax): Handle once OTLP supports it + SameProcessAsParentSpan: spaps, + } +} + +func attributesMapToOCSpanAttributes(attributes pdata.AttributeMap, droppedCount uint32) *octrace.Span_Attributes { + if attributes.Len() == 0 && droppedCount == 0 { + return nil + } + + return &octrace.Span_Attributes{ + AttributeMap: attributesMapToOCAttributeMap(attributes), + DroppedAttributesCount: int32(droppedCount), + } +} + +func attributesMapToOCAttributeMap(attributes pdata.AttributeMap) map[string]*octrace.AttributeValue { + if attributes.Len() == 0 { + return nil + } + + ocAttributes := make(map[string]*octrace.AttributeValue, attributes.Len()) + attributes.ForEach(func(k string, v pdata.AttributeValue) { + ocAttributes[k] = attributeValueToOC(v) + }) + return ocAttributes +} + +func attributeValueToOC(attr pdata.AttributeValue) *octrace.AttributeValue { + a := &octrace.AttributeValue{} + + switch 
attr.Type() { + case pdata.AttributeValueSTRING: + a.Value = &octrace.AttributeValue_StringValue{ + StringValue: stringToTruncatableString(attr.StringVal()), + } + case pdata.AttributeValueBOOL: + a.Value = &octrace.AttributeValue_BoolValue{ + BoolValue: attr.BoolVal(), + } + case pdata.AttributeValueDOUBLE: + a.Value = &octrace.AttributeValue_DoubleValue{ + DoubleValue: attr.DoubleVal(), + } + case pdata.AttributeValueINT: + a.Value = &octrace.AttributeValue_IntValue{ + IntValue: attr.IntVal(), + } + case pdata.AttributeValueMAP: + a.Value = &octrace.AttributeValue_StringValue{ + StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr, false)), + } + case pdata.AttributeValueARRAY: + a.Value = &octrace.AttributeValue_StringValue{ + StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr, false)), + } + default: + a.Value = &octrace.AttributeValue_StringValue{ + StringValue: stringToTruncatableString(fmt.Sprintf("<Unknown OpenTelemetry attribute value type %q>", attr.Type())), + } + } + + return a +} + +func spanKindToOCAttribute(kind pdata.SpanKind) *octrace.AttributeValue { + var ocKind tracetranslator.OpenTracingSpanKind + switch kind { + case pdata.SpanKindCONSUMER: + ocKind = tracetranslator.OpenTracingSpanKindConsumer + case pdata.SpanKindPRODUCER: + ocKind = tracetranslator.OpenTracingSpanKindProducer + case pdata.SpanKindINTERNAL: + ocKind = tracetranslator.OpenTracingSpanKindInternal + case pdata.SpanKindUNSPECIFIED: + case pdata.SpanKindSERVER: // explicitly handled as SpanKind + case pdata.SpanKindCLIENT: // explicitly handled as SpanKind + default: + + } + + if string(ocKind) == "" { + // No matching kind attribute value + return nil + } + + return stringAttributeValue(string(ocKind)) +} + +func stringAttributeValue(val string) *octrace.AttributeValue { + return &octrace.AttributeValue{ + Value: &octrace.AttributeValue_StringValue{ + StringValue: stringToTruncatableString(val), + }, + } +} + +func attributesMapToOCSameProcessAsParentSpan(attr pdata.AttributeMap) *wrapperspb.BoolValue { + val, ok := attr.Get(conventions.OCAttributeSameProcessAsParentSpan) + if !ok || val.Type() != pdata.AttributeValueBOOL { + return nil + } + return wrapperspb.Bool(val.BoolVal()) +} + +// OTLP follows the W3C format, e.g.
"vendorname1=opaqueValue1,vendorname2=opaqueValue2" +func traceStateToOC(traceState pdata.TraceState) *octrace.Span_Tracestate { + if traceState == "" { + return nil + } + + // key-value pairs in the "key1=value1" format + pairs := strings.Split(string(traceState), ",") + + entries := make([]*octrace.Span_Tracestate_Entry, 0, len(pairs)) + for _, pair := range pairs { + kv := strings.SplitN(pair, "=", 2) + if len(kv) == 0 { + continue + } + + key := kv[0] + val := "" + if len(kv) >= 2 { + val = kv[1] + } + + entries = append(entries, &octrace.Span_Tracestate_Entry{ + Key: key, + Value: val, + }) + } + + return &octrace.Span_Tracestate{ + Entries: entries, + } +} + +func spanKindToOC(kind pdata.SpanKind) octrace.Span_SpanKind { + switch kind { + case pdata.SpanKindSERVER: + return octrace.Span_SERVER + case pdata.SpanKindCLIENT: + return octrace.Span_CLIENT + // NOTE: see `spanKindToOCAttribute` function for custom kinds + case pdata.SpanKindUNSPECIFIED: + case pdata.SpanKindINTERNAL: + case pdata.SpanKindPRODUCER: + case pdata.SpanKindCONSUMER: + default: + } + + return octrace.Span_SPAN_KIND_UNSPECIFIED +} + +func eventsToOC(events pdata.SpanEventSlice, droppedCount uint32) *octrace.Span_TimeEvents { + if events.Len() == 0 { + if droppedCount == 0 { + return nil + } + return &octrace.Span_TimeEvents{ + TimeEvent: nil, + DroppedMessageEventsCount: int32(droppedCount), + } + } + + ocEvents := make([]*octrace.Span_TimeEvent, 0, events.Len()) + for i := 0; i < events.Len(); i++ { + ocEvents = append(ocEvents, eventToOC(events.At(i))) + } + + return &octrace.Span_TimeEvents{ + TimeEvent: ocEvents, + DroppedAnnotationsCount: int32(droppedCount), + } +} + +func eventToOC(event pdata.SpanEvent) *octrace.Span_TimeEvent { + attrs := event.Attributes() + + // Consider TimeEvent to be of MessageEvent type if all and only relevant attributes are set + ocMessageEventAttrs := []string{ + conventions.OCTimeEventMessageEventType, + conventions.OCTimeEventMessageEventID, + conventions.OCTimeEventMessageEventUSize, + conventions.OCTimeEventMessageEventCSize, + } + // TODO: Find a better way to check for message_event. Maybe use the event.Name. 
+ if attrs.Len() == len(ocMessageEventAttrs) { + ocMessageEventAttrValues := map[string]pdata.AttributeValue{} + var ocMessageEventAttrFound bool + for _, attr := range ocMessageEventAttrs { + akv, found := attrs.Get(attr) + if found { + ocMessageEventAttrFound = true + } + ocMessageEventAttrValues[attr] = akv + } + if ocMessageEventAttrFound { + ocMessageEventType := ocMessageEventAttrValues[conventions.OCTimeEventMessageEventType] + ocMessageEventTypeVal := octrace.Span_TimeEvent_MessageEvent_Type_value[ocMessageEventType.StringVal()] + return &octrace.Span_TimeEvent{ + Time: pdata.UnixNanoToTimestamp(event.Timestamp()), + Value: &octrace.Span_TimeEvent_MessageEvent_{ + MessageEvent: &octrace.Span_TimeEvent_MessageEvent{ + Type: octrace.Span_TimeEvent_MessageEvent_Type(ocMessageEventTypeVal), + Id: uint64(ocMessageEventAttrValues[conventions.OCTimeEventMessageEventID].IntVal()), + UncompressedSize: uint64(ocMessageEventAttrValues[conventions.OCTimeEventMessageEventUSize].IntVal()), + CompressedSize: uint64(ocMessageEventAttrValues[conventions.OCTimeEventMessageEventCSize].IntVal()), + }, + }, + } + } + } + + ocAttributes := attributesMapToOCSpanAttributes(attrs, event.DroppedAttributesCount()) + return &octrace.Span_TimeEvent{ + Time: pdata.UnixNanoToTimestamp(event.Timestamp()), + Value: &octrace.Span_TimeEvent_Annotation_{ + Annotation: &octrace.Span_TimeEvent_Annotation{ + Description: stringToTruncatableString(event.Name()), + Attributes: ocAttributes, + }, + }, + } +} + +func linksToOC(links pdata.SpanLinkSlice, droppedCount uint32) *octrace.Span_Links { + if links.Len() == 0 { + if droppedCount == 0 { + return nil + } + return &octrace.Span_Links{ + Link: nil, + DroppedLinksCount: int32(droppedCount), + } + } + + ocLinks := make([]*octrace.Span_Link, 0, links.Len()) + for i := 0; i < links.Len(); i++ { + link := links.At(i) + ocLink := &octrace.Span_Link{ + TraceId: traceIDToOC(link.TraceID()), + SpanId: spanIDToOC(link.SpanID()), + Tracestate: traceStateToOC(link.TraceState()), + Attributes: attributesMapToOCSpanAttributes(link.Attributes(), link.DroppedAttributesCount()), + } + ocLinks = append(ocLinks, ocLink) + } + + return &octrace.Span_Links{ + Link: ocLinks, + DroppedLinksCount: int32(droppedCount), + } +} + +func traceIDToOC(tid pdata.TraceID) []byte { + if !tid.IsValid() { + return nil + } + tidBytes := tid.Bytes() + return tidBytes[:] +} + +func spanIDToOC(sid pdata.SpanID) []byte { + if !sid.IsValid() { + return nil + } + sidBytes := sid.Bytes() + return sidBytes[:] +} + +func statusToOC(status pdata.SpanStatus) (*octrace.Status, *octrace.AttributeValue) { + var attr *octrace.AttributeValue + var oc int32 + switch status.Code() { + case pdata.StatusCodeUnset: + // Unset in OTLP corresponds to OK in OpenCensus. + oc = trace.StatusCodeOK + case pdata.StatusCodeOk: + // OK in OpenCensus is the closest to OK in OTLP. + oc = trace.StatusCodeOK + // We will also add an attribute to indicate that it is OTLP OK, different from OTLP Unset. 
+ attr = &octrace.AttributeValue{Value: &octrace.AttributeValue_IntValue{IntValue: int64(status.Code())}} + case pdata.StatusCodeError: + oc = trace.StatusCodeUnknown + } + + return &octrace.Status{Code: oc, Message: status.Message()}, attr +} + +func stringToTruncatableString(str string) *octrace.TruncatableString { + if str == "" { + return nil + } + return &octrace.TruncatableString{ + Value: str, + } +} diff --git a/internal/otel_collector/translator/internaldata/traces_to_oc_test.go b/internal/otel_collector/translator/internaldata/traces_to_oc_test.go new file mode 100644 index 00000000000..1fd58cf40b6 --- /dev/null +++ b/internal/otel_collector/translator/internaldata/traces_to_oc_test.go @@ -0,0 +1,446 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internaldata + +import ( + "io" + "math/rand" + "testing" + + occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func TestInternalTraceStateToOC(t *testing.T) { + assert.Equal(t, (*octrace.Span_Tracestate)(nil), traceStateToOC("")) + + ocTracestate := &octrace.Span_Tracestate{ + Entries: []*octrace.Span_Tracestate_Entry{ + { + Key: "abc", + Value: "def", + }, + }, + } + assert.EqualValues(t, ocTracestate, traceStateToOC("abc=def")) + + ocTracestate.Entries = append(ocTracestate.Entries, + &octrace.Span_Tracestate_Entry{ + Key: "123", + Value: "4567", + }) + assert.EqualValues(t, ocTracestate, traceStateToOC("abc=def,123=4567")) +} + +func TestAttributesMapToOC(t *testing.T) { + assert.EqualValues(t, (*octrace.Span_Attributes)(nil), attributesMapToOCSpanAttributes(pdata.NewAttributeMap(), 0)) + + ocAttrs := &octrace.Span_Attributes{ + DroppedAttributesCount: 123, + } + assert.EqualValues(t, ocAttrs, attributesMapToOCSpanAttributes(pdata.NewAttributeMap(), 123)) + + ocAttrs = &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "abc": { + Value: &octrace.AttributeValue_StringValue{StringValue: &octrace.TruncatableString{Value: "def"}}, + }, + }, + DroppedAttributesCount: 234, + } + assert.EqualValues(t, ocAttrs, + attributesMapToOCSpanAttributes( + pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + "abc": 
pdata.NewAttributeValueString("def"), + }), + 234)) + + ocAttrs.AttributeMap["intval"] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_IntValue{IntValue: 345}, + } + ocAttrs.AttributeMap["boolval"] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_BoolValue{BoolValue: true}, + } + ocAttrs.AttributeMap["doubleval"] = &octrace.AttributeValue{ + Value: &octrace.AttributeValue_DoubleValue{DoubleValue: 4.5}, + } + assert.EqualValues(t, ocAttrs, + attributesMapToOCSpanAttributes(pdata.NewAttributeMap().InitFromMap( + map[string]pdata.AttributeValue{ + "abc": pdata.NewAttributeValueString("def"), + "intval": pdata.NewAttributeValueInt(345), + "boolval": pdata.NewAttributeValueBool(true), + "doubleval": pdata.NewAttributeValueDouble(4.5), + }), + 234)) +} + +func TestSpanKindToOC(t *testing.T) { + tests := []struct { + kind pdata.SpanKind + ocKind octrace.Span_SpanKind + }{ + { + kind: pdata.SpanKindCLIENT, + ocKind: octrace.Span_CLIENT, + }, + { + kind: pdata.SpanKindSERVER, + ocKind: octrace.Span_SERVER, + }, + { + kind: pdata.SpanKindCONSUMER, + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + }, + { + kind: pdata.SpanKindPRODUCER, + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + }, + { + kind: pdata.SpanKindUNSPECIFIED, + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + }, + { + kind: pdata.SpanKindINTERNAL, + ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, + }, + } + + for _, test := range tests { + t.Run(test.kind.String(), func(t *testing.T) { + got := spanKindToOC(test.kind) + assert.EqualValues(t, test.ocKind, got, "Expected "+test.ocKind.String()+", got "+got.String()) + }) + } +} + +func TestAttributesMapTOOcSameProcessAsParentSpan(t *testing.T) { + attr := pdata.NewAttributeMap() + assert.Nil(t, attributesMapToOCSameProcessAsParentSpan(attr)) + + attr.UpsertBool(conventions.OCAttributeSameProcessAsParentSpan, true) + assert.True(t, proto.Equal(wrapperspb.Bool(true), attributesMapToOCSameProcessAsParentSpan(attr))) + + attr.UpsertBool(conventions.OCAttributeSameProcessAsParentSpan, false) + assert.True(t, proto.Equal(wrapperspb.Bool(false), attributesMapToOCSameProcessAsParentSpan(attr))) + + attr.UpdateInt(conventions.OCAttributeSameProcessAsParentSpan, 13) + assert.Nil(t, attributesMapToOCSameProcessAsParentSpan(attr)) +} + +func TestSpanKindToOCAttribute(t *testing.T) { + tests := []struct { + kind pdata.SpanKind + ocAttribute *octrace.AttributeValue + }{ + { + kind: pdata.SpanKindCONSUMER, + ocAttribute: &octrace.AttributeValue{ + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{ + Value: string(tracetranslator.OpenTracingSpanKindConsumer), + }, + }, + }, + }, + { + kind: pdata.SpanKindPRODUCER, + ocAttribute: &octrace.AttributeValue{ + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{ + Value: string(tracetranslator.OpenTracingSpanKindProducer), + }, + }, + }, + }, + { + kind: pdata.SpanKindINTERNAL, + ocAttribute: &octrace.AttributeValue{ + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{ + Value: string(tracetranslator.OpenTracingSpanKindInternal), + }, + }, + }, + }, + { + kind: pdata.SpanKindUNSPECIFIED, + ocAttribute: nil, + }, + { + kind: pdata.SpanKindSERVER, + ocAttribute: nil, + }, + { + kind: pdata.SpanKindCLIENT, + ocAttribute: nil, + }, + } + + for _, test := range tests { + t.Run(test.kind.String(), func(t *testing.T) { + got := spanKindToOCAttribute(test.kind) + assert.EqualValues(t, test.ocAttribute, got, "Expected "+test.ocAttribute.String()+", 
got "+got.String()) + }) + } +} + +func TestInternalToOC(t *testing.T) { + ocNode := &occommon.Node{} + ocResource1 := &ocresource.Resource{Labels: map[string]string{"resource-attr": "resource-attr-val-1"}} + ocResource2 := &ocresource.Resource{Labels: map[string]string{"resource-attr": "resource-attr-val-2"}} + + startTime := timestamppb.New(testdata.TestSpanStartTime) + eventTime := timestamppb.New(testdata.TestSpanEventTime) + endTime := timestamppb.New(testdata.TestSpanEndTime) + + ocSpan1 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationA"}, + StartTime: startTime, + EndTime: endTime, + TimeEvents: &octrace.Span_TimeEvents{ + TimeEvent: []*octrace.Span_TimeEvent{ + { + Time: eventTime, + Value: &octrace.Span_TimeEvent_Annotation_{ + Annotation: &octrace.Span_TimeEvent_Annotation{ + Description: &octrace.TruncatableString{Value: "event-with-attr"}, + Attributes: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span-event-attr": { + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-event-attr-val"}, + }, + }, + }, + DroppedAttributesCount: 2, + }, + }, + }, + }, + { + Time: eventTime, + Value: &octrace.Span_TimeEvent_Annotation_{ + Annotation: &octrace.Span_TimeEvent_Annotation{ + Description: &octrace.TruncatableString{Value: "event"}, + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 2, + }, + }, + }, + }, + }, + DroppedAnnotationsCount: 1, + }, + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 1, + }, + Status: &octrace.Status{Message: "status-cancelled", Code: 2}, + } + + ocSpan2 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationB"}, + StartTime: startTime, + EndTime: endTime, + Links: &octrace.Span_Links{ + Link: []*octrace.Span_Link{ + { + Attributes: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span-link-attr": { + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-link-attr-val"}, + }, + }, + }, + DroppedAttributesCount: 4, + }, + }, + { + Attributes: &octrace.Span_Attributes{ + DroppedAttributesCount: 4, + }, + }, + }, + DroppedLinksCount: 3, + }, + Status: &octrace.Status{}, + } + + ocSpan3 := &octrace.Span{ + Name: &octrace.TruncatableString{Value: "operationC"}, + StartTime: startTime, + EndTime: endTime, + // TODO: Set resource here and put it in the same TraceDataOld. 
+ Attributes: &octrace.Span_Attributes{ + AttributeMap: map[string]*octrace.AttributeValue{ + "span-attr": { + Value: &octrace.AttributeValue_StringValue{ + StringValue: &octrace.TruncatableString{Value: "span-attr-val"}, + }, + }, + }, + DroppedAttributesCount: 5, + }, + Status: &octrace.Status{}, + } + + tests := []struct { + name string + td pdata.Traces + oc []consumerdata.TraceData + }{ + { + name: "empty", + td: testdata.GenerateTraceDataEmpty(), + oc: []consumerdata.TraceData(nil), + }, + + { + name: "one-empty-resource-spans", + td: testdata.GenerateTraceDataOneEmptyResourceSpans(), + oc: []consumerdata.TraceData{ + { + Node: nil, + Resource: nil, + Spans: []*octrace.Span(nil), + SourceFormat: sourceFormat, + }, + }, + }, + + { + name: "no-libraries", + td: testdata.GenerateTraceDataNoLibraries(), + oc: []consumerdata.TraceData{ + { + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span(nil), + SourceFormat: sourceFormat, + }, + }, + }, + + { + name: "one-empty-instrumentation-library", + td: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + oc: []consumerdata.TraceData{ + { + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{}, + SourceFormat: sourceFormat, + }, + }, + }, + + { + name: "one-span-no-resource", + td: testdata.GenerateTraceDataOneSpanNoResource(), + oc: []consumerdata.TraceData{ + { + Node: nil, + Resource: nil, + Spans: []*octrace.Span{ocSpan1}, + SourceFormat: sourceFormat, + }, + }, + }, + + { + name: "one-span", + td: testdata.GenerateTraceDataOneSpan(), + oc: []consumerdata.TraceData{ + { + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1}, + SourceFormat: sourceFormat, + }, + }, + }, + + { + name: "two-spans-same-resource", + td: testdata.GenerateTraceDataTwoSpansSameResource(), + oc: []consumerdata.TraceData{ + { + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1, ocSpan2}, + SourceFormat: sourceFormat, + }, + }, + }, + + { + name: "two-spans-same-resource-one-different", + td: testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent(), + oc: []consumerdata.TraceData{ + { + Node: ocNode, + Resource: ocResource1, + Spans: []*octrace.Span{ocSpan1, ocSpan2}, + SourceFormat: sourceFormat, + }, + { + Node: ocNode, + Resource: ocResource2, + Spans: []*octrace.Span{ocSpan3}, + SourceFormat: sourceFormat, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.EqualValues(t, test.oc, TraceDataToOC(test.td)) + }) + } +} + +func TestInternalTracesToOCTracesAndBack(t *testing.T) { + rscSpans, err := goldendataset.GenerateResourceSpans( + "../../internal/goldendataset/testdata/generated_pict_pairs_traces.txt", + "../../internal/goldendataset/testdata/generated_pict_pairs_spans.txt", + io.Reader(rand.New(rand.NewSource(2004)))) + assert.NoError(t, err) + for _, rs := range rscSpans { + orig := make([]*otlptrace.ResourceSpans, 1) + orig[0] = rs + td := pdata.TracesFromOtlp(orig) + ocTraceData := TraceDataToOC(td) + assert.Equal(t, 1, len(ocTraceData)) + assert.Equal(t, td.SpanCount(), len(ocTraceData[0].Spans)) + tdFromOC := OCToTraceData(ocTraceData[0]) + assert.NotNil(t, tdFromOC) + assert.Equal(t, td.SpanCount(), tdFromOC.SpanCount()) + } +} diff --git a/internal/otel_collector/translator/trace/README.md b/internal/otel_collector/translator/trace/README.md new file mode 100644 index 00000000000..4fa90d504af --- /dev/null +++ b/internal/otel_collector/translator/trace/README.md @@ -0,0 +1,74 @@ +# Overview + +This package implements a number of 
translators for converting spans between the OpenCensus format and several other supported formats, such as Jaeger Proto, Jaeger Thrift, Zipkin Thrift, and Zipkin JSON. This document describes how certain non-obvious cases should be handled. + +## Links: + +* [OpenTracing Semantic Conventions](https://github.com/opentracing/specification/blob/master/semantic_conventions.md) + +## Status Codes and Messages + +### OpenCensus + +The OpenCensus protocol has a special field to represent the status of an operation. The status has two fields: an int32 field called `code` and a string field called `message`. When converting from other formats, the status field must be set from the relevant tags/attributes of the source format. When converting from OC to other formats, the status field must be translated to the appropriate tags/attributes of the target format. + + +### Jaeger to OC + +Jaeger spans may contain two sets of tags that can represent the status of an operation: + +- `status.code` and `status.message` +- `http.status_code` and `http.status_message` + +When converting from Jaeger to OC, + +1. OC status should be set from the `status.code` and `status.message` tags if the `status.code` tag is found on the Jaeger span. Since OC already has a special status field, these tags (`status.code` and `status.message`) are redundant and should be dropped from the resultant OC span. +2. If the `status.code` tag is not present, status should be set from `http.status_code` and `http.status_message` if the `http.status_code` tag is found. The HTTP status code should be mapped to the appropriate gRPC status code before it is used in the OC status. These tags should be preserved and added to the resultant OC span as attributes. +3. If none of the tags are found, OC status should not be set. + + +### Zipkin to OC + +In addition to the two sets of tags mentioned in the previous section, Zipkin spans can contain a third set of tags representing operation status, resulting in the following sets of tags: + +- `census.status_code` and `census.status_description` +- `status.code` and `status.message` +- `http.status_code` and `http.status_message` + +When converting from Zipkin to OC, + +1. OC status should be set from `census.status_code` and `census.status_description` if the `census.status_code` tag is found on the Zipkin span. These tags should be dropped from the resultant OC span. +2. If the `census.status_code` tag is not found in step 1, OC status should be set from the `status.code` and `status.message` tags if the `status.code` tag is present. The tags should be dropped from the resultant OC span. +3. If no tags were found in steps 1 and 2, OC status should be set from `http.status_code` and `http.status_message` if the `http.status_code` tag is found. These tags should be preserved and added to the resultant OC span as attributes. +4. If none of the tags are found, OC status should not be set. + + +Note that codes and messages from different sets of tags should not be mixed to form the status field. For example, OC status should not combine a code from `http.status_code` with a message from `status.message`, or vice versa. Both fields must be set from the same set of tags, even if that means leaving one of the two fields empty. + + +### OC to Jaeger + +When converting from OC to Jaeger, if the OC span has a status field set, then + +* `code` should be added as a `status.code` tag. +* `message` should be added as a `status.message` tag.
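To make the OC to Jaeger rule above concrete, the sketch below shows one way it could be written. This is an illustration only, not the upstream translator API: `ocStatusToJaegerTags` is a hypothetical helper name, while the OpenCensus trace proto and the Jaeger `model` package are the same ones imported elsewhere in this diff.

```go
package example

import (
	octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
	"github.com/jaegertracing/jaeger/model"
)

// ocStatusToJaegerTags applies the rule above: the OC status code and
// message are emitted as status.code and status.message tags.
func ocStatusToJaegerTags(status *octrace.Status) []model.KeyValue {
	if status == nil {
		// No status field set on the OC span: emit no tags.
		return nil
	}
	return []model.KeyValue{
		model.Int64("status.code", int64(status.Code)),
		model.String("status.message", status.Message),
	}
}
```

Per the Note section later in this document, a real implementation should first check whether the span already carries `status.*` tags and leave them untouched.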
+ +### OC to Zipkin + +When converting from OC to Zipkin, if the OC span has the status field set, then + +* `code` should be added as a `census.status_code` tag. +* `message` should be added as a `census.status_description` tag. + +In addition to this, if the OC status field represents a non-OK status, a tag with the key `error` should be added and set to the same value as the status message, falling back to the status code when the status message is not available. + +### Note: + +If either set of target tags (`status.*` or `census.status_*`) is already present on the span, it should be preserved and not overwritten from the status field. This is extremely unlikely to happen within the collector because of how things are implemented, but any other implementations should still follow this rule. + + +## Converting HTTP status codes to OC codes + +HTTP status codes should be translated to OC status codes following the gRPC code definitions in https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto. + +This is implemented by the `tracetranslator` package as `OCStatusCodeFromHTTP`. diff --git a/internal/otel_collector/translator/trace/big_endian_converter.go b/internal/otel_collector/translator/trace/big_endian_converter.go new file mode 100644 index 00000000000..25daea54e6d --- /dev/null +++ b/internal/otel_collector/translator/trace/big_endian_converter.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetranslator + +import ( + "encoding/binary" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +// UInt64ToTraceID takes a two-uint64 representation of a TraceID and +// converts it to a pdata.TraceID representation. +func UInt64ToTraceID(high, low uint64) pdata.TraceID { + traceID := [16]byte{} + binary.BigEndian.PutUint64(traceID[:8], high) + binary.BigEndian.PutUint64(traceID[8:], low) + return pdata.NewTraceID(traceID) +} + +// UInt64ToByteTraceID takes a two-uint64 representation of a TraceID and +// converts it to a [16]byte representation. +func UInt64ToByteTraceID(high, low uint64) [16]byte { + traceID := [16]byte{} + binary.BigEndian.PutUint64(traceID[:8], high) + binary.BigEndian.PutUint64(traceID[8:], low) + return traceID +} + +// Int64ToTraceID takes a two-int64 representation of a TraceID and +// converts it to a pdata.TraceID representation. +func Int64ToTraceID(high, low int64) pdata.TraceID { + return UInt64ToTraceID(uint64(high), uint64(low)) +} + +// Int64ToByteTraceID takes a two-int64 representation of a TraceID and +// converts it to a [16]byte representation. +func Int64ToByteTraceID(high, low int64) [16]byte { + return UInt64ToByteTraceID(uint64(high), uint64(low)) +} + +// BytesToUInt64TraceID takes a [16]byte representation of a TraceID and +// converts it to a two-uint64 representation.
+func BytesToUInt64TraceID(traceID [16]byte) (uint64, uint64) { + return binary.BigEndian.Uint64(traceID[:8]), binary.BigEndian.Uint64(traceID[8:]) +} + +// BytesToInt64TraceID takes a [16]byte representation of a TraceID and +// converts it to a two-int64 representation. +func BytesToInt64TraceID(traceID [16]byte) (int64, int64) { + traceIDHigh, traceIDLow := BytesToUInt64TraceID(traceID) + return int64(traceIDHigh), int64(traceIDLow) +} + +// TraceIDToUInt64Pair takes a pdata.TraceID and converts it to a pair of uint64 representations. +func TraceIDToUInt64Pair(traceID pdata.TraceID) (uint64, uint64) { + return BytesToUInt64TraceID(traceID.Bytes()) +} + +// UInt64ToByteSpanID takes a uint64 representation of a SpanID and +// converts it to an [8]byte representation. +func UInt64ToByteSpanID(id uint64) [8]byte { + spanID := [8]byte{} + binary.BigEndian.PutUint64(spanID[:], id) + return spanID +} + +// UInt64ToSpanID takes a uint64 representation of a SpanID and +// converts it to a pdata.SpanID representation. +func UInt64ToSpanID(id uint64) pdata.SpanID { + return pdata.NewSpanID(UInt64ToByteSpanID(id)) +} + +// Int64ToByteSpanID takes an int64 representation of a SpanID and +// converts it to an [8]byte representation. +func Int64ToByteSpanID(id int64) [8]byte { + return UInt64ToByteSpanID(uint64(id)) +} + +// Int64ToSpanID takes an int64 representation of a SpanID and +// converts it to a pdata.SpanID representation. +func Int64ToSpanID(id int64) pdata.SpanID { + return UInt64ToSpanID(uint64(id)) +} + +// BytesToUInt64SpanID takes an [8]byte representation of a SpanID and +// converts it to a uint64 representation. +func BytesToUInt64SpanID(b [8]byte) uint64 { + return binary.BigEndian.Uint64(b[:]) +} + +// BytesToInt64SpanID takes an [8]byte representation of a SpanID and +// converts it to an int64 representation. +func BytesToInt64SpanID(b [8]byte) int64 { + return int64(BytesToUInt64SpanID(b)) +} diff --git a/internal/otel_collector/translator/trace/big_endian_converter_test.go b/internal/otel_collector/translator/trace/big_endian_converter_test.go new file mode 100644 index 00000000000..a90e0961a70 --- /dev/null +++ b/internal/otel_collector/translator/trace/big_endian_converter_test.go @@ -0,0 +1,126 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package tracetranslator + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUInt64ToBytesTraceIDConversion(t *testing.T) { + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + UInt64ToByteTraceID(0, 0), + "Failed 0 conversion:") + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}, + UInt64ToByteTraceID(256*256+256+1, 256+1), + "Failed simple conversion:") + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}, + UInt64ToByteTraceID(0, 5), + "Failed to convert 0 high:") + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + UInt64ToByteTraceID(5, 0), + "Failed to convert 0 low:") + assert.Equal(t, + [16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}, + UInt64ToByteTraceID(math.MaxUint64, 5), + "Failed to convert MaxUint64:") +} + +func TestInt64ToBytesTraceIDConversion(t *testing.T) { + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Int64ToByteTraceID(0, 0), + "Failed 0 conversion:") + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + Int64ToByteTraceID(0, -1), + "Failed to convert negative low:") + assert.Equal(t, + [16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}, + Int64ToByteTraceID(-2, 5), + "Failed to convert negative high:") + assert.Equal(t, + [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Int64ToByteTraceID(5, math.MinInt64), + "Failed to convert MinInt64:") +} + +func TestUInt64ToBytesSpanIDConversion(t *testing.T) { + assert.Equal(t, + [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + UInt64ToByteSpanID(0), + "Failed 0 conversion:") + assert.Equal(t, + [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01}, + UInt64ToByteSpanID(256*256+256+1), + "Failed simple conversion:") + assert.Equal(t, + [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + UInt64ToByteSpanID(math.MaxUint64), + "Failed to convert MaxUint64:") +} + +func TestInt64ToBytesSpanIDConversion(t *testing.T) { + assert.Equal(t, + [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Int64ToByteSpanID(0), + "Failed 0 conversion:") + assert.Equal(t, + [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}, + Int64ToByteSpanID(5), + "Failed to convert positive id:") + assert.Equal(t, + [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + Int64ToByteSpanID(-1), + "Failed to convert negative id:") + assert.Equal(t, + [8]byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Int64ToByteSpanID(math.MinInt64), + "Failed to convert MinInt64:") +} + +func TestTraceIDInt64RoundTrip(t *testing.T) { + wh := int64(0x70605040302010FF) + wl := int64(0x0001020304050607) + gh, gl := BytesToInt64TraceID(Int64ToByteTraceID(wh, wl)) + if gh != wh || gl != wl { + t.Errorf("Round trip of TraceID failed:\n\tGot: (0x%0x, 0x%0x)\n\tWant: (0x%0x, 0x%0x)", gl, gh, wl, wh) + } +} + +func TestTraceIDUInt64RoundTrip(t *testing.T) { + wh := uint64(0x70605040302010FF) + wl := uint64(0x0001020304050607) + gh, gl :=
BytesToUInt64TraceID(UInt64ToByteTraceID(wh, wl)) + assert.Equal(t, wl, gl) + assert.Equal(t, wh, gh) +} + +func TestSpanIdInt64RoundTrip(t *testing.T) { + w := int64(0x0001020304050607) + assert.Equal(t, w, BytesToInt64SpanID(Int64ToByteSpanID(w))) +} + +func TestSpanIdUInt64RoundTrip(t *testing.T) { + w := uint64(0x0001020304050607) + assert.Equal(t, w, BytesToUInt64SpanID(UInt64ToByteSpanID(w))) +} diff --git a/internal/otel_collector/translator/trace/grpc_http_mapper.go b/internal/otel_collector/translator/trace/grpc_http_mapper.go new file mode 100644 index 00000000000..f997f3a3a5e --- /dev/null +++ b/internal/otel_collector/translator/trace/grpc_http_mapper.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetranslator + +import ( + "net/http" +) + +// https://github.com/googleapis/googleapis/blob/bee79fbe03254a35db125dc6d2f1e9b752b390fe/google/rpc/code.proto#L33-L186 +const ( + OCOK = 0 + OCCancelled = 1 + OCUnknown = 2 + OCInvalidArgument = 3 + OCDeadlineExceeded = 4 + OCNotFound = 5 + OCAlreadyExists = 6 + OCPermissionDenied = 7 + OCResourceExhausted = 8 + OCFailedPrecondition = 9 + OCAborted = 10 + OCOutOfRange = 11 + OCUnimplemented = 12 + OCInternal = 13 + OCUnavailable = 14 + OCDataLoss = 15 + OCUnauthenticated = 16 +) + +var httpToOCCodeMap = map[int32]int32{ + 401: OCUnauthenticated, + 403: OCPermissionDenied, + 404: OCNotFound, + 429: OCResourceExhausted, + 499: OCCancelled, + 501: OCUnimplemented, + 503: OCUnavailable, + 504: OCDeadlineExceeded, +} + +// OCStatusCodeFromHTTP takes an HTTP status code and returns the appropriate OpenCensus status code. +// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/data-http.md +func OCStatusCodeFromHTTP(code int32) int32 { + if code >= 100 && code < 400 { + return OCOK + } + if c, ok := httpToOCCodeMap[code]; ok { + return c + } + if code >= 400 && code < 500 { + return OCInvalidArgument + } + if code >= 500 && code < 600 { + return OCInternal + } + return OCUnknown +} + +var ocToHTTPCodeMap = map[int32]int32{ + OCOK: http.StatusOK, + OCCancelled: 499, + OCUnknown: http.StatusInternalServerError, + OCInvalidArgument: http.StatusBadRequest, + OCDeadlineExceeded: http.StatusGatewayTimeout, + OCNotFound: http.StatusNotFound, + OCAlreadyExists: http.StatusConflict, + OCPermissionDenied: http.StatusForbidden, + OCResourceExhausted: http.StatusTooManyRequests, + OCFailedPrecondition: http.StatusPreconditionFailed, + OCAborted: http.StatusConflict, + OCOutOfRange: http.StatusRequestedRangeNotSatisfiable, + OCUnimplemented: http.StatusNotImplemented, + OCInternal: http.StatusInternalServerError, + OCUnavailable: http.StatusServiceUnavailable, + OCDataLoss: http.StatusUnprocessableEntity, + OCUnauthenticated: http.StatusUnauthorized, +} + +// HTTPStatusCodeFromOCStatus takes an OpenCensus status code and returns the appropriate HTTP status code. +// See:
https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/data-http.md +func HTTPStatusCodeFromOCStatus(code int32) int32 { + if c, ok := ocToHTTPCodeMap[code]; ok { + return c + } + return http.StatusInternalServerError +} diff --git a/internal/otel_collector/translator/trace/grpc_http_mapper_test.go b/internal/otel_collector/translator/trace/grpc_http_mapper_test.go new file mode 100644 index 00000000000..c3db0539a12 --- /dev/null +++ b/internal/otel_collector/translator/trace/grpc_http_mapper_test.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetranslator + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHTTPStatusFromOTStatus(t *testing.T) { + for otelStatus := int32(OCOK); otelStatus <= OCUnauthenticated; otelStatus++ { + httpStatus := HTTPStatusCodeFromOCStatus(otelStatus) + assert.True(t, httpStatus != 0) + } +} + +func TestOTStatusFromHTTPStatus(t *testing.T) { + for httpStatus := int32(100); httpStatus <= 604; httpStatus++ { + otelStatus := OCStatusCodeFromHTTP(httpStatus) + assert.True(t, otelStatus >= OCOK && otelStatus <= OCUnauthenticated) + } +} diff --git a/internal/otel_collector/translator/trace/jaeger/constants.go b/internal/otel_collector/translator/trace/jaeger/constants.go new file mode 100644 index 00000000000..48925f21e41 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/constants.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "errors" +) + +var ( + errZeroTraceID = errors.New("OC span has an all zeros trace ID") + errZeroSpanID = errors.New("OC span has an all zeros span ID") +) diff --git a/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go b/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go new file mode 100644 index 00000000000..19e8e06b0d1 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go @@ -0,0 +1,377 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+
+	"github.com/jaegertracing/jaeger/model"
+	"github.com/jaegertracing/jaeger/thrift-gen/jaeger"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/translator/conventions"
+	tracetranslator "go.opentelemetry.io/collector/translator/trace"
+)
+
+var blankJaegerProtoSpan = new(jaeger.Span)
+
+// ProtoBatchesToInternalTraces converts multiple Jaeger proto batches to internal traces
+func ProtoBatchesToInternalTraces(batches []*model.Batch) pdata.Traces {
+	traceData := pdata.NewTraces()
+	if len(batches) == 0 {
+		return traceData
+	}
+
+	rss := traceData.ResourceSpans()
+	rss.Resize(len(batches))
+
+	i := 0
+	for _, batch := range batches {
+		if batch.GetProcess() == nil && len(batch.GetSpans()) == 0 {
+			continue
+		}
+
+		protoBatchToResourceSpans(*batch, rss.At(i))
+		i++
+	}
+
+	// reduce traceData.ResourceSpans slice if some batches were skipped
+	if i < len(batches) {
+		rss.Resize(i)
+	}
+
+	return traceData
+}
+
+// ProtoBatchToInternalTraces converts a Jaeger proto batch to internal traces
+func ProtoBatchToInternalTraces(batch model.Batch) pdata.Traces {
+	traceData := pdata.NewTraces()
+
+	if batch.GetProcess() == nil && len(batch.GetSpans()) == 0 {
+		return traceData
+	}
+
+	rss := traceData.ResourceSpans()
+	rss.Resize(1)
+	protoBatchToResourceSpans(batch, rss.At(0))
+
+	return traceData
+}
+
+func protoBatchToResourceSpans(batch model.Batch, dest pdata.ResourceSpans) {
+	jSpans := batch.GetSpans()
+
+	jProcessToInternalResource(batch.GetProcess(), dest.Resource())
+
+	if len(jSpans) == 0 {
+		return
+	}
+
+	groupByLibrary := jSpansToInternal(jSpans)
+	ilss := dest.InstrumentationLibrarySpans()
+	for _, v := range groupByLibrary {
+		ilss.Append(v)
+	}
+}
+
+func jProcessToInternalResource(process *model.Process, dest pdata.Resource) {
+	if process == nil || process.ServiceName == tracetranslator.ResourceNoServiceName {
+		return
+	}
+
+	serviceName := process.ServiceName
+	tags := process.Tags
+	if serviceName == "" && tags == nil {
+		return
+	}
+
+	attrs := dest.Attributes()
+	if serviceName != "" {
+		attrs.InitEmptyWithCapacity(len(tags) + 1)
+		attrs.UpsertString(conventions.AttributeServiceName, serviceName)
+	} else {
+		attrs.InitEmptyWithCapacity(len(tags))
+	}
+	jTagsToInternalAttributes(tags, attrs)
+
+	// Handle special keys translations.
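+	// "hostname" is promoted to the host.name semantic convention and
+	// "jaeger.version" to the OC exporter version attribute, unless the
+	// destination keys already exist (see the two helpers below).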
+	translateHostnameAttr(attrs)
+	translateJaegerVersionAttr(attrs)
+}
+
+// translateHostnameAttr translates the "hostname" attribute
+func translateHostnameAttr(attrs pdata.AttributeMap) {
+	hostname, hostnameFound := attrs.Get("hostname")
+	_, convHostNameFound := attrs.Get(conventions.AttributeHostName)
+	if hostnameFound && !convHostNameFound {
+		attrs.Insert(conventions.AttributeHostName, hostname)
+		attrs.Delete("hostname")
+	}
+}
+
+// translateJaegerVersionAttr translates the "jaeger.version" attribute
+func translateJaegerVersionAttr(attrs pdata.AttributeMap) {
+	jaegerVersion, jaegerVersionFound := attrs.Get("jaeger.version")
+	_, exporterVersionFound := attrs.Get(conventions.OCAttributeExporterVersion)
+	if jaegerVersionFound && !exporterVersionFound {
+		attrs.InsertString(conventions.OCAttributeExporterVersion, "Jaeger-"+jaegerVersion.StringVal())
+		attrs.Delete("jaeger.version")
+	}
+}
+
+func jSpansToInternal(spans []*model.Span) map[instrumentationLibrary]pdata.InstrumentationLibrarySpans {
+	spansByLibrary := make(map[instrumentationLibrary]pdata.InstrumentationLibrarySpans)
+
+	for _, span := range spans {
+		if span == nil || reflect.DeepEqual(span, blankJaegerProtoSpan) {
+			continue
+		}
+		pSpan, library := jSpanToInternal(span)
+		ils, found := spansByLibrary[library]
+		if !found {
+			ils = pdata.NewInstrumentationLibrarySpans()
+			spansByLibrary[library] = ils
+
+			if library.name != "" {
+				ils.InstrumentationLibrary().SetName(library.name)
+				ils.InstrumentationLibrary().SetVersion(library.version)
+			}
+		}
+		ils.Spans().Append(pSpan)
+	}
+	return spansByLibrary
+}
+
+type instrumentationLibrary struct {
+	name, version string
+}
+
+func jSpanToInternal(span *model.Span) (pdata.Span, instrumentationLibrary) {
+	dest := pdata.NewSpan()
+	dest.SetTraceID(tracetranslator.UInt64ToTraceID(span.TraceID.High, span.TraceID.Low))
+	dest.SetSpanID(tracetranslator.UInt64ToSpanID(uint64(span.SpanID)))
+	dest.SetName(span.OperationName)
+	dest.SetStartTime(pdata.TimeToUnixNano(span.StartTime))
+	dest.SetEndTime(pdata.TimeToUnixNano(span.StartTime.Add(span.Duration)))
+
+	parentSpanID := span.ParentSpanID()
+	if parentSpanID != model.SpanID(0) {
+		dest.SetParentSpanID(tracetranslator.UInt64ToSpanID(uint64(parentSpanID)))
+	}
+
+	attrs := dest.Attributes()
+	attrs.InitEmptyWithCapacity(len(span.Tags))
+	jTagsToInternalAttributes(span.Tags, attrs)
+	setInternalSpanStatus(attrs, dest.Status())
+	if spanKindAttr, ok := attrs.Get(tracetranslator.TagSpanKind); ok {
+		dest.SetKind(jSpanKindToInternal(spanKindAttr.StringVal()))
+		attrs.Delete(tracetranslator.TagSpanKind)
+	}
+
+	il := instrumentationLibrary{}
+	if libraryName, ok := attrs.Get(tracetranslator.TagInstrumentationName); ok {
+		il.name = libraryName.StringVal()
+		attrs.Delete(tracetranslator.TagInstrumentationName)
+		if libraryVersion, ok := attrs.Get(tracetranslator.TagInstrumentationVersion); ok {
+			il.version = libraryVersion.StringVal()
+			attrs.Delete(tracetranslator.TagInstrumentationVersion)
+		}
+	}
+
+	dest.SetTraceState(getTraceStateFromAttrs(attrs))
+
+	// drop the attributes slice if all of them were replaced during translation
+	if attrs.Len() == 0 {
+		attrs.InitFromMap(nil)
+	}
+
+	jLogsToSpanEvents(span.Logs, dest.Events())
+	jReferencesToSpanLinks(span.References, parentSpanID, dest.Links())
+
+	return dest, il
+}
+
+func jTagsToInternalAttributes(tags []model.KeyValue, dest pdata.AttributeMap) {
+	for _, tag := range tags {
+		switch tag.GetVType() {
+		case model.ValueType_STRING:
+			dest.UpsertString(tag.Key, tag.GetVStr())
+		case model.ValueType_BOOL:
+			dest.UpsertBool(tag.Key, tag.GetVBool())
+		case model.ValueType_INT64:
+			dest.UpsertInt(tag.Key, tag.GetVInt64())
+		case model.ValueType_FLOAT64:
+			dest.UpsertDouble(tag.Key, tag.GetVFloat64())
+		case model.ValueType_BINARY:
+			dest.UpsertString(tag.Key, base64.StdEncoding.EncodeToString(tag.GetVBinary()))
+		default:
+			dest.UpsertString(tag.Key, fmt.Sprintf("<Unknown Jaeger TagType %q>", tag.GetVType()))
+		}
+	}
+}
+
+func setInternalSpanStatus(attrs pdata.AttributeMap, dest pdata.SpanStatus) {
+
+	statusCode := pdata.StatusCodeUnset
+	statusMessage := ""
+	statusExists := false
+
+	if errorVal, ok := attrs.Get(tracetranslator.TagError); ok {
+		if errorVal.BoolVal() {
+			statusCode = pdata.StatusCodeError
+			attrs.Delete(tracetranslator.TagError)
+			statusExists = true
+		}
+	}
+
+	if codeAttr, ok := attrs.Get(tracetranslator.TagStatusCode); ok {
+		statusExists = true
+		if code, err := getStatusCodeValFromAttr(codeAttr); err == nil {
+			statusCode = pdata.StatusCode(code)
+			attrs.Delete(tracetranslator.TagStatusCode)
+		}
+		if msgAttr, ok := attrs.Get(tracetranslator.TagStatusMsg); ok {
+			statusMessage = msgAttr.StringVal()
+			attrs.Delete(tracetranslator.TagStatusMsg)
+		}
+	} else if httpCodeAttr, ok := attrs.Get(tracetranslator.TagHTTPStatusCode); ok {
+		statusExists = true
+		if code, err := getStatusCodeFromHTTPStatusAttr(httpCodeAttr); err == nil {
+
+			// Do not set status code in case it was set to Unset.
+			if code != pdata.StatusCodeUnset {
+				statusCode = code
+			}
+
+			if msgAttr, ok := attrs.Get(tracetranslator.TagHTTPStatusMsg); ok {
+				statusMessage = msgAttr.StringVal()
+			}
+		}
+	}
+
+	if statusExists {
+		dest.SetCode(statusCode)
+		dest.SetMessage(statusMessage)
+	}
+}
+
+func getStatusCodeValFromAttr(attrVal pdata.AttributeValue) (int, error) {
+	var codeVal int64
+	switch attrVal.Type() {
+	case pdata.AttributeValueINT:
+		codeVal = attrVal.IntVal()
+	case pdata.AttributeValueSTRING:
+		i, err := strconv.Atoi(attrVal.StringVal())
+		if err != nil {
+			return 0, err
+		}
+		codeVal = int64(i)
+	default:
+		return 0, fmt.Errorf("invalid status code attribute type: %s", attrVal.Type().String())
+	}
+	if codeVal > math.MaxInt32 || codeVal < math.MinInt32 {
+		return 0, fmt.Errorf("invalid status code value: %d", codeVal)
+	}
+	return int(codeVal), nil
+}
+
+func getStatusCodeFromHTTPStatusAttr(attrVal pdata.AttributeValue) (pdata.StatusCode, error) {
+	statusCode, err := getStatusCodeValFromAttr(attrVal)
+	if err != nil {
+		return pdata.StatusCodeOk, err
+	}
+
+	return tracetranslator.StatusCodeFromHTTP(statusCode), nil
+}
+
+func jSpanKindToInternal(spanKind string) pdata.SpanKind {
+	switch spanKind {
+	case "client":
+		return pdata.SpanKindCLIENT
+	case "server":
+		return pdata.SpanKindSERVER
+	case "producer":
+		return pdata.SpanKindPRODUCER
+	case "consumer":
+		return pdata.SpanKindCONSUMER
+	case "internal":
+		return pdata.SpanKindINTERNAL
+	}
+	return pdata.SpanKindUNSPECIFIED
+}
+
+func jLogsToSpanEvents(logs []model.Log, dest pdata.SpanEventSlice) {
+	if len(logs) == 0 {
+		return
+	}
+
+	dest.Resize(len(logs))
+
+	for i, log := range logs {
+		event := dest.At(i)
+
+		event.SetTimestamp(pdata.TimestampUnixNano(uint64(log.Timestamp.UnixNano())))
+		if len(log.Fields) == 0 {
+			continue
+		}
+
+		attrs := event.Attributes()
+		attrs.InitEmptyWithCapacity(len(log.Fields))
+		jTagsToInternalAttributes(log.Fields, attrs)
+		if name, ok := attrs.Get(tracetranslator.TagMessage); ok {
+			event.SetName(name.StringVal())
+			attrs.Delete(tracetranslator.TagMessage)
+		}
+	}
+}
+
+// jReferencesToSpanLinks sets internal span links based on Jaeger span references, skipping excludeParentID
+func jReferencesToSpanLinks(refs []model.SpanRef, excludeParentID model.SpanID, dest pdata.SpanLinkSlice) {
+	if len(refs) == 0 || len(refs) == 1 && refs[0].SpanID == excludeParentID && refs[0].RefType == model.ChildOf {
+		return
+	}
+
+	dest.Resize(len(refs))
+	i := 0
+	for _, ref := range refs {
+		link := dest.At(i)
+		if ref.SpanID == excludeParentID && ref.RefType == model.ChildOf {
+			continue
+		}
+
+		link.SetTraceID(tracetranslator.UInt64ToTraceID(ref.TraceID.High, ref.TraceID.Low))
+		link.SetSpanID(pdata.NewSpanID(tracetranslator.UInt64ToByteSpanID(uint64(ref.SpanID))))
+		i++
+	}
+
+	// Reduce slice size in case excludeParentID was skipped
+	if i < len(refs) {
+		dest.Resize(i)
+	}
+}
+
+func getTraceStateFromAttrs(attrs pdata.AttributeMap) pdata.TraceState {
+	traceState := pdata.TraceStateEmpty
+	// TODO Bring this inline with solution for jaegertracing/jaeger-client-java #702 once available
+	if attr, ok := attrs.Get(tracetranslator.TagW3CTraceState); ok {
+		traceState = pdata.TraceState(attr.StringVal())
+		attrs.Delete(tracetranslator.TagW3CTraceState)
+	}
+	return traceState
+}
diff --git a/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces_test.go b/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces_test.go
new file mode 100644
index 00000000000..947f8c25e6f
--- /dev/null
+++ b/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces_test.go
@@ -0,0 +1,865 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"encoding/binary"
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/jaegertracing/jaeger/model"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal/testdata"
+	"go.opentelemetry.io/collector/translator/conventions"
+	tracetranslator "go.opentelemetry.io/collector/translator/trace"
+)
+
+// Use timestamps with microsecond granularity to work well with Jaeger thrift translation
+var (
+	testSpanStartTime      = time.Date(2020, 2, 11, 20, 26, 12, 321000, time.UTC)
+	testSpanStartTimestamp = pdata.TimestampUnixNano(testSpanStartTime.UnixNano())
+	testSpanEventTime      = time.Date(2020, 2, 11, 20, 26, 13, 123000, time.UTC)
+	testSpanEventTimestamp = pdata.TimestampUnixNano(testSpanEventTime.UnixNano())
+	testSpanEndTime        = time.Date(2020, 2, 11, 20, 26, 13, 789000, time.UTC)
+	testSpanEndTimestamp   = pdata.TimestampUnixNano(testSpanEndTime.UnixNano())
+)
+
+func TestGetStatusCodeValFromAttr(t *testing.T) {
+	_, invalidNumErr := strconv.Atoi("inf")
+
+	tests := []struct {
+		name string
+		attr pdata.AttributeValue
+		code int
+		err  error
+	}{
+		{
+			name: "ok-string",
+			attr: pdata.NewAttributeValueString("0"),
+			code: 0,
+			err:  nil,
+		},
+
+		{
+			name: "ok-int",
+			attr: pdata.NewAttributeValueInt(1),
+			code: 1,
+			err:  nil,
+		},
+
+		{
+			name: "wrong-type",
+			attr: pdata.NewAttributeValueBool(true),
+			code: 0,
+			err:  fmt.Errorf("invalid status code attribute type: BOOL"),
+		},
+
+		{
+			name: "invalid-string",
+			attr: pdata.NewAttributeValueString("inf"),
+			code: 0,
+			err:  invalidNumErr,
+		},
+
+		{
+			name: "invalid-int",
+			attr: pdata.NewAttributeValueInt(1844674407370955),
+			code: 0,
+			err:  fmt.Errorf("invalid status code value: 1844674407370955"),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			code, err := getStatusCodeValFromAttr(test.attr)
+			assert.EqualValues(t, test.err, err)
+			assert.Equal(t, test.code, code)
+		})
+	}
+}
+
+func TestGetStatusCodeFromHTTPStatusAttr(t *testing.T) {
+	tests := []struct {
+		name string
+		attr pdata.AttributeValue
+		code pdata.StatusCode
+	}{
+		{
+			name: "string-unknown",
+			attr: pdata.NewAttributeValueString("10"),
+			code: pdata.StatusCodeError,
+		},
+
+		{
+			name: "string-ok",
+			attr: pdata.NewAttributeValueString("101"),
+			code: pdata.StatusCodeUnset,
+		},
+
+		{
+			name: "int-not-found",
+			attr: pdata.NewAttributeValueInt(404),
+			code: pdata.StatusCodeError,
+		},
+		{
+			name: "int-invalid-arg",
+			attr: pdata.NewAttributeValueInt(408),
+			code: pdata.StatusCodeError,
+		},
+
+		{
+			name: "int-internal",
+			attr: pdata.NewAttributeValueInt(500),
+			code: pdata.StatusCodeError,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			code, err := getStatusCodeFromHTTPStatusAttr(test.attr)
+			assert.NoError(t, err)
+			assert.Equal(t, test.code, code)
+		})
+	}
+}
+
+func TestJTagsToInternalAttributes(t *testing.T) {
+	tags := []model.KeyValue{
+		{
+			Key:   "bool-val",
+			VType: model.ValueType_BOOL,
+			VBool: true,
+		},
+		{
+			Key:    "int-val",
+			VType:  model.ValueType_INT64,
+			VInt64: 123,
+		},
+		{
+			Key:   "string-val",
+			VType: model.ValueType_STRING,
+			VStr:  "abc",
+		},
+		{
+			Key:      "double-val",
+			VType:    model.ValueType_FLOAT64,
+			VFloat64: 1.23,
+		},
+		{
+			Key:     "binary-val",
+			VType:   model.ValueType_BINARY,
+			VBinary: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x7D, 0x98},
+		},
+	}
+
+	expected := pdata.NewAttributeMap()
+	expected.InsertBool("bool-val", true)
+
expected.InsertInt("int-val", 123) + expected.InsertString("string-val", "abc") + expected.InsertDouble("double-val", 1.23) + expected.InsertString("binary-val", "AAAAAABkfZg=") + + got := pdata.NewAttributeMap() + jTagsToInternalAttributes(tags, got) + + require.EqualValues(t, expected, got) +} + +func TestProtoBatchToInternalTraces(t *testing.T) { + + tests := []struct { + name string + jb model.Batch + td pdata.Traces + }{ + { + name: "empty", + jb: model.Batch{}, + td: testdata.GenerateTraceDataEmpty(), + }, + + { + name: "no-spans", + jb: model.Batch{ + Process: generateProtoProcess(), + }, + td: generateTraceDataResourceOnly(), + }, + + { + name: "no-resource-attrs", + jb: model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + }, + td: generateTraceDataResourceOnlyWithNoAttrs(), + }, + + { + name: "one-span-no-resources", + jb: model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + generateProtoSpanWithTraceState(), + }, + }, + td: generateTraceDataOneSpanNoResourceWithTraceState(), + }, + { + name: "two-spans-child-parent", + jb: model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + generateProtoSpan(), + generateProtoChildSpan(), + }, + }, + td: generateTraceDataTwoSpansChildParent(), + }, + + { + name: "two-spans-with-follower", + jb: model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + generateProtoSpan(), + generateProtoFollowerSpan(), + }, + }, + td: generateTraceDataTwoSpansWithFollower(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + td := ProtoBatchToInternalTraces(test.jb) + assert.EqualValues(t, test.td, td) + }) + } +} + +func TestProtoBatchToInternalTracesWithTwoLibraries(t *testing.T) { + jb := model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + { + StartTime: testSpanStartTime, + Duration: testSpanEndTime.Sub(testSpanStartTime), + OperationName: "operation2", + Tags: []model.KeyValue{ + { + Key: tracetranslator.TagInstrumentationName, + VType: model.ValueType_STRING, + VStr: "library2", + }, { + Key: tracetranslator.TagInstrumentationVersion, + VType: model.ValueType_STRING, + VStr: "0.42.0", + }, + }, + }, + { + TraceID: model.NewTraceID(0, 0), + StartTime: testSpanStartTime, + Duration: testSpanEndTime.Sub(testSpanStartTime), + OperationName: "operation1", + Tags: []model.KeyValue{ + { + Key: tracetranslator.TagInstrumentationName, + VType: model.ValueType_STRING, + VStr: "library1", + }, { + Key: tracetranslator.TagInstrumentationVersion, + VType: model.ValueType_STRING, + VStr: "0.42.0", + }, + }, + }, + }, + } + expected := generateTraceDataTwoSpansFromTwoLibraries() + library1Span := expected.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + library2Span := expected.ResourceSpans().At(0).InstrumentationLibrarySpans().At(1) + + actual := ProtoBatchToInternalTraces(jb) + + assert.Equal(t, actual.ResourceSpans().Len(), 1) + assert.Equal(t, actual.ResourceSpans().At(0).InstrumentationLibrarySpans().Len(), 2) + + ils0 := actual.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + ils1 := actual.ResourceSpans().At(0).InstrumentationLibrarySpans().At(1) + if ils0.InstrumentationLibrary().Name() == "library1" { + assert.EqualValues(t, library1Span, ils0) + assert.EqualValues(t, library2Span, 
ils1) + } else { + assert.EqualValues(t, library1Span, ils1) + assert.EqualValues(t, library2Span, ils0) + } +} + +func TestSetInternalSpanStatus(t *testing.T) { + + emptyStatus := pdata.NewSpanStatus() + + okStatus := pdata.NewSpanStatus() + okStatus.SetCode(pdata.StatusCodeOk) + + errorStatus := pdata.NewSpanStatus() + errorStatus.SetCode(pdata.StatusCodeError) + + errorStatusWithMessage := pdata.NewSpanStatus() + errorStatusWithMessage.SetCode(pdata.StatusCodeError) + errorStatusWithMessage.SetMessage("Error: Invalid argument") + + errorStatusWith404Message := pdata.NewSpanStatus() + errorStatusWith404Message.SetCode(pdata.StatusCodeError) + errorStatusWith404Message.SetMessage("HTTP 404: Not Found") + + tests := []struct { + name string + attrs pdata.AttributeMap + status pdata.SpanStatus + attrsModifiedLen int // Length of attributes map after dropping converted fields + }{ + { + name: "No tags set -> OK status", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{}), + status: emptyStatus, + attrsModifiedLen: 0, + }, + { + name: "error tag set -> Error status", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagError: pdata.NewAttributeValueBool(true), + }), + status: errorStatus, + attrsModifiedLen: 0, + }, + { + name: "status.code is set as int", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagStatusCode: pdata.NewAttributeValueInt(1), + }), + status: okStatus, + attrsModifiedLen: 0, + }, + { + name: "status.code, status.message and error tags are set", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagError: pdata.NewAttributeValueBool(true), + tracetranslator.TagStatusCode: pdata.NewAttributeValueInt(int64(pdata.StatusCodeError)), + tracetranslator.TagStatusMsg: pdata.NewAttributeValueString("Error: Invalid argument"), + }), + status: errorStatusWithMessage, + attrsModifiedLen: 0, + }, + { + name: "http.status_code tag is set as string", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagHTTPStatusCode: pdata.NewAttributeValueString("404"), + }), + status: errorStatus, + attrsModifiedLen: 1, + }, + { + name: "http.status_code, http.status_message and error tags are set", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagError: pdata.NewAttributeValueBool(true), + tracetranslator.TagHTTPStatusCode: pdata.NewAttributeValueInt(404), + tracetranslator.TagHTTPStatusMsg: pdata.NewAttributeValueString("HTTP 404: Not Found"), + }), + status: errorStatusWith404Message, + attrsModifiedLen: 2, + }, + { + name: "status.code has precedence over http.status_code.", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagStatusCode: pdata.NewAttributeValueInt(1), + tracetranslator.TagHTTPStatusCode: pdata.NewAttributeValueInt(500), + tracetranslator.TagHTTPStatusMsg: pdata.NewAttributeValueString("Server Error"), + }), + status: okStatus, + attrsModifiedLen: 2, + }, + { + name: "Ignore http.status_code == 200 if error set to true.", + attrs: pdata.NewAttributeMap().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagError: pdata.NewAttributeValueBool(true), + tracetranslator.TagHTTPStatusCode: pdata.NewAttributeValueInt(200), + }), + status: errorStatus, + attrsModifiedLen: 1, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + status := 
pdata.NewSpanStatus() + setInternalSpanStatus(test.attrs, status) + assert.EqualValues(t, test.status, status) + assert.Equal(t, test.attrsModifiedLen, test.attrs.Len()) + }) + } +} + +func TestProtoBatchesToInternalTraces(t *testing.T) { + batches := []*model.Batch{ + { + Process: generateProtoProcess(), + Spans: []*model.Span{ + generateProtoSpan(), + }, + }, + { + Spans: []*model.Span{ + generateProtoSpan(), + generateProtoChildSpan(), + }, + }, + { + // should be skipped + Spans: []*model.Span{}, + }, + } + + expected := generateTraceDataOneSpanNoResource() + resource := generateTraceDataResourceOnly().ResourceSpans().At(0).Resource() + resource.CopyTo(expected.ResourceSpans().At(0).Resource()) + expected.ResourceSpans().Resize(2) + twoSpans := generateTraceDataTwoSpansChildParent().ResourceSpans().At(0) + twoSpans.CopyTo(expected.ResourceSpans().At(1)) + + got := ProtoBatchesToInternalTraces(batches) + assert.EqualValues(t, expected, got) +} + +func TestJSpanKindToInternal(t *testing.T) { + tests := []struct { + jSpanKind string + otlpSpanKind pdata.SpanKind + }{ + { + jSpanKind: "client", + otlpSpanKind: pdata.SpanKindCLIENT, + }, + { + jSpanKind: "server", + otlpSpanKind: pdata.SpanKindSERVER, + }, + { + jSpanKind: "producer", + otlpSpanKind: pdata.SpanKindPRODUCER, + }, + { + jSpanKind: "consumer", + otlpSpanKind: pdata.SpanKindCONSUMER, + }, + { + jSpanKind: "internal", + otlpSpanKind: pdata.SpanKindINTERNAL, + }, + { + jSpanKind: "all-others", + otlpSpanKind: pdata.SpanKindUNSPECIFIED, + }, + } + + for _, test := range tests { + t.Run(test.jSpanKind, func(t *testing.T) { + assert.Equal(t, test.otlpSpanKind, jSpanKindToInternal(test.jSpanKind)) + }) + } +} + +func generateTraceDataResourceOnly() pdata.Traces { + td := testdata.GenerateTraceDataOneEmptyResourceSpans() + rs := td.ResourceSpans().At(0).Resource() + rs.Attributes().InsertString(conventions.AttributeServiceName, "service-1") + rs.Attributes().InsertInt("int-attr-1", 123) + return td +} + +func generateTraceDataResourceOnlyWithNoAttrs() pdata.Traces { + td := testdata.GenerateTraceDataOneEmptyResourceSpans() + td.ResourceSpans().At(0).Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{}) + return td +} + +func generateProtoProcess() *model.Process { + return &model.Process{ + ServiceName: "service-1", + Tags: []model.KeyValue{ + { + Key: "int-attr-1", + VType: model.ValueType_INT64, + VInt64: 123, + }, + }, + } +} + +func generateTraceDataOneSpanNoResource() pdata.Traces { + td := testdata.GenerateTraceDataOneSpanNoResource() + span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) + span.SetTraceID(pdata.NewTraceID( + [16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) + span.SetDroppedAttributesCount(0) + span.SetDroppedEventsCount(0) + span.SetStartTime(testSpanStartTimestamp) + span.SetEndTime(testSpanEndTimestamp) + span.SetKind(pdata.SpanKindCLIENT) + span.Events().At(0).SetTimestamp(testSpanEventTimestamp) + span.Events().At(0).SetDroppedAttributesCount(0) + span.Events().At(0).SetName("event-with-attr") + span.Events().At(1).SetTimestamp(testSpanEventTimestamp) + span.Events().At(1).SetDroppedAttributesCount(0) + span.Events().At(1).SetName("") + span.Events().At(1).Attributes().InsertInt("attr-int", 123) + return td +} + +func generateTraceDataWithLibraryInfo() pdata.Traces { + td := generateTraceDataOneSpanNoResource() + 
rs0 := td.ResourceSpans().At(0) + rs0ils0 := rs0.InstrumentationLibrarySpans().At(0) + rs0ils0.InstrumentationLibrary().SetName("io.opentelemetry.test") + rs0ils0.InstrumentationLibrary().SetVersion("0.42.0") + return td +} + +func generateTraceDataOneSpanNoResourceWithTraceState() pdata.Traces { + td := generateTraceDataOneSpanNoResource() + span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + span.SetTraceState("lasterror=f39cd56cc44274fd5abd07ef1164246d10ce2955") + return td +} + +func generateProtoSpan() *model.Span { + return &model.Span{ + TraceID: model.NewTraceID( + binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8}), + binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}), + ), + SpanID: model.NewSpanID(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + OperationName: "operationA", + StartTime: testSpanStartTime, + Duration: testSpanEndTime.Sub(testSpanStartTime), + Logs: []model.Log{ + { + Timestamp: testSpanEventTime, + Fields: []model.KeyValue{ + { + Key: tracetranslator.TagMessage, + VType: model.ValueType_STRING, + VStr: "event-with-attr", + }, + { + Key: "span-event-attr", + VType: model.ValueType_STRING, + VStr: "span-event-attr-val", + }, + }, + }, + { + Timestamp: testSpanEventTime, + Fields: []model.KeyValue{ + { + Key: "attr-int", + VType: model.ValueType_INT64, + VInt64: 123, + }, + }, + }, + }, + Tags: []model.KeyValue{ + { + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindClient), + }, + { + Key: tracetranslator.TagStatusCode, + VType: model.ValueType_INT64, + VInt64: int64(pdata.StatusCodeError), + }, + { + Key: tracetranslator.TagError, + VBool: true, + VType: model.ValueType_BOOL, + }, + { + Key: tracetranslator.TagStatusMsg, + VType: model.ValueType_STRING, + VStr: "status-cancelled", + }, + }, + } +} + +func generateProtoSpanWithLibraryInfo(libraryName string) *model.Span { + span := generateProtoSpan() + span.Tags = append([]model.KeyValue{ + { + Key: tracetranslator.TagInstrumentationName, + VType: model.ValueType_STRING, + VStr: libraryName, + }, { + Key: tracetranslator.TagInstrumentationVersion, + VType: model.ValueType_STRING, + VStr: "0.42.0", + }, + }, span.Tags...) 
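+	// The instrumentation library tags are prepended to the span's regular tags.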
+ + return span +} +func generateProtoSpanWithTraceState() *model.Span { + return &model.Span{ + TraceID: model.NewTraceID( + binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8}), + binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}), + ), + SpanID: model.NewSpanID(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + OperationName: "operationA", + StartTime: testSpanStartTime, + Duration: testSpanEndTime.Sub(testSpanStartTime), + Logs: []model.Log{ + { + Timestamp: testSpanEventTime, + Fields: []model.KeyValue{ + { + Key: tracetranslator.TagMessage, + VType: model.ValueType_STRING, + VStr: "event-with-attr", + }, + { + Key: "span-event-attr", + VType: model.ValueType_STRING, + VStr: "span-event-attr-val", + }, + }, + }, + { + Timestamp: testSpanEventTime, + Fields: []model.KeyValue{ + { + Key: "attr-int", + VType: model.ValueType_INT64, + VInt64: 123, + }, + }, + }, + }, + Tags: []model.KeyValue{ + { + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindClient), + }, + { + Key: tracetranslator.TagStatusCode, + VType: model.ValueType_INT64, + VInt64: int64(pdata.StatusCodeError), + }, + { + Key: tracetranslator.TagError, + VBool: true, + VType: model.ValueType_BOOL, + }, + { + Key: tracetranslator.TagStatusMsg, + VType: model.ValueType_STRING, + VStr: "status-cancelled", + }, + { + Key: tracetranslator.TagW3CTraceState, + VType: model.ValueType_STRING, + VStr: "lasterror=f39cd56cc44274fd5abd07ef1164246d10ce2955", + }, + }, + } +} + +func generateTraceDataTwoSpansChildParent() pdata.Traces { + td := generateTraceDataOneSpanNoResource() + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + spans.Resize(2) + + span := spans.At(1) + span.SetName("operationB") + span.SetSpanID(pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) + span.SetParentSpanID(spans.At(0).SpanID()) + span.SetKind(pdata.SpanKindSERVER) + span.SetTraceID(spans.At(0).TraceID()) + span.SetStartTime(spans.At(0).StartTime()) + span.SetEndTime(spans.At(0).EndTime()) + span.Status().SetCode(pdata.StatusCodeError) + span.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + tracetranslator.TagHTTPStatusCode: pdata.NewAttributeValueInt(404), + }) + + return td +} + +func generateProtoChildSpan() *model.Span { + traceID := model.NewTraceID( + binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8}), + binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}), + ) + return &model.Span{ + TraceID: traceID, + SpanID: model.NewSpanID(binary.BigEndian.Uint64([]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})), + OperationName: "operationB", + StartTime: testSpanStartTime, + Duration: testSpanEndTime.Sub(testSpanStartTime), + Tags: []model.KeyValue{ + { + Key: tracetranslator.TagHTTPStatusCode, + VType: model.ValueType_INT64, + VInt64: 404, + }, + { + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindServer), + }, + }, + References: []model.SpanRef{ + { + TraceID: traceID, + SpanID: model.NewSpanID(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + RefType: model.SpanRefType_CHILD_OF, + }, + }, + } +} + +func generateTraceDataTwoSpansWithFollower() pdata.Traces { + td := generateTraceDataOneSpanNoResource() + spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() + 
spans.Resize(2) + + span := spans.At(1) + span.SetName("operationC") + span.SetSpanID(pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) + span.SetTraceID(spans.At(0).TraceID()) + span.SetStartTime(spans.At(0).EndTime()) + span.SetEndTime(spans.At(0).EndTime() + 1000000) + span.SetKind(pdata.SpanKindCONSUMER) + span.Status().SetCode(pdata.StatusCodeOk) + span.Status().SetMessage("status-ok") + span.Links().Resize(1) + span.Links().At(0).SetTraceID(span.TraceID()) + span.Links().At(0).SetSpanID(spans.At(0).SpanID()) + return td +} + +func generateProtoFollowerSpan() *model.Span { + traceID := model.NewTraceID( + binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8}), + binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}), + ) + return &model.Span{ + TraceID: traceID, + SpanID: model.NewSpanID(binary.BigEndian.Uint64([]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})), + OperationName: "operationC", + StartTime: testSpanEndTime, + Duration: time.Millisecond, + Tags: []model.KeyValue{ + { + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindConsumer), + }, + { + Key: tracetranslator.TagStatusCode, + VType: model.ValueType_INT64, + VInt64: int64(pdata.StatusCodeOk), + }, + { + Key: tracetranslator.TagStatusMsg, + VType: model.ValueType_STRING, + VStr: "status-ok", + }, + }, + References: []model.SpanRef{ + { + TraceID: traceID, + SpanID: model.NewSpanID(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + RefType: model.SpanRefType_FOLLOWS_FROM, + }, + }, + } +} + +func BenchmarkProtoBatchToInternalTraces(b *testing.B) { + jb := model.Batch{ + Process: generateProtoProcess(), + Spans: []*model.Span{ + generateProtoSpan(), + generateProtoChildSpan(), + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + ProtoBatchToInternalTraces(jb) + } +} + +func generateTraceDataTwoSpansFromTwoLibraries() pdata.Traces { + td := testdata.GenerateTraceDataOneEmptyResourceSpans() + + rs0 := td.ResourceSpans().At(0) + rs0.InstrumentationLibrarySpans().Resize(2) + + rs0ils0 := rs0.InstrumentationLibrarySpans().At(0) + rs0ils0.InstrumentationLibrary().SetName("library1") + rs0ils0.InstrumentationLibrary().SetVersion("0.42.0") + rs0ils0.Spans().Resize(1) + span1 := rs0ils0.Spans().At(0) + span1.SetTraceID(pdata.NewTraceID(tracetranslator.UInt64ToByteTraceID(0, 0))) + span1.SetSpanID(pdata.NewSpanID(tracetranslator.UInt64ToByteSpanID(0))) + span1.SetName("operation1") + span1.SetStartTime(testSpanStartTimestamp) + span1.SetEndTime(testSpanEndTimestamp) + + rs0ils1 := rs0.InstrumentationLibrarySpans().At(1) + rs0ils1.InstrumentationLibrary().SetName("library2") + rs0ils1.InstrumentationLibrary().SetVersion("0.42.0") + rs0ils1.Spans().Resize(1) + span2 := rs0ils1.Spans().At(0) + span2.SetTraceID(span1.TraceID()) + span2.SetSpanID(span1.SpanID()) + span2.SetName("operation2") + span2.SetStartTime(testSpanStartTimestamp) + span2.SetEndTime(testSpanEndTimestamp) + + return td +} diff --git a/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go b/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go new file mode 100644 index 00000000000..c6aa7d21977 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go @@ -0,0 +1,201 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "encoding/base64" + "fmt" + "reflect" + + "github.com/jaegertracing/jaeger/thrift-gen/jaeger" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func ThriftBatchToInternalTraces(batch *jaeger.Batch) pdata.Traces { + traceData := pdata.NewTraces() + jProcess := batch.GetProcess() + jSpans := batch.GetSpans() + + if jProcess == nil && len(jSpans) == 0 { + return traceData + } + + rss := traceData.ResourceSpans() + rss.Resize(1) + rs := rss.At(0) + jThriftProcessToInternalResource(jProcess, rs.Resource()) + + if len(jSpans) == 0 { + return traceData + } + + ilss := rs.InstrumentationLibrarySpans() + ilss.Resize(1) + jThriftSpansToInternal(jSpans, ilss.At(0).Spans()) + + return traceData +} + +func jThriftProcessToInternalResource(process *jaeger.Process, dest pdata.Resource) { + if process == nil { + return + } + + serviceName := process.GetServiceName() + tags := process.GetTags() + if serviceName == "" && tags == nil { + return + } + + attrs := dest.Attributes() + if serviceName != "" { + attrs.InitEmptyWithCapacity(len(tags) + 1) + attrs.UpsertString(conventions.AttributeServiceName, serviceName) + } else { + attrs.InitEmptyWithCapacity(len(tags)) + } + jThriftTagsToInternalAttributes(tags, attrs) + + // Handle special keys translations. 
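+	// As in the proto translation, "hostname" and "jaeger.version" are
+	// rewritten to their OpenTelemetry counterparts when the target keys
+	// are not already present.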
+	translateHostnameAttr(attrs)
+	translateJaegerVersionAttr(attrs)
+}
+
+func jThriftSpansToInternal(spans []*jaeger.Span, dest pdata.SpanSlice) {
+	if len(spans) == 0 {
+		return
+	}
+
+	dest.Resize(len(spans))
+	i := 0
+	for _, span := range spans {
+		if span == nil || reflect.DeepEqual(span, blankJaegerProtoSpan) {
+			continue
+		}
+		jThriftSpanToInternal(span, dest.At(i))
+		i++
+	}
+
+	if i < len(spans) {
+		dest.Resize(i)
+	}
+}
+
+func jThriftSpanToInternal(span *jaeger.Span, dest pdata.Span) {
+	dest.SetTraceID(tracetranslator.Int64ToTraceID(span.TraceIdHigh, span.TraceIdLow))
+	dest.SetSpanID(tracetranslator.Int64ToSpanID(span.SpanId))
+	dest.SetName(span.OperationName)
+	dest.SetStartTime(microsecondsToUnixNano(span.StartTime))
+	dest.SetEndTime(microsecondsToUnixNano(span.StartTime + span.Duration))
+
+	parentSpanID := span.ParentSpanId
+	if parentSpanID != 0 {
+		dest.SetParentSpanID(tracetranslator.Int64ToSpanID(parentSpanID))
+	}
+
+	attrs := dest.Attributes()
+	attrs.InitEmptyWithCapacity(len(span.Tags))
+	jThriftTagsToInternalAttributes(span.Tags, attrs)
+	setInternalSpanStatus(attrs, dest.Status())
+	if spanKindAttr, ok := attrs.Get(tracetranslator.TagSpanKind); ok {
+		dest.SetKind(jSpanKindToInternal(spanKindAttr.StringVal()))
+		attrs.Delete(tracetranslator.TagSpanKind)
+	}
+
+	// drop the attributes slice if all of them were replaced during translation
+	if attrs.Len() == 0 {
+		attrs.InitFromMap(nil)
+	}
+
+	jThriftLogsToSpanEvents(span.Logs, dest.Events())
+	jThriftReferencesToSpanLinks(span.References, parentSpanID, dest.Links())
+}
+
+// jThriftTagsToInternalAttributes converts Jaeger thrift tags to internal attributes
+func jThriftTagsToInternalAttributes(tags []*jaeger.Tag, dest pdata.AttributeMap) {
+	for _, tag := range tags {
+		switch tag.GetVType() {
+		case jaeger.TagType_STRING:
+			dest.UpsertString(tag.Key, tag.GetVStr())
+		case jaeger.TagType_BOOL:
+			dest.UpsertBool(tag.Key, tag.GetVBool())
+		case jaeger.TagType_LONG:
+			dest.UpsertInt(tag.Key, tag.GetVLong())
+		case jaeger.TagType_DOUBLE:
+			dest.UpsertDouble(tag.Key, tag.GetVDouble())
+		case jaeger.TagType_BINARY:
+			dest.UpsertString(tag.Key, base64.StdEncoding.EncodeToString(tag.GetVBinary()))
+		default:
+			dest.UpsertString(tag.Key, fmt.Sprintf("<Unknown Jaeger TagType %q>", tag.GetVType()))
+		}
+	}
+}
+
+func jThriftLogsToSpanEvents(logs []*jaeger.Log, dest pdata.SpanEventSlice) {
+	if len(logs) == 0 {
+		return
+	}
+
+	dest.Resize(len(logs))
+
+	for i, log := range logs {
+		event := dest.At(i)
+
+		event.SetTimestamp(microsecondsToUnixNano(log.Timestamp))
+		if len(log.Fields) == 0 {
+			continue
+		}
+
+		attrs := event.Attributes()
+		attrs.InitEmptyWithCapacity(len(log.Fields))
+		jThriftTagsToInternalAttributes(log.Fields, attrs)
+		if name, ok := attrs.Get(tracetranslator.TagMessage); ok {
+			event.SetName(name.StringVal())
+			attrs.Delete(tracetranslator.TagMessage)
+		}
+	}
+}
+
+// jThriftReferencesToSpanLinks sets internal span links based on Jaeger span references, skipping excludeParentID
+func jThriftReferencesToSpanLinks(refs []*jaeger.SpanRef, excludeParentID int64, dest pdata.SpanLinkSlice) {
+	if len(refs) == 0 || len(refs) == 1 && refs[0].SpanId == excludeParentID && refs[0].RefType == jaeger.SpanRefType_CHILD_OF {
+		return
+	}
+
+	dest.Resize(len(refs))
+	i := 0
+	for _, ref := range refs {
+		link := dest.At(i)
+		if ref.SpanId == excludeParentID && ref.RefType == jaeger.SpanRefType_CHILD_OF {
+			continue
+		}
+
+		link.SetTraceID(tracetranslator.Int64ToTraceID(ref.TraceIdHigh, ref.TraceIdLow))
+		link.SetSpanID(pdata.NewSpanID(tracetranslator.Int64ToByteSpanID(ref.SpanId)))
+		i++
+	}
+
+	// Reduce slice size in case
excludeParentID was skipped + if i < len(refs) { + dest.Resize(i) + } +} + +// microsecondsToUnixNano converts epoch microseconds to pdata.TimestampUnixNano +func microsecondsToUnixNano(ms int64) pdata.TimestampUnixNano { + return pdata.TimestampUnixNano(uint64(ms) * 1000) +} diff --git a/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces_test.go b/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces_test.go new file mode 100644 index 00000000000..9f668ccfdcd --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces_test.go @@ -0,0 +1,301 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "encoding/binary" + "testing" + + "github.com/jaegertracing/jaeger/thrift-gen/jaeger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func TestJThriftTagsToInternalAttributes(t *testing.T) { + var intVal int64 = 123 + boolVal := true + stringVal := "abc" + doubleVal := 1.23 + tags := []*jaeger.Tag{ + { + Key: "bool-val", + VType: jaeger.TagType_BOOL, + VBool: &boolVal, + }, + { + Key: "int-val", + VType: jaeger.TagType_LONG, + VLong: &intVal, + }, + { + Key: "string-val", + VType: jaeger.TagType_STRING, + VStr: &stringVal, + }, + { + Key: "double-val", + VType: jaeger.TagType_DOUBLE, + VDouble: &doubleVal, + }, + { + Key: "binary-val", + VType: jaeger.TagType_BINARY, + VBinary: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x7D, 0x98}, + }, + } + + expected := pdata.NewAttributeMap() + expected.InsertBool("bool-val", true) + expected.InsertInt("int-val", 123) + expected.InsertString("string-val", "abc") + expected.InsertDouble("double-val", 1.23) + expected.InsertString("binary-val", "AAAAAABkfZg=") + + got := pdata.NewAttributeMap() + jThriftTagsToInternalAttributes(tags, got) + + require.EqualValues(t, expected, got) +} + +func TestThriftBatchToInternalTraces(t *testing.T) { + + tests := []struct { + name string + jb *jaeger.Batch + td pdata.Traces + }{ + { + name: "empty", + jb: &jaeger.Batch{}, + td: testdata.GenerateTraceDataEmpty(), + }, + + { + name: "no-spans", + jb: &jaeger.Batch{ + Process: generateThriftProcess(), + }, + td: testdata.GenerateTraceDataNoLibraries(), + }, + + { + name: "one-span-no-resources", + jb: &jaeger.Batch{ + Spans: []*jaeger.Span{ + generateThriftSpan(), + }, + }, + td: generateTraceDataOneSpanNoResource(), + }, + { + name: "two-spans-child-parent", + jb: &jaeger.Batch{ + Spans: []*jaeger.Span{ + generateThriftSpan(), + generateThriftChildSpan(), + }, + }, + td: generateTraceDataTwoSpansChildParent(), + }, + + { + name: "two-spans-with-follower", + jb: &jaeger.Batch{ + Spans: []*jaeger.Span{ + generateThriftSpan(), + generateThriftFollowerSpan(), + }, + }, + td: generateTraceDataTwoSpansWithFollower(), + }, + } + + for _, 
test := range tests { + t.Run(test.name, func(t *testing.T) { + td := ThriftBatchToInternalTraces(test.jb) + assert.EqualValues(t, test.td, td) + }) + } +} + +func generateThriftProcess() *jaeger.Process { + attrVal := "resource-attr-val-1" + return &jaeger.Process{ + Tags: []*jaeger.Tag{ + { + Key: "resource-attr", + VType: jaeger.TagType_STRING, + VStr: &attrVal, + }, + }, + } +} + +func generateThriftSpan() *jaeger.Span { + spanStartTs := unixNanoToMicroseconds(testSpanStartTimestamp) + spanEndTs := unixNanoToMicroseconds(testSpanEndTimestamp) + eventTs := unixNanoToMicroseconds(testSpanEventTimestamp) + intAttrVal := int64(123) + eventName := "event-with-attr" + eventStrAttrVal := "span-event-attr-val" + statusCode := int64(pdata.StatusCodeError) + statusMsg := "status-cancelled" + kind := string(tracetranslator.OpenTracingSpanKindClient) + + return &jaeger.Span{ + TraceIdHigh: int64(binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8})), + TraceIdLow: int64(binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + SpanId: int64(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + OperationName: "operationA", + StartTime: spanStartTs, + Duration: spanEndTs - spanStartTs, + Logs: []*jaeger.Log{ + { + Timestamp: eventTs, + Fields: []*jaeger.Tag{ + { + Key: tracetranslator.TagMessage, + VType: jaeger.TagType_STRING, + VStr: &eventName, + }, + { + Key: "span-event-attr", + VType: jaeger.TagType_STRING, + VStr: &eventStrAttrVal, + }, + }, + }, + { + Timestamp: eventTs, + Fields: []*jaeger.Tag{ + { + Key: "attr-int", + VType: jaeger.TagType_LONG, + VLong: &intAttrVal, + }, + }, + }, + }, + Tags: []*jaeger.Tag{ + { + Key: tracetranslator.TagStatusCode, + VType: jaeger.TagType_LONG, + VLong: &statusCode, + }, + { + Key: tracetranslator.TagStatusMsg, + VType: jaeger.TagType_STRING, + VStr: &statusMsg, + }, + { + Key: tracetranslator.TagSpanKind, + VType: jaeger.TagType_STRING, + VStr: &kind, + }, + }, + } +} + +func generateThriftChildSpan() *jaeger.Span { + spanStartTs := unixNanoToMicroseconds(testSpanStartTimestamp) + spanEndTs := unixNanoToMicroseconds(testSpanEndTimestamp) + notFoundAttrVal := int64(404) + kind := string(tracetranslator.OpenTracingSpanKindServer) + + return &jaeger.Span{ + TraceIdHigh: int64(binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8})), + TraceIdLow: int64(binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + SpanId: int64(binary.BigEndian.Uint64([]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})), + ParentSpanId: int64(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + OperationName: "operationB", + StartTime: spanStartTs, + Duration: spanEndTs - spanStartTs, + Tags: []*jaeger.Tag{ + { + Key: tracetranslator.TagHTTPStatusCode, + VType: jaeger.TagType_LONG, + VLong: ¬FoundAttrVal, + }, + { + Key: tracetranslator.TagSpanKind, + VType: jaeger.TagType_STRING, + VStr: &kind, + }, + }, + } +} + +func generateThriftFollowerSpan() *jaeger.Span { + statusCode := int64(pdata.StatusCodeOk) + statusMsg := "status-ok" + kind := string(tracetranslator.OpenTracingSpanKindConsumer) + + return &jaeger.Span{ + TraceIdHigh: int64(binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8})), + TraceIdLow: int64(binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + SpanId: int64(binary.BigEndian.Uint64([]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})), 
+ OperationName: "operationC", + StartTime: unixNanoToMicroseconds(testSpanEndTimestamp), + Duration: 1000, + Tags: []*jaeger.Tag{ + { + Key: tracetranslator.TagStatusCode, + VType: jaeger.TagType_LONG, + VLong: &statusCode, + }, + { + Key: tracetranslator.TagStatusMsg, + VType: jaeger.TagType_STRING, + VStr: &statusMsg, + }, + { + Key: tracetranslator.TagSpanKind, + VType: jaeger.TagType_STRING, + VStr: &kind, + }, + }, + References: []*jaeger.SpanRef{ + { + TraceIdHigh: int64(binary.BigEndian.Uint64([]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8})), + TraceIdLow: int64(binary.BigEndian.Uint64([]byte{0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + SpanId: int64(binary.BigEndian.Uint64([]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + RefType: jaeger.SpanRefType_FOLLOWS_FROM, + }, + }, + } +} + +func unixNanoToMicroseconds(ns pdata.TimestampUnixNano) int64 { + return int64(ns / 1000) +} + +func BenchmarkThriftBatchToInternalTraces(b *testing.B) { + jb := &jaeger.Batch{ + Process: generateThriftProcess(), + Spans: []*jaeger.Span{ + generateThriftSpan(), + generateThriftChildSpan(), + }, + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + ThriftBatchToInternalTraces(jb) + } +} diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/jaegerproto_batch_01.json b/internal/otel_collector/translator/trace/jaeger/testdata/jaegerproto_batch_01.json new file mode 100644 index 00000000000..ed7318b6f70 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/jaegerproto_batch_01.json @@ -0,0 +1,202 @@ +{ + "process": { + "service_name": "api", + "tags": [ + { + "key": "hostname", + "v_type": 0, + "v_str": "api246-sjc1" + }, + { + "key": "pid", + "v_type": 2, + "v_int64": 13 + }, + { + "key": "start.time", + "v_type": 0, + "v_str": "2017-01-26T21:46:30.639875Z" + }, + { + "key": "ip", + "v_type": 0, + "v_str": "10.53.69.61" + }, + { + "key": "opencensus.exporterversion", + "v_type": 0, + "v_str": "someVersion" + }, + { + "key": "a.bool", + "v_type": 0, + "v_str": "true" + }, + { + "key": "a.double", + "v_type": 0, + "v_str": "1234.56789" + }, + { + "key": "a.long", + "v_type": 0, + "v_str": "123456789" + }, + { + "key": "a.binary", + "v_type": 0, + "v_str": "AQIDBAMCAQ==" + } + ] + }, + "spans": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "parent_span_id": 6866147, + "operation_name": "get", + "start_time": "2017-01-26T21:46:31.639875Z", + "duration": 22938000, + "tags": [ + { + "key": "http.url", + "v_type": 0, + "v_str": "http://localhost:15598/client_transactions" + }, + { + "key": "span.kind", + "v_type": 0, + "v_str": "server" + }, + { + "key": "peer.port", + "v_type": 2, + "v_int64": 53931 + }, + { + "key": "someBool", + "v_type": 1, + "v_bool": true + }, + { + "key": "someDouble", + "v_type": 3, + "v_float64": 129.8 + }, + { + "key": "peer.service", + "v_type": 0, + "v_str": "rtapi" + }, + { + "key": "peer.ipv4", + "v_type": 2, + "v_int64": 3224716605 + } + ], + "logs": [ + { + "timestamp": "2017-01-26T21:46:31.639874Z", + "fields": [ + { + "key": "message.id", + "v_int64": 0, + "v_type": 2 + }, + { + "key": "message.type", + "v_str": "SENT", + "v_type": 0 + }, + { + "key": "message.compressed_size", + "v_int64": 512, + "v_type": 2 + }, + { + "key": "message.uncompressed_size", + "v_int64": 1024, + "v_type": 2 + } + ] + }, + { + "timestamp": "2017-01-26T21:46:31.639875Z", + "fields": [ + { + "key": "key1", + "v_type": 0, + "v_str": "value1" + } + ] + }, + { + "timestamp": "2017-01-26T21:46:31.639875Z", + 
"fields": [ + { + "key": "event", + "v_type": 0, + "v_str": "nothing" + }, + { + "key": "description", + "v_type": 0, + "v_str": "annotation description" + } + ] + } + ], + "process": { + "service_name": "api", + "tags": [ + + { + "key": "hostname", + "v_type": 0, + "v_str": "api246-sjc1" + }, + { + "key": "pid", + "v_type": 2, + "v_int64": 13 + }, + { + "key": "start.time", + "v_type": 0, + "v_str": "2017-01-26T21:46:30.639875Z" + }, + { + "key": "ip", + "v_type": 0, + "v_str": "10.53.69.61" + }, + { + "key": "opencensus.exporterversion", + "v_type": 0, + "v_str": "someVersion" + }, + { + "key": "a.bool", + "v_type": 0, + "v_str": "true" + }, + { + "key": "a.double", + "v_type": 0, + "v_str": "1234.56789" + }, + { + "key": "a.long", + "v_type": 0, + "v_str": "123456789" + }, + { + "key": "a.binary", + "v_type": 0, + "v_str": "AQIDBAMCAQ==" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/jaegerproto_batch_02.json b/internal/otel_collector/translator/trace/jaeger/testdata/jaegerproto_batch_02.json new file mode 100644 index 00000000000..1c3e2270e89 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/jaegerproto_batch_02.json @@ -0,0 +1,80 @@ +{ + "process": { + "service_name": "api", + "tags": null + }, + "spans": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "parent_span_id": 0, + "operation_name": "get", + "start_time": "2017-01-26T21:46:31.639875Z", + "duration": 22938000, + "logs": null, + "process": { + "service_name": "api", + "tags": null + }, + "tags": [ + { + "key": "peer.service", + "v_type": 0, + "v_str": "AAAAAAAAMDk=" + }, + { + "key": "span.kind", + "v_type": 0, + "v_str": "server" + } + ] + }, + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZk=", + "parent_span_id": 0, + "operation_name": "get", + "process": { + "service_name": "api", + "tags": null + }, + "logs": null, + "references": [ + { + "ref_type": 0, + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=" + }, + { + "ref_type": 1, + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABoxOM=" + } + ], + "start_time": "2017-01-26T21:46:31.639875Z", + "duration": 22938000, + "tags": [ + { + "key": "span.kind", + "v_type": 0, + "v_str": "server" + } + ] + }, + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "parent_span_id": 0, + "operation_name": "get2", + "start_time": "2017-01-26T21:46:32.639875Z", + "duration": 22938000, + "logs": null, + "tags": null, + "process": { + "service_name": "api", + "tags": null + } + } + ] + } + \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/ocproto_batch_01.json b/internal/otel_collector/translator/trace/jaeger/testdata/ocproto_batch_01.json new file mode 100644 index 00000000000..cb4e53b4004 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/ocproto_batch_01.json @@ -0,0 +1,139 @@ +{ + "Node": { + "identifier": { + "host_name": "api246-sjc1" + }, + "library_info": { + "exporter_version": "Jaeger-Python-3.1.0" + }, + "service_info": { + "name": "api" + }, + "attributes": { + "a.binary": "AQIDBAMCAQ==", + "a.bool": "true", + "a.double": "1234.56789", + "a.long": "123456789", + "ip": "10.53.69.61" + } + }, + "Resource": null, + "Spans": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "parent_span_id": "AAAAAABoxOM=", + "name": { + "value": "get" + }, + "kind": 1, + "start_time": { + 
"seconds": 1485467191, + "nanos": 639875000 + }, + "end_time": { + "seconds": 1485467191, + "nanos": 662813000 + }, + "attributes": { + "attribute_map": { + "http.url": { + "Value": { + "StringValue": { + "value": "http://localhost:15598/client_transactions" + } + } + }, + "peer.ipv4": { + "Value": { + "IntValue": 3224716605 + } + }, + "peer.port": { + "Value": { + "IntValue": 53931 + } + }, + "peer.service": { + "Value": { + "StringValue": { + "value": "rtapi" + } + } + }, + "someBool": { + "Value": { + "BoolValue": true + } + }, + "someDouble": { + "Value": { + "DoubleValue": 129.8 + } + }, + "span.kind": { + "Value": { + "StringValue": { + "value": "server" + } + } + } + } + }, + "time_events": { + "time_event": [ + { + "time": { + "seconds": 1485467191, + "nanos": 639875000 + }, + "Value": { + "Annotation": { + "attributes": { + "attribute_map": { + "key1": { + "Value": { + "StringValue": { + "value": "value1" + } + } + }, + "key2": { + "Value": { + "StringValue": { + "value": "value2" + } + } + } + } + } + } + } + }, + { + "time": { + "seconds": 1485467191, + "nanos": 639875000 + }, + "Value": { + "Annotation": { + "attributes": { + "attribute_map": { + "event": { + "Value": { + "StringValue": { + "value": "nothing" + } + } + } + } + } + } + } + } + ] + } + } + ], + "SourceFormat": "" + } \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/ocproto_batch_02.json b/internal/otel_collector/translator/trace/jaeger/testdata/ocproto_batch_02.json new file mode 100644 index 00000000000..8db79571bb1 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/ocproto_batch_02.json @@ -0,0 +1,69 @@ +{ + "Node": { + "identifier": {}, + "library_info": {}, + "service_info": { + "name": "api" + } + }, + "Resource": null, + "Spans": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "parent_span_id": "AAAAAABoxOM=", + "name": { + "value": "get" + }, + "start_time": { + "seconds": 1485467191, + "nanos": 639875000 + }, + "end_time": { + "seconds": 1485467191, + "nanos": 662813000 + }, + "attributes": { + "attribute_map": { + "peer.service": { + "Value": { + "StringValue": { + "value": "AAAAAAAAMDk=" + } + } + } + } + } + }, + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZk=", + "parent_span_id": "AAAAAABoxOM=", + "name": { + "value": "get" + }, + "start_time": { + "seconds": 1485467191, + "nanos": 639875000 + }, + "end_time": { + "seconds": 1485467191, + "nanos": 662813000 + }, + "links": { + "link": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "type": 2 + }, + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABoxOM=" + } + ] + } + } + ], + "SourceFormat": "" + } \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/proto_batch_no_binary_tags_01.json b/internal/otel_collector/translator/trace/jaeger/testdata/proto_batch_no_binary_tags_01.json new file mode 100644 index 00000000000..99e36923d1b --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/proto_batch_no_binary_tags_01.json @@ -0,0 +1,149 @@ +{ + "process": { + "service_name": "api", + "tags": [ + { + "key": "hostname", + "v_str": "api246-sjc1" + }, + { + "key": "pid", + "v_type": 2, + "v_int64": 13 + }, + { + "key": "start.time", + "v_str": "2017-01-26T21:46:30.639875Z" + }, + { + "key": "ip", + "v_str": "10.53.69.61" + }, + { + "key": "opencensus.exporterversion", + "v_str": "someVersion" + }, + { + "key": "a.bool", + 
"v_str": "true" + }, + { + "key": "a.double", + "v_str": "1234.56789" + }, + { + "key": "a.long", + "v_str": "123456789" + }, + { + "key": "a.binary", + "v_str": "AQIDBAMCAQ==" + }, + { + "key": "resource_key1", + "v_str": "resource_val1" + }, + { + "key": "opencensus.resourcetype", + "v_str": "k8s.io/container" + } + ] + }, + "spans": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "references": [ + { + "trace_id" : "AAAAAAAAAABSlpqJVVcaPw==", + "span_id" : "AAAAAABoxOM=", + "refType" : "CHILD_OF" + } + ], + "operation_name": "get", + "start_time": "2017-01-26T21:46:31.639875Z", + "duration": 22938000, + "tags": [ + { + "key": "http.url", + "v_str": "http://localhost:15598/client_transactions" + }, + { + "key": "span.kind", + "v_str": "client" + }, + { + "key": "peer.port", + "v_type": 2, + "v_int64": 53931 + }, + { + "key": "someBool", + "v_type": 1, + "v_bool": true + }, + { + "key": "someDouble", + "v_type": 3, + "v_float64": 129.8 + }, + { + "key": "peer.service", + "v_str": "rtapi" + }, + { + "key": "peer.ipv4", + "v_type": 2, + "v_int64": 3224716605 + } + ], + "logs": [ + { + "timestamp": "2017-01-26T21:46:31.639874Z", + "fields": [ + { + "key": "oc.timeevent.messageevent.id", + "v_type": 2 + }, + { + "key": "oc.timeevent.messageevent.type", + "v_str": "SENT" + }, + { + "key": "oc.timeevent.messageevent.csize", + "v_int64": 512, + "v_type": 2 + }, + { + "key": "oc.timeevent.messageevent.usize", + "v_int64": 1024, + "v_type": 2 + } + ] + }, + { + "timestamp": "2017-01-26T21:46:31.639875Z", + "fields": [ + { + "key": "key1", + "v_str": "value1" + } + ] + }, + { + "timestamp": "2017-01-26T21:46:31.639875Z", + "fields": [ + { + "key": "event", + "v_str": "nothing" + }, + { + "key": "oc.timeevent.annotation.description", + "v_str": "annotation description" + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/proto_batch_no_binary_tags_02.json b/internal/otel_collector/translator/trace/jaeger/testdata/proto_batch_no_binary_tags_02.json new file mode 100644 index 00000000000..c25987de440 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/proto_batch_no_binary_tags_02.json @@ -0,0 +1,65 @@ +{ + "process": { + "service_name": "api" + }, + "spans": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=", + "operation_name": "get", + "start_time": "2017-01-26T21:46:31.639875Z", + "duration": 22938000, + "tags": [ + { + "key": "peer.service", + "v_str": "AAAAAAAAMDk=" + }, + { + "key": "span.kind", + "v_str": "server" + } + ] + }, + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZk=", + "operation_name": "get", + "references": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=" + }, + { + "ref_type": 1, + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABoxOM=" + }, + { + "ref_type": 1 + } + ], + "start_time": "2017-01-26T21:46:31.639875Z", + "duration": 22938000, + "tags": [ + { + "key": "span.kind", + "v_str": "server" + } + ] + }, + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZA=", + "operation_name": "get2", + "references": [ + { + "trace_id": "AAAAAAAAAABSlpqJVVcaPw==", + "span_id": "AAAAAABkfZg=" + } + ], + "start_time": "2017-01-26T21:46:32.639875Z", + "duration": 22938000 + } + ] + } + \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/thrift_batch_01.json 
b/internal/otel_collector/translator/trace/jaeger/testdata/thrift_batch_01.json new file mode 100644 index 00000000000..1f516ed0cf9 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/thrift_batch_01.json @@ -0,0 +1,116 @@ +{ + "process": { + "serviceName": "api", + "tags": [ + { + "key": "hostname", + "vType": "STRING", + "vStr": "api246-sjc1" + }, + { + "key": "ip", + "vType": "STRING", + "vStr": "10.53.69.61" + }, + { + "key": "jaeger.version", + "vType": "STRING", + "vStr": "Python-3.1.0" + }, + { + "key": "a.bool", + "vType": "BOOL", + "vBool": true + }, + { + "key": "a.double", + "vType": "DOUBLE", + "vDouble": 1234.56789 + }, + { + "key": "a.long", + "vType": "LONG", + "vLong": 123456789 + }, + { + "key": "a.binary", + "vType": "BINARY", + "vBinary": [1, 2, 3, 4, 3, 2, 1] + } + ] + }, + "spans": [ + { + "traceIdLow": 5951113872249657919, + "spanId": 6585752, + "parentSpanId": 6866147, + "operationName": "get", + "startTime": 1485467191639875, + "duration": 22938, + "tags": [ + { + "key": "http.url", + "vType": "STRING", + "vStr": "http://localhost:15598/client_transactions" + }, + { + "key": "span.kind", + "vType": "STRING", + "vStr": "server" + }, + { + "key": "peer.port", + "vType": "LONG", + "vLong": 53931 + }, + { + "key": "someBool", + "vType": "BOOL", + "vBool": true + }, + { + "key": "someDouble", + "vType": "DOUBLE", + "vDouble": 129.8 + }, + { + "key": "peer.service", + "vType": "STRING", + "vStr": "rtapi" + }, + { + "key": "peer.ipv4", + "vType": "LONG", + "vLong": 3224716605 + } + ], + "logs": [ + { + "timestamp": 1485467191639875, + "fields": [ + { + "key": "key1", + "vType": "STRING", + "vStr": "value1" + }, + { + "key": "key2", + "vType": "STRING", + "vStr": "value2" + } + ] + }, + { + "timestamp": 1485467191639875, + "fields": [ + { + "key": "event", + "vType": "STRING", + "vStr": "nothing" + } + ] + } + ] + } + ] +} diff --git a/internal/otel_collector/translator/trace/jaeger/testdata/thrift_batch_02.json b/internal/otel_collector/translator/trace/jaeger/testdata/thrift_batch_02.json new file mode 100644 index 00000000000..8d8ff3be39d --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/testdata/thrift_batch_02.json @@ -0,0 +1,44 @@ +{ + "process": { + "serviceName": "api", + "tags": [] + }, + "spans": [ + { + "traceIdLow": 5951113872249657919, + "spanId": 6585752, + "parentSpanId": 6866147, + "operationName": "get", + "startTime": 1485467191639875, + "duration": 22938, + "tags": [ + { + "key": "peer.service", + "vType": "BINARY", + "vBinary": "AAAAAAAAMDk=" + } + ] + }, + { + "traceIdLow": 5951113872249657919, + "spanId": 6585753, + "parentSpanId": 6866147, + "operationName": "get", + "references": [ + { + "refType": "CHILD_OF", + "traceIdLow": 5951113872249657919, + "spanId": 6585752 + }, + { + "refType": "FOLLOWS_FROM", + "traceIdLow": 5951113872249657919, + "spanId": 6866147 + } + ], + "startTime": 1485467191639875, + "duration": 22938 + } + ] + } + \ No newline at end of file diff --git a/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go b/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go new file mode 100644 index 00000000000..887612daece --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go @@ -0,0 +1,446 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+
+	"github.com/jaegertracing/jaeger/model"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/translator/conventions"
+	tracetranslator "go.opentelemetry.io/collector/translator/trace"
+)
+
+// InternalTracesToJaegerProto translates internal trace data into the Jaeger Proto for gRPC.
+// It returns a slice of translated Jaeger batches, or an error if the translation failed.
+func InternalTracesToJaegerProto(td pdata.Traces) ([]*model.Batch, error) {
+	resourceSpans := td.ResourceSpans()
+
+	if resourceSpans.Len() == 0 {
+		return nil, nil
+	}
+
+	batches := make([]*model.Batch, 0, resourceSpans.Len())
+
+	for i := 0; i < resourceSpans.Len(); i++ {
+		rs := resourceSpans.At(i)
+		batch, err := resourceSpansToJaegerProto(rs)
+		if err != nil {
+			return nil, err
+		}
+		if batch != nil {
+			batches = append(batches, batch)
+		}
+	}
+
+	return batches, nil
+}
+
+func resourceSpansToJaegerProto(rs pdata.ResourceSpans) (*model.Batch, error) {
+	resource := rs.Resource()
+	ilss := rs.InstrumentationLibrarySpans()
+
+	if resource.Attributes().Len() == 0 && ilss.Len() == 0 {
+		return nil, nil
+	}
+
+	batch := &model.Batch{
+		Process: resourceToJaegerProtoProcess(resource),
+	}
+
+	if ilss.Len() == 0 {
+		return batch, nil
+	}
+
+	// Approximate the number of spans as the number of spans in the first
+	// instrumentation library info.
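+	// (Spans from any additional instrumentation libraries grow the slice as needed.)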
+	jSpans := make([]*model.Span, 0, ilss.At(0).Spans().Len())
+
+	for i := 0; i < ilss.Len(); i++ {
+		ils := ilss.At(i)
+		spans := ils.Spans()
+		for j := 0; j < spans.Len(); j++ {
+			span := spans.At(j)
+			jSpan, err := spanToJaegerProto(span, ils.InstrumentationLibrary())
+			if err != nil {
+				return nil, err
+			}
+			if jSpan != nil {
+				jSpans = append(jSpans, jSpan)
+			}
+		}
+	}
+
+	batch.Spans = jSpans
+
+	return batch, nil
+}
+
+func resourceToJaegerProtoProcess(resource pdata.Resource) *model.Process {
+	process := &model.Process{}
+	attrs := resource.Attributes()
+	if attrs.Len() == 0 {
+		process.ServiceName = tracetranslator.ResourceNoServiceName
+		return process
+	}
+	attrsCount := attrs.Len()
+	if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok {
+		process.ServiceName = serviceName.StringVal()
+		attrsCount--
+	}
+	if attrsCount == 0 {
+		return process
+	}
+
+	tags := make([]model.KeyValue, 0, attrsCount)
+	process.Tags = appendTagsFromResourceAttributes(tags, attrs)
+	return process
+}
+
+func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pdata.AttributeMap) []model.KeyValue {
+	if attrs.Len() == 0 {
+		return dest
+	}
+
+	attrs.ForEach(func(key string, attr pdata.AttributeValue) {
+		if key == conventions.AttributeServiceName {
+			return
+		}
+		dest = append(dest, attributeToJaegerProtoTag(key, attr))
+	})
+	return dest
+}
+
+func appendTagsFromAttributes(dest []model.KeyValue, attrs pdata.AttributeMap) []model.KeyValue {
+	if attrs.Len() == 0 {
+		return dest
+	}
+	attrs.ForEach(func(key string, attr pdata.AttributeValue) {
+		dest = append(dest, attributeToJaegerProtoTag(key, attr))
+	})
+	return dest
+}
+
+func attributeToJaegerProtoTag(key string, attr pdata.AttributeValue) model.KeyValue {
+	tag := model.KeyValue{Key: key}
+	switch attr.Type() {
+	case pdata.AttributeValueSTRING:
+		// Jaeger-to-Internal maps binary tags to string attributes and encodes them as
+		// base64 strings. Blindly attempting to decode base64 seems like too much.
+ tag.VType = model.ValueType_STRING + tag.VStr = attr.StringVal() + case pdata.AttributeValueINT: + tag.VType = model.ValueType_INT64 + tag.VInt64 = attr.IntVal() + case pdata.AttributeValueBOOL: + tag.VType = model.ValueType_BOOL + tag.VBool = attr.BoolVal() + case pdata.AttributeValueDOUBLE: + tag.VType = model.ValueType_FLOAT64 + tag.VFloat64 = attr.DoubleVal() + case pdata.AttributeValueMAP, pdata.AttributeValueARRAY: + tag.VType = model.ValueType_STRING + tag.VStr = tracetranslator.AttributeValueToString(attr, false) + } + return tag +} + +func spanToJaegerProto(span pdata.Span, libraryTags pdata.InstrumentationLibrary) (*model.Span, error) { + traceID, err := traceIDToJaegerProto(span.TraceID()) + if err != nil { + return nil, err + } + + spanID, err := spanIDToJaegerProto(span.SpanID()) + if err != nil { + return nil, err + } + + jReferences, err := makeJaegerProtoReferences(span.Links(), span.ParentSpanID(), traceID) + if err != nil { + return nil, fmt.Errorf("error converting span links to Jaeger references: %w", err) + } + + startTime := pdata.UnixNanoToTime(span.StartTime()) + + return &model.Span{ + TraceID: traceID, + SpanID: spanID, + OperationName: span.Name(), + References: jReferences, + StartTime: startTime, + Duration: pdata.UnixNanoToTime(span.EndTime()).Sub(startTime), + Tags: getJaegerProtoSpanTags(span, libraryTags), + Logs: spanEventsToJaegerProtoLogs(span.Events()), + }, nil +} + +func getJaegerProtoSpanTags(span pdata.Span, instrumentationLibrary pdata.InstrumentationLibrary) []model.KeyValue { + var spanKindTag, statusCodeTag, errorTag, statusMsgTag model.KeyValue + var spanKindTagFound, statusCodeTagFound, errorTagFound, statusMsgTagFound bool + + libraryTags, libraryTagsFound := getTagsFromInstrumentationLibrary(instrumentationLibrary) + + tagsCount := span.Attributes().Len() + len(libraryTags) + + spanKindTag, spanKindTagFound = getTagFromSpanKind(span.Kind()) + if spanKindTagFound { + tagsCount++ + } + status := span.Status() + statusCodeTag, statusCodeTagFound = getTagFromStatusCode(status.Code()) + if statusCodeTagFound { + tagsCount++ + } + + errorTag, errorTagFound = getErrorTagFromStatusCode(status.Code()) + if errorTagFound { + tagsCount++ + } + + statusMsgTag, statusMsgTagFound = getTagFromStatusMsg(status.Message()) + if statusMsgTagFound { + tagsCount++ + } + + traceStateTags, traceStateTagsFound := getTagsFromTraceState(span.TraceState()) + if traceStateTagsFound { + tagsCount += len(traceStateTags) + } + + if tagsCount == 0 { + return nil + } + + tags := make([]model.KeyValue, 0, tagsCount) + if libraryTagsFound { + tags = append(tags, libraryTags...) + } + tags = appendTagsFromAttributes(tags, span.Attributes()) + if spanKindTagFound { + tags = append(tags, spanKindTag) + } + if statusCodeTagFound { + tags = append(tags, statusCodeTag) + } + if errorTagFound { + tags = append(tags, errorTag) + } + if statusMsgTagFound { + tags = append(tags, statusMsgTag) + } + if traceStateTagsFound { + tags = append(tags, traceStateTags...) 
+	}
+	return tags
+}
+
+func traceIDToJaegerProto(traceID pdata.TraceID) (model.TraceID, error) {
+	traceIDHigh, traceIDLow := tracetranslator.TraceIDToUInt64Pair(traceID)
+	if traceIDLow == 0 && traceIDHigh == 0 {
+		return model.TraceID{}, errZeroTraceID
+	}
+	return model.TraceID{
+		Low:  traceIDLow,
+		High: traceIDHigh,
+	}, nil
+}
+
+func spanIDToJaegerProto(spanID pdata.SpanID) (model.SpanID, error) {
+	uSpanID := tracetranslator.BytesToUInt64SpanID(spanID.Bytes())
+	if uSpanID == 0 {
+		return model.SpanID(0), errZeroSpanID
+	}
+	return model.SpanID(uSpanID), nil
+}
+
+// makeJaegerProtoReferences constructs Jaeger span references based on the parent span ID and span links.
+func makeJaegerProtoReferences(
+	links pdata.SpanLinkSlice,
+	parentSpanID pdata.SpanID,
+	traceID model.TraceID,
+) ([]model.SpanRef, error) {
+	parentSpanIDSet := parentSpanID.IsValid()
+	if !parentSpanIDSet && links.Len() == 0 {
+		return nil, nil
+	}
+
+	refsCount := links.Len()
+	if parentSpanIDSet {
+		refsCount++
+	}
+
+	refs := make([]model.SpanRef, 0, refsCount)
+
+	// Put the parent span ID first because backends usually look for it
+	// as the first CHILD_OF item in the model.SpanRef slice.
+	if parentSpanIDSet {
+		jParentSpanID, err := spanIDToJaegerProto(parentSpanID)
+		if err != nil {
+			return nil, fmt.Errorf("incorrect parent span ID: %v", err)
+		}
+
+		refs = append(refs, model.SpanRef{
+			TraceID: traceID,
+			SpanID:  jParentSpanID,
+			RefType: model.SpanRefType_CHILD_OF,
+		})
+	}
+
+	for i := 0; i < links.Len(); i++ {
+		link := links.At(i)
+		traceID, err := traceIDToJaegerProto(link.TraceID())
+		if err != nil {
+			continue // skip invalid link
+		}
+
+		spanID, err := spanIDToJaegerProto(link.SpanID())
+		if err != nil {
+			continue // skip invalid link
+		}
+
+		refs = append(refs, model.SpanRef{
+			TraceID: traceID,
+			SpanID:  spanID,
+
+			// Since the Jaeger RefType is not captured in internal data,
+			// use SpanRefType_FOLLOWS_FROM by default.
+			// SpanRefType_CHILD_OF is supposed to be set only from parentSpanID.
+ RefType: model.SpanRefType_FOLLOWS_FROM, + }) + } + + return refs, nil +} + +func spanEventsToJaegerProtoLogs(events pdata.SpanEventSlice) []model.Log { + if events.Len() == 0 { + return nil + } + + logs := make([]model.Log, 0, events.Len()) + for i := 0; i < events.Len(); i++ { + event := events.At(i) + fields := make([]model.KeyValue, 0, event.Attributes().Len()+1) + if event.Name() != "" { + fields = append(fields, model.KeyValue{ + Key: tracetranslator.TagMessage, + VType: model.ValueType_STRING, + VStr: event.Name(), + }) + } + fields = appendTagsFromAttributes(fields, event.Attributes()) + logs = append(logs, model.Log{ + Timestamp: pdata.UnixNanoToTime(event.Timestamp()), + Fields: fields, + }) + } + + return logs +} + +func getTagFromSpanKind(spanKind pdata.SpanKind) (model.KeyValue, bool) { + var tagStr string + switch spanKind { + case pdata.SpanKindCLIENT: + tagStr = string(tracetranslator.OpenTracingSpanKindClient) + case pdata.SpanKindSERVER: + tagStr = string(tracetranslator.OpenTracingSpanKindServer) + case pdata.SpanKindPRODUCER: + tagStr = string(tracetranslator.OpenTracingSpanKindProducer) + case pdata.SpanKindCONSUMER: + tagStr = string(tracetranslator.OpenTracingSpanKindConsumer) + case pdata.SpanKindINTERNAL: + tagStr = string(tracetranslator.OpenTracingSpanKindInternal) + default: + return model.KeyValue{}, false + } + + return model.KeyValue{ + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: tagStr, + }, true +} + +func getTagFromStatusCode(statusCode pdata.StatusCode) (model.KeyValue, bool) { + return model.KeyValue{ + Key: tracetranslator.TagStatusCode, + VInt64: int64(statusCode), + VType: model.ValueType_INT64, + }, true +} + +func getErrorTagFromStatusCode(statusCode pdata.StatusCode) (model.KeyValue, bool) { + if statusCode == pdata.StatusCodeError { + return model.KeyValue{ + Key: tracetranslator.TagError, + VBool: true, + VType: model.ValueType_BOOL, + }, true + } + return model.KeyValue{}, false + +} + +func getTagFromStatusMsg(statusMsg string) (model.KeyValue, bool) { + if statusMsg == "" { + return model.KeyValue{}, false + } + return model.KeyValue{ + Key: tracetranslator.TagStatusMsg, + VStr: statusMsg, + VType: model.ValueType_STRING, + }, true +} + +func getTagsFromTraceState(traceState pdata.TraceState) ([]model.KeyValue, bool) { + keyValues := make([]model.KeyValue, 0) + exists := traceState != pdata.TraceStateEmpty + if exists { + // TODO Bring this inline with solution for jaegertracing/jaeger-client-java #702 once available + kv := model.KeyValue{ + Key: tracetranslator.TagW3CTraceState, + VStr: string(traceState), + VType: model.ValueType_STRING, + } + keyValues = append(keyValues, kv) + } + return keyValues, exists +} + +func getTagsFromInstrumentationLibrary(il pdata.InstrumentationLibrary) ([]model.KeyValue, bool) { + keyValues := make([]model.KeyValue, 0) + if ilName := il.Name(); ilName != "" { + kv := model.KeyValue{ + Key: tracetranslator.TagInstrumentationName, + VStr: ilName, + VType: model.ValueType_STRING, + } + keyValues = append(keyValues, kv) + } + if ilVersion := il.Version(); ilVersion != "" { + kv := model.KeyValue{ + Key: tracetranslator.TagInstrumentationVersion, + VStr: ilVersion, + VType: model.ValueType_STRING, + } + keyValues = append(keyValues, kv) + } + + return keyValues, true +} diff --git a/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto_test.go b/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto_test.go new file mode 100644 index 
00000000000..7a738b1c982 --- /dev/null +++ b/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto_test.go @@ -0,0 +1,371 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "io" + "math/rand" + "testing" + + "github.com/jaegertracing/jaeger/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func TestGetTagFromStatusCode(t *testing.T) { + tests := []struct { + name string + code pdata.StatusCode + tag model.KeyValue + }{ + { + name: "ok", + code: pdata.StatusCodeOk, + tag: model.KeyValue{ + Key: tracetranslator.TagStatusCode, + VInt64: int64(pdata.StatusCodeOk), + VType: model.ValueType_INT64, + }, + }, + + { + name: "error", + code: pdata.StatusCodeError, + tag: model.KeyValue{ + Key: tracetranslator.TagStatusCode, + VInt64: int64(pdata.StatusCodeError), + VType: model.ValueType_INT64, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, ok := getTagFromStatusCode(test.code) + assert.True(t, ok) + assert.EqualValues(t, test.tag, got) + }) + } +} + +func TestGetErrorTagFromStatusCode(t *testing.T) { + errTag := model.KeyValue{ + Key: tracetranslator.TagError, + VBool: true, + VType: model.ValueType_BOOL, + } + + _, ok := getErrorTagFromStatusCode(pdata.StatusCodeUnset) + assert.False(t, ok) + + _, ok = getErrorTagFromStatusCode(pdata.StatusCodeOk) + assert.False(t, ok) + + got, ok := getErrorTagFromStatusCode(pdata.StatusCodeError) + assert.True(t, ok) + assert.EqualValues(t, errTag, got) +} + +func TestGetTagFromStatusMsg(t *testing.T) { + got, ok := getTagFromStatusMsg("") + assert.False(t, ok) + + got, ok = getTagFromStatusMsg("test-error") + assert.True(t, ok) + assert.EqualValues(t, model.KeyValue{ + Key: tracetranslator.TagStatusMsg, + VStr: "test-error", + VType: model.ValueType_STRING, + }, got) +} + +func TestGetTagFromSpanKind(t *testing.T) { + tests := []struct { + name string + kind pdata.SpanKind + tag model.KeyValue + ok bool + }{ + { + name: "unspecified", + kind: pdata.SpanKindUNSPECIFIED, + tag: model.KeyValue{}, + ok: false, + }, + + { + name: "client", + kind: pdata.SpanKindCLIENT, + tag: model.KeyValue{ + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindClient), + }, + ok: true, + }, + + { + name: "server", + kind: pdata.SpanKindSERVER, + tag: model.KeyValue{ + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindServer), + }, + ok: true, + }, + + { + name: "producer", + kind: 
pdata.SpanKindPRODUCER, + tag: model.KeyValue{ + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindProducer), + }, + ok: true, + }, + + { + name: "consumer", + kind: pdata.SpanKindCONSUMER, + tag: model.KeyValue{ + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindConsumer), + }, + ok: true, + }, + + { + name: "internal", + kind: pdata.SpanKindINTERNAL, + tag: model.KeyValue{ + Key: tracetranslator.TagSpanKind, + VType: model.ValueType_STRING, + VStr: string(tracetranslator.OpenTracingSpanKindInternal), + }, + ok: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, ok := getTagFromSpanKind(test.kind) + assert.Equal(t, test.ok, ok) + assert.EqualValues(t, test.tag, got) + }) + } +} + +func TestAttributesToJaegerProtoTags(t *testing.T) { + + attributes := pdata.NewAttributeMap() + attributes.InsertBool("bool-val", true) + attributes.InsertInt("int-val", 123) + attributes.InsertString("string-val", "abc") + attributes.InsertDouble("double-val", 1.23) + attributes.InsertString(conventions.AttributeServiceName, "service-name") + + expected := []model.KeyValue{ + { + Key: "bool-val", + VType: model.ValueType_BOOL, + VBool: true, + }, + { + Key: "int-val", + VType: model.ValueType_INT64, + VInt64: 123, + }, + { + Key: "string-val", + VType: model.ValueType_STRING, + VStr: "abc", + }, + { + Key: "double-val", + VType: model.ValueType_FLOAT64, + VFloat64: 1.23, + }, + { + Key: conventions.AttributeServiceName, + VType: model.ValueType_STRING, + VStr: "service-name", + }, + } + + got := appendTagsFromAttributes(make([]model.KeyValue, 0, len(expected)), attributes) + require.EqualValues(t, expected, got) + + // The last item in expected ("service-name") must be skipped in resource tags translation + got = appendTagsFromResourceAttributes(make([]model.KeyValue, 0, len(expected)-1), attributes) + require.EqualValues(t, expected[:4], got) +} + +func TestInternalTracesToJaegerProto(t *testing.T) { + + tests := []struct { + name string + td pdata.Traces + jb *model.Batch + err error + }{ + { + name: "empty", + td: testdata.GenerateTraceDataEmpty(), + err: nil, + }, + + { + name: "no-spans", + td: generateTraceDataResourceOnly(), + jb: &model.Batch{ + Process: generateProtoProcess(), + }, + err: nil, + }, + + { + name: "no-resource-attrs", + td: generateTraceDataResourceOnlyWithNoAttrs(), + err: nil, + }, + + { + name: "one-span-no-resources", + td: generateTraceDataOneSpanNoResourceWithTraceState(), + jb: &model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + generateProtoSpanWithTraceState(), + }, + }, + err: nil, + }, + { + name: "library-info", + td: generateTraceDataWithLibraryInfo(), + jb: &model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + generateProtoSpanWithLibraryInfo("io.opentelemetry.test"), + }, + }, + err: nil, + }, + { + name: "two-spans-child-parent", + td: generateTraceDataTwoSpansChildParent(), + jb: &model.Batch{ + Process: &model.Process{ + ServiceName: tracetranslator.ResourceNoServiceName, + }, + Spans: []*model.Span{ + generateProtoSpan(), + generateProtoChildSpanWithErrorTags(), + }, + }, + err: nil, + }, + + { + name: "two-spans-with-follower", + td: generateTraceDataTwoSpansWithFollower(), + jb: &model.Batch{ + Process: &model.Process{ + ServiceName: 
tracetranslator.ResourceNoServiceName,
+				},
+				Spans: []*model.Span{
+					generateProtoSpan(),
+					generateProtoFollowerSpan(),
+				},
+			},
+			err: nil,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			jbs, err := InternalTracesToJaegerProto(test.td)
+			assert.EqualValues(t, test.err, err)
+			if test.jb == nil {
+				assert.Len(t, jbs, 0)
+			} else {
+				require.Equal(t, 1, len(jbs))
+				assert.EqualValues(t, test.jb, jbs[0])
+			}
+		})
+	}
+}
+
+func TestInternalTracesToJaegerProtoBatchesAndBack(t *testing.T) {
+	rscSpans, err := goldendataset.GenerateResourceSpans(
+		"../../../internal/goldendataset/testdata/generated_pict_pairs_traces.txt",
+		"../../../internal/goldendataset/testdata/generated_pict_pairs_spans.txt",
+		io.Reader(rand.New(rand.NewSource(2004))))
+	assert.NoError(t, err)
+	for _, rs := range rscSpans {
+		orig := make([]*otlptrace.ResourceSpans, 1)
+		orig[0] = rs
+		td := pdata.TracesFromOtlp(orig)
+		protoBatches, err := InternalTracesToJaegerProto(td)
+		assert.NoError(t, err)
+		tdFromPB := ProtoBatchesToInternalTraces(protoBatches)
+		assert.NotNil(t, tdFromPB)
+		assert.Equal(t, td.SpanCount(), tdFromPB.SpanCount())
+	}
+}
+
+// generateProtoChildSpanWithErrorTags generates a Jaeger span to be used in the
+// internal->jaeger translation test. It is supposed to be the same as the generateProtoChildSpan
+// used in jaeger->internal, but the jaeger->internal translation infers the status code from the
+// http status if status.code is not set, so the pipeline jaeger->internal->jaeger adds two more
+// tags as a result in that case.
+func generateProtoChildSpanWithErrorTags() *model.Span {
+	span := generateProtoChildSpan()
+	span.Tags = append(span.Tags, model.KeyValue{
+		Key:    tracetranslator.TagStatusCode,
+		VType:  model.ValueType_INT64,
+		VInt64: int64(pdata.StatusCodeError),
+	})
+	span.Tags = append(span.Tags, model.KeyValue{
+		Key:   tracetranslator.TagError,
+		VBool: true,
+		VType: model.ValueType_BOOL,
+	})
+	return span
+}
+
+func BenchmarkInternalTracesToJaegerProto(b *testing.B) {
+	td := generateTraceDataTwoSpansChildParent()
+	resource := generateTraceDataResourceOnly().ResourceSpans().At(0).Resource()
+	resource.CopyTo(td.ResourceSpans().At(0).Resource())
+
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		InternalTracesToJaegerProto(td)
+	}
+}
diff --git a/internal/otel_collector/translator/trace/opencensus_helper.go b/internal/otel_collector/translator/trace/opencensus_helper.go
new file mode 100644
index 00000000000..ac00f8ef662
--- /dev/null
+++ b/internal/otel_collector/translator/trace/opencensus_helper.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetranslator
+
+import (
+	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+)
+
+// OCAttributeKeyExist returns true if the given key exists in the attributes of an OC Span.
+// It returns false if attributes is nil, the attribute map itself is nil, or the key wasn't found.
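+//
+// A minimal usage sketch (the attribute key and value here are illustrative only):
+//
+//	attrs := &tracepb.Span_Attributes{
+//		AttributeMap: map[string]*tracepb.AttributeValue{
+//			"http.status_code": {Value: &tracepb.AttributeValue_IntValue{IntValue: 200}},
+//		},
+//	}
+//	OCAttributeKeyExist(attrs, "http.status_code") // true
+//	OCAttributeKeyExist(attrs, "missing")          // false
+//	OCAttributeKeyExist(nil, "http.status_code")   // false: nil attributes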
+func OCAttributeKeyExist(ocAttributes *tracepb.Span_Attributes, key string) bool {
+	if ocAttributes == nil || ocAttributes.AttributeMap == nil {
+		return false
+	}
+	_, foundKey := ocAttributes.AttributeMap[key]
+	return foundKey
+}
diff --git a/internal/otel_collector/translator/trace/protospan_translation.go b/internal/otel_collector/translator/trace/protospan_translation.go
new file mode 100644
index 00000000000..6fe70ef2f85
--- /dev/null
+++ b/internal/otel_collector/translator/trace/protospan_translation.go
@@ -0,0 +1,307 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetranslator
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// Some of the keys used to represent OTLP constructs as tags or annotations in other formats.
+const (
+	AnnotationDescriptionKey = "description"
+
+	MessageEventIDKey               = "message.id"
+	MessageEventTypeKey             = "message.type"
+	MessageEventCompressedSizeKey   = "message.compressed_size"
+	MessageEventUncompressedSizeKey = "message.uncompressed_size"
+
+	TagMessage = "message"
+
+	TagSpanKind = "span.kind"
+
+	TagStatusCode          = "status.code"
+	TagStatusMsg           = "status.message"
+	TagError               = "error"
+	TagHTTPStatusCode      = "http.status_code"
+	TagHTTPStatusMsg       = "http.status_message"
+	TagZipkinCensusCode    = "census.status_code"
+	TagZipkinCensusMsg     = "census.status_description"
+	TagZipkinOpenCensusMsg = "opencensus.status_description"
+
+	TagW3CTraceState          = "w3c.tracestate"
+	TagServiceNameSource      = "otlp.service.name.source"
+	TagInstrumentationName    = "otlp.instrumentation.library.name"
+	TagInstrumentationVersion = "otlp.instrumentation.library.version"
+)
+
+// Constants used for signifying batch-level attribute values where not supplied by OTLP data but required
+// by other protocols.
+const (
+	ResourceNoServiceName = "OTLPResourceNoServiceName"
+)
+
+// OpenTracingSpanKind defines the possible values of the TagSpanKind tag, matching the OpenTracing
+// conventions: https://github.com/opentracing/specification/blob/master/semantic_conventions.md
+// These values are used for representing span kinds that have no
+// equivalents in OpenCensus format.
They are stored as values of TagSpanKind.
+type OpenTracingSpanKind string
+
+const (
+	OpenTracingSpanKindUnspecified OpenTracingSpanKind = ""
+	OpenTracingSpanKindClient      OpenTracingSpanKind = "client"
+	OpenTracingSpanKindServer      OpenTracingSpanKind = "server"
+	OpenTracingSpanKindConsumer    OpenTracingSpanKind = "consumer"
+	OpenTracingSpanKindProducer    OpenTracingSpanKind = "producer"
+	OpenTracingSpanKindInternal    OpenTracingSpanKind = "internal"
+)
+
+const (
+	SpanLinkDataFormat  = "%s|%s|%s|%s|%d"
+	SpanEventDataFormat = "%s|%s|%d"
+)
+
+type attrValDescript struct {
+	regex    *regexp.Regexp
+	attrType pdata.AttributeValueType
+}
+
+var attrValDescriptions = getAttrValDescripts()
+var complexAttrValDescriptions = getComplexAttrValDescripts()
+
+func getAttrValDescripts() []*attrValDescript {
+	descriptions := make([]*attrValDescript, 0, 6)
+	descriptions = append(descriptions, constructAttrValDescript("^$", pdata.AttributeValueNULL))
+	descriptions = append(descriptions, constructAttrValDescript(`^-?\d+$`, pdata.AttributeValueINT))
+	descriptions = append(descriptions, constructAttrValDescript(`^-?\d+\.\d+$`, pdata.AttributeValueDOUBLE))
+	descriptions = append(descriptions, constructAttrValDescript(`^(true|false)$`, pdata.AttributeValueBOOL))
+	descriptions = append(descriptions, constructAttrValDescript(`^\{"\w+":.+\}$`, pdata.AttributeValueMAP))
+	descriptions = append(descriptions, constructAttrValDescript(`^\[.*\]$`, pdata.AttributeValueARRAY))
+	return descriptions
+}
+
+func getComplexAttrValDescripts() []*attrValDescript {
+	descriptions := getAttrValDescripts()
+	return descriptions[4:]
+}
+
+func constructAttrValDescript(regex string, attrType pdata.AttributeValueType) *attrValDescript {
+	regexc := regexp.MustCompile(regex)
+	return &attrValDescript{
+		regex:    regexc,
+		attrType: attrType,
+	}
+}
+
+// AttributeValueToString converts an OTLP AttributeValue object to its equivalent string representation.
+func AttributeValueToString(attr pdata.AttributeValue, jsonLike bool) string {
+	switch attr.Type() {
+	case pdata.AttributeValueNULL:
+		if jsonLike {
+			return "null"
+		}
+		return ""
+	case pdata.AttributeValueSTRING:
+		if jsonLike {
+			return fmt.Sprintf("%q", attr.StringVal())
+		}
+		return attr.StringVal()
+
+	case pdata.AttributeValueBOOL:
+		return strconv.FormatBool(attr.BoolVal())
+
+	case pdata.AttributeValueDOUBLE:
+		return strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64)
+
+	case pdata.AttributeValueINT:
+		return strconv.FormatInt(attr.IntVal(), 10)
+
+	case pdata.AttributeValueMAP:
+		jsonStr, _ := json.Marshal(AttributeMapToMap(attr.MapVal()))
+		return string(jsonStr)
+
+	case pdata.AttributeValueARRAY:
+		jsonStr, _ := json.Marshal(AttributeArrayToSlice(attr.ArrayVal()))
+		return string(jsonStr)
+
+	default:
+		return fmt.Sprintf("<Unknown OpenTelemetry attribute value type %q>", attr.Type())
+	}
+}
+
+// AttributeMapToMap converts an OTLP AttributeMap to a standard Go map.
+func AttributeMapToMap(attrMap pdata.AttributeMap) map[string]interface{} {
+	rawMap := make(map[string]interface{})
+	attrMap.ForEach(func(k string, v pdata.AttributeValue) {
+		switch v.Type() {
+		case pdata.AttributeValueSTRING:
+			rawMap[k] = v.StringVal()
+		case pdata.AttributeValueINT:
+			rawMap[k] = v.IntVal()
+		case pdata.AttributeValueDOUBLE:
+			rawMap[k] = v.DoubleVal()
+		case pdata.AttributeValueBOOL:
+			rawMap[k] = v.BoolVal()
+		case pdata.AttributeValueNULL:
+			rawMap[k] = nil
+		case pdata.AttributeValueMAP:
+			rawMap[k] = AttributeMapToMap(v.MapVal())
+		case pdata.AttributeValueARRAY:
+			rawMap[k] = AttributeArrayToSlice(v.ArrayVal())
+		}
+	})
+	return rawMap
+}
+
+// AttributeArrayToSlice converts an OTLP AnyValueArray to a standard Go slice.
+func AttributeArrayToSlice(attrArray pdata.AnyValueArray) []interface{} {
+	rawSlice := make([]interface{}, 0, attrArray.Len())
+	for i := 0; i < attrArray.Len(); i++ {
+		v := attrArray.At(i)
+		switch v.Type() {
+		case pdata.AttributeValueSTRING:
+			rawSlice = append(rawSlice, v.StringVal())
+		case pdata.AttributeValueINT:
+			rawSlice = append(rawSlice, v.IntVal())
+		case pdata.AttributeValueDOUBLE:
+			rawSlice = append(rawSlice, v.DoubleVal())
+		case pdata.AttributeValueBOOL:
+			rawSlice = append(rawSlice, v.BoolVal())
+		case pdata.AttributeValueNULL:
+			rawSlice = append(rawSlice, nil)
+		default:
+			rawSlice = append(rawSlice, "")
+		}
+	}
+	return rawSlice
+}
+
+// UpsertStringToAttributeMap upserts a string value to the specified key as its native OTLP type.
+func UpsertStringToAttributeMap(key string, val string, dest pdata.AttributeMap, omitSimpleTypes bool) {
+	switch DetermineValueType(val, omitSimpleTypes) {
+	case pdata.AttributeValueINT:
+		iVal, _ := strconv.ParseInt(val, 10, 64)
+		dest.UpsertInt(key, iVal)
+	case pdata.AttributeValueDOUBLE:
+		fVal, _ := strconv.ParseFloat(val, 64)
+		dest.UpsertDouble(key, fVal)
+	case pdata.AttributeValueBOOL:
+		bVal, _ := strconv.ParseBool(val)
+		dest.UpsertBool(key, bVal)
+	case pdata.AttributeValueMAP:
+		var attrs map[string]interface{}
+		err := json.Unmarshal([]byte(val), &attrs)
+		if err == nil {
+			attrMap := pdata.NewAttributeValueMap()
+			jsonMapToAttributeMap(attrs, attrMap.MapVal())
+			dest.Upsert(key, attrMap)
+		} else {
+			dest.UpsertString(key, "")
+		}
+	case pdata.AttributeValueARRAY:
+		var jArray []interface{}
+		err := json.Unmarshal([]byte(val), &jArray)
+		if err == nil {
+			attrArr := pdata.NewAttributeValueArray()
+			jsonArrayToAttributeArray(jArray, attrArr.ArrayVal())
+			dest.Upsert(key, attrArr)
+		} else {
+			dest.UpsertString(key, "")
+		}
+	default:
+		dest.UpsertString(key, val)
+	}
+}
+
+// DetermineValueType returns the native OTLP attribute type that the string translates to.
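+//
+// For example (an illustrative sketch; classifications follow the regex descriptions defined above):
+//
+//	DetermineValueType("-128", false)      // pdata.AttributeValueINT
+//	DetermineValueType("1.5", false)       // pdata.AttributeValueDOUBLE
+//	DetermineValueType("true", false)      // pdata.AttributeValueBOOL
+//	DetermineValueType(`{"k":"v"}`, false) // pdata.AttributeValueMAP
+//	DetermineValueType("123", true)        // pdata.AttributeValueSTRING: simple types are omitted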
+func DetermineValueType(value string, omitSimpleTypes bool) pdata.AttributeValueType {
+	if omitSimpleTypes {
+		for _, desc := range complexAttrValDescriptions {
+			if desc.regex.MatchString(value) {
+				return desc.attrType
+			}
+		}
+	} else {
+		for _, desc := range attrValDescriptions {
+			if desc.regex.MatchString(value) {
+				return desc.attrType
+			}
+		}
+	}
+	return pdata.AttributeValueSTRING
+}
+
+func jsonMapToAttributeMap(attrs map[string]interface{}, dest pdata.AttributeMap) {
+	for key, val := range attrs {
+		if val == nil {
+			dest.Upsert(key, pdata.NewAttributeValueNull())
+			continue
+		}
+		if s, ok := val.(string); ok {
+			dest.UpsertString(key, s)
+		} else if d, ok := val.(float64); ok {
+			if math.Mod(d, 1.0) == 0.0 {
+				dest.UpsertInt(key, int64(d))
+			} else {
+				dest.UpsertDouble(key, d)
+			}
+		} else if b, ok := val.(bool); ok {
+			dest.UpsertBool(key, b)
+		} else if m, ok := val.(map[string]interface{}); ok {
+			value := pdata.NewAttributeValueMap()
+			jsonMapToAttributeMap(m, value.MapVal())
+			dest.Upsert(key, value)
+		} else if a, ok := val.([]interface{}); ok {
+			value := pdata.NewAttributeValueArray()
+			jsonArrayToAttributeArray(a, value.ArrayVal())
+			dest.Upsert(key, value)
+		}
+	}
+}
+
+func jsonArrayToAttributeArray(jArray []interface{}, dest pdata.AnyValueArray) {
+	for _, val := range jArray {
+		if val == nil {
+			dest.Append(pdata.NewAttributeValueNull())
+			continue
+		}
+		if s, ok := val.(string); ok {
+			dest.Append(pdata.NewAttributeValueString(s))
+		} else if d, ok := val.(float64); ok {
+			if math.Mod(d, 1.0) == 0.0 {
+				dest.Append(pdata.NewAttributeValueInt(int64(d)))
+			} else {
+				dest.Append(pdata.NewAttributeValueDouble(d))
+			}
+		} else if b, ok := val.(bool); ok {
+			dest.Append(pdata.NewAttributeValueBool(b))
+		} else {
+			dest.Append(pdata.NewAttributeValueString(""))
+		}
+	}
+}
+
+// StatusCodeFromHTTP takes an HTTP status code and returns the appropriate OpenTelemetry status code.
+// See: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#status
+func StatusCodeFromHTTP(httpStatusCode int) pdata.StatusCode {
+	if httpStatusCode >= 100 && httpStatusCode < 399 {
+		return pdata.StatusCodeUnset
+	}
+	return pdata.StatusCodeError
+}
diff --git a/internal/otel_collector/translator/trace/protospan_translation_test.go b/internal/otel_collector/translator/trace/protospan_translation_test.go
new file mode 100644
index 00000000000..b70e455fafe
--- /dev/null
+++ b/internal/otel_collector/translator/trace/protospan_translation_test.go
@@ -0,0 +1,190 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package tracetranslator + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" +) + +func TestAttributeValueToString(t *testing.T) { + tests := []struct { + name string + input pdata.AttributeValue + jsonLike bool + expected string + }{ + { + name: "string", + input: pdata.NewAttributeValueString("string value"), + jsonLike: false, + expected: "string value", + }, + { + name: "json string", + input: pdata.NewAttributeValueString("string value"), + jsonLike: true, + expected: "\"string value\"", + }, + { + name: "int64", + input: pdata.NewAttributeValueInt(42), + jsonLike: false, + expected: "42", + }, + { + name: "float64", + input: pdata.NewAttributeValueDouble(1.61803399), + jsonLike: false, + expected: "1.61803399", + }, + { + name: "boolean", + input: pdata.NewAttributeValueBool(true), + jsonLike: false, + expected: "true", + }, + { + name: "null", + input: pdata.NewAttributeValueNull(), + jsonLike: false, + expected: "", + }, + { + name: "null", + input: pdata.NewAttributeValueNull(), + jsonLike: true, + expected: "null", + }, + { + name: "map", + input: pdata.NewAttributeValueMap(), + jsonLike: false, + expected: "{}", + }, + { + name: "array", + input: pdata.NewAttributeValueArray(), + jsonLike: false, + expected: "[]", + }, + { + name: "array", + input: pdata.NewAttributeValueNull(), + jsonLike: false, + expected: "", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := AttributeValueToString(test.input, test.jsonLike) + assert.Equal(t, test.expected, actual) + dest := pdata.NewAttributeMap() + key := "keyOne" + UpsertStringToAttributeMap(key, actual, dest, false) + val, ok := dest.Get(key) + assert.True(t, ok) + if !test.jsonLike { + switch test.input.Type() { + case pdata.AttributeValueINT, pdata.AttributeValueDOUBLE, pdata.AttributeValueBOOL: + assert.EqualValues(t, test.input, val) + case pdata.AttributeValueARRAY: + assert.NotNil(t, val) + default: + assert.Equal(t, test.expected, val.StringVal()) + } + } + }) + } +} + +func TestAttributeMapToStringAndBack(t *testing.T) { + expected := pdata.NewAttributeValueMap() + attrMap := expected.MapVal() + attrMap.UpsertString("strKey", "strVal") + attrMap.UpsertInt("intKey", 7) + attrMap.UpsertDouble("floatKey", 18.6) + attrMap.UpsertBool("boolKey", false) + attrMap.Upsert("nullKey", pdata.NewAttributeValueNull()) + attrMap.Upsert("mapKey", constructTestAttributeSubmap()) + attrMap.Upsert("arrKey", constructTestAttributeSubarray()) + strVal := AttributeValueToString(expected, false) + dest := pdata.NewAttributeMap() + UpsertStringToAttributeMap("parent", strVal, dest, false) + actual, ok := dest.Get("parent") + assert.True(t, ok) + compareMaps(t, attrMap, actual.MapVal()) +} + +func TestAttributeArrayToStringAndBack(t *testing.T) { + expected := pdata.NewAttributeValueArray() + attrArr := expected.ArrayVal() + attrArr.Append(pdata.NewAttributeValueString("strVal")) + attrArr.Append(pdata.NewAttributeValueInt(7)) + attrArr.Append(pdata.NewAttributeValueDouble(18.6)) + attrArr.Append(pdata.NewAttributeValueBool(false)) + attrArr.Append(pdata.NewAttributeValueNull()) + strVal := AttributeValueToString(expected, false) + dest := pdata.NewAttributeMap() + UpsertStringToAttributeMap("parent", strVal, dest, false) + actual, ok := dest.Get("parent") + assert.True(t, ok) + compareArrays(t, attrArr, actual.ArrayVal()) +} + +func compareMaps(t *testing.T, expected pdata.AttributeMap, actual pdata.AttributeMap) { + expected.ForEach(func(k string, 
e pdata.AttributeValue) { + a, ok := actual.Get(k) + assert.True(t, ok) + if ok { + if e.Type() == pdata.AttributeValueMAP { + compareMaps(t, e.MapVal(), a.MapVal()) + } else { + assert.Equal(t, e, a) + } + } + }) +} + +func compareArrays(t *testing.T, expected pdata.AnyValueArray, actual pdata.AnyValueArray) { + for i := 0; i < expected.Len(); i++ { + e := expected.At(i) + a := actual.At(i) + if e.Type() == pdata.AttributeValueMAP { + compareMaps(t, e.MapVal(), a.MapVal()) + } else { + assert.Equal(t, e, a) + } + } +} + +func constructTestAttributeSubmap() pdata.AttributeValue { + value := pdata.NewAttributeValueMap() + value.MapVal().UpsertString("keyOne", "valOne") + value.MapVal().UpsertString("keyTwo", "valTwo") + return value +} + +func constructTestAttributeSubarray() pdata.AttributeValue { + value := pdata.NewAttributeValueArray() + a1 := pdata.NewAttributeValueString("strOne") + value.ArrayVal().Append(a1) + a2 := pdata.NewAttributeValueString("strTwo") + value.ArrayVal().Append(a2) + return value +} diff --git a/internal/otel_collector/translator/trace/zipkin/attributekeys.go b/internal/otel_collector/translator/trace/zipkin/attributekeys.go new file mode 100644 index 00000000000..cf062a3eb8d --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/attributekeys.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +// These constants are the attribute keys used when translating from zipkin +// format to the internal collector data format. +const ( + LocalEndpointIPv4 = "ipv4" + LocalEndpointIPv6 = "ipv6" + LocalEndpointPort = "port" + LocalEndpointServiceName = "serviceName" + RemoteEndpointIPv4 = "zipkin.remoteEndpoint.ipv4" + RemoteEndpointIPv6 = "zipkin.remoteEndpoint.ipv6" + RemoteEndpointPort = "zipkin.remoteEndpoint.port" + RemoteEndpointServiceName = "zipkin.remoteEndpoint.serviceName" + StartTimeAbsent = "otel.zipkin.absentField.startTime" +) diff --git a/internal/otel_collector/translator/trace/zipkin/status_code.go b/internal/otel_collector/translator/trace/zipkin/status_code.go new file mode 100644 index 00000000000..c453749e918 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/status_code.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package zipkin
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+
+	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+
+	tracetranslator "go.opentelemetry.io/collector/translator/trace"
+)
+
+type status struct {
+	codePtr *int32
+	message string
+}
+
+// statusMapper contains codes translated from different sources to OC status codes.
+type statusMapper struct {
+	// oc status code extracted from "status.code" tags
+	fromStatus status
+	// oc status code extracted from "census.status_code" tags
+	fromCensus status
+	// oc status code extracted from "http.status_code" tags
+	fromHTTP status
+	// oc status code extracted from "error" tags
+	fromErrorTag status
+	// oc status code 'unknown' when the "error" tag exists but is invalid
+	fromErrorTagUnknown status
+}
+
+// ocStatus returns an OC status from the best possible extraction source.
+// It first tries to return the status extracted from "census.status_code" to account for zipkin,
+// then falls back on the code extracted from "status.code" tags,
+// and finally falls back on the code extracted and translated from "http.status_code".
+// ocStatus must be called after all tags/attributes are processed with the `fromAttribute` method.
+func (m *statusMapper) ocStatus() *tracepb.Status {
+	var s status
+	switch {
+	case m.fromCensus.codePtr != nil:
+		s = m.fromCensus
+	case m.fromStatus.codePtr != nil:
+		s = m.fromStatus
+	case m.fromErrorTag.codePtr != nil:
+		s = m.fromErrorTag
+		if m.fromCensus.message != "" {
+			s.message = m.fromCensus.message
+		} else if m.fromStatus.message != "" {
+			s.message = m.fromStatus.message
+		}
+	case m.fromHTTP.codePtr != nil:
+		s = m.fromHTTP
+	default:
+		s = m.fromErrorTagUnknown
+	}
+
+	if s.codePtr == nil {
+		return nil
+	}
+	return &tracepb.Status{
+		Code:    *s.codePtr,
+		Message: s.message,
+	}
+}
+
+func (m *statusMapper) fromAttribute(key string, attrib *tracepb.AttributeValue) bool {
+	switch key {
+	case tracetranslator.TagZipkinCensusCode:
+		code, err := attribToStatusCode(attrib)
+		if err == nil {
+			m.fromCensus.codePtr = &code
+		}
+		return true
+
+	case tracetranslator.TagZipkinCensusMsg, tracetranslator.TagZipkinOpenCensusMsg:
+		m.fromCensus.message = attrib.GetStringValue().GetValue()
+		return true
+
+	case tracetranslator.TagStatusCode:
+		code, err := attribToStatusCode(attrib)
+		if err == nil {
+			m.fromStatus.codePtr = &code
+		}
+		return true
+
+	case tracetranslator.TagStatusMsg:
+		m.fromStatus.message = attrib.GetStringValue().GetValue()
+		return true
+
+	case tracetranslator.TagHTTPStatusCode:
+		httpCode, err := attribToStatusCode(attrib)
+		if err == nil {
+			code := tracetranslator.OCStatusCodeFromHTTP(httpCode)
+			m.fromHTTP.codePtr = &code
+		}
+
+	case tracetranslator.TagHTTPStatusMsg:
+		m.fromHTTP.message = attrib.GetStringValue().GetValue()
+
+	case tracetranslator.TagError:
+		code, ok := extractStatusFromError(attrib)
+		if ok {
+			m.fromErrorTag.codePtr = code
+			return true
+		}
+		m.fromErrorTagUnknown.codePtr = code
+	}
+	return false
+}
+
+// attribToStatusCode maps an integer or string attribute value to a status code.
+// It returns an error if the value is of another type or cannot be converted to an int32 value.
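+//
+// For example (an illustrative sketch, mirroring the cases exercised in the test file):
+//
+//	attribToStatusCode(&tracepb.AttributeValue{
+//		Value: &tracepb.AttributeValue_IntValue{IntValue: 13},
+//	}) // (13, nil)
+//	attribToStatusCode(&tracepb.AttributeValue{
+//		Value: &tracepb.AttributeValue_StringValue{StringValue: &tracepb.TruncatableString{Value: "200"}},
+//	}) // (200, nil)
+//	attribToStatusCode(nil) // (0, error: nil attribute)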
+func attribToStatusCode(attr *tracepb.AttributeValue) (int32, error) { + if attr == nil { + return 0, fmt.Errorf("nil attribute") + } + + switch val := attr.Value.(type) { + case *tracepb.AttributeValue_IntValue: + return toInt32(int(val.IntValue)) + case *tracepb.AttributeValue_StringValue: + i, err := strconv.Atoi(val.StringValue.GetValue()) + if err != nil { + return 0, err + } + return toInt32(i) + } + return 0, fmt.Errorf("invalid attribute type") +} + +func toInt32(i int) (int32, error) { + if i <= math.MaxInt32 && i >= math.MinInt32 { + return int32(i), nil + } + return 0, fmt.Errorf("outside of the int32 range") +} + +func extractStatusFromError(attrib *tracepb.AttributeValue) (*int32, bool) { + // The status is stored with the "error" key + // See https://github.com/census-instrumentation/opencensus-go/blob/1eb9a13c7dd02141e065a665f6bf5c99a090a16a/exporter/zipkin/zipkin.go#L160-L165 + var unknown int32 = 2 + + switch val := attrib.Value.(type) { + case *tracepb.AttributeValue_StringValue: + canonicalCodeStr := val.StringValue.GetValue() + if canonicalCodeStr == "" { + return nil, true + } + code, set := canonicalCodesMap[canonicalCodeStr] + if set { + return &code, true + } + default: + break + } + + return &unknown, false +} + +var canonicalCodesMap = map[string]int32{ + // https://github.com/googleapis/googleapis/blob/bee79fbe03254a35db125dc6d2f1e9b752b390fe/google/rpc/code.proto#L33-L186 + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + "UNAUTHENTICATED": 16, +} diff --git a/internal/otel_collector/translator/trace/zipkin/status_code_test.go b/internal/otel_collector/translator/trace/zipkin/status_code_test.go new file mode 100644 index 00000000000..c568dd34b1d --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/status_code_test.go @@ -0,0 +1,277 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zipkin + +import ( + "fmt" + "strconv" + "testing" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/stretchr/testify/assert" +) + +func TestAttribToStatusCode(t *testing.T) { + _, atoiError := strconv.Atoi("nan") + + tests := []struct { + name string + attr *tracepb.AttributeValue + code int32 + err error + }{ + { + name: "nil", + attr: nil, + code: 0, + err: fmt.Errorf("nil attribute"), + }, + + { + name: "valid-int-code", + attr: &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_IntValue{IntValue: int64(0)}, + }, + code: 0, + err: nil, + }, + + { + name: "invalid-int-code", + attr: &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_IntValue{IntValue: int64(1 << 32)}, + }, + code: 0, + err: fmt.Errorf("outside of the int32 range"), + }, + + { + name: "valid-string-code", + attr: &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{StringValue: &tracepb.TruncatableString{Value: "200"}}, + }, + code: 200, + err: nil, + }, + + { + name: "invalid-string-code", + attr: &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{StringValue: &tracepb.TruncatableString{Value: "nan"}}, + }, + code: 0, + err: atoiError, + }, + + { + name: "bool-code", + attr: &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_BoolValue{BoolValue: true}, + }, + code: 0, + err: fmt.Errorf("invalid attribute type"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := attribToStatusCode(test.attr) + assert.Equal(t, test.code, got) + assert.Equal(t, test.err, err) + }) + } +} + +func TestStatusCodeMapperCases(t *testing.T) { + tests := []struct { + name string + expected *tracepb.Status + attributes map[string]string + }{ + { + name: "no relevant attributes", + expected: nil, + attributes: map[string]string{ + "not.relevant": "data", + }, + }, + + { + name: "http: 500", + expected: &tracepb.Status{Code: 13}, + attributes: map[string]string{ + "http.status_code": "500", + }, + }, + + { + name: "http: message only, nil", + expected: nil, + attributes: map[string]string{ + "http.status_message": "something", + }, + }, + + { + name: "http: 500", + expected: &tracepb.Status{Code: 13, Message: "a message"}, + attributes: map[string]string{ + "http.status_code": "500", + "http.status_message": "a message", + }, + }, + + { + name: "http: 500, with error attribute", + expected: &tracepb.Status{Code: 13}, + attributes: map[string]string{ + "http.status_code": "500", + "error": "an error occurred", + }, + }, + + { + name: "oc: internal", + expected: &tracepb.Status{Code: 13, Message: "a description"}, + attributes: map[string]string{ + "census.status_code": "13", + "census.status_description": "a description", + }, + }, + + { + name: "oc: description and error", + expected: &tracepb.Status{Code: 13, Message: "a description"}, + attributes: map[string]string{ + "opencensus.status_description": "a description", + "error": "INTERNAL", + }, + }, + + { + name: "oc: error only", + expected: &tracepb.Status{Code: 13, Message: ""}, + attributes: map[string]string{ + "error": "INTERNAL", + }, + }, + + { + name: "oc: empty error tag", + expected: nil, + attributes: map[string]string{ + "error": "", + }, + }, + + { + name: "oc: description only, no status", + expected: nil, + attributes: map[string]string{ + "opencensus.status_description": "a description", + }, + }, + + { + name: "oc: priority over http", + expected: &tracepb.Status{Code: 4, Message: "deadline expired"}, + attributes: 
map[string]string{ + "census.status_description": "deadline expired", + "census.status_code": "4", + + "http.status_message": "a description", + "http.status_code": "500", + }, + }, + + { + name: "error: valid oc status priority over http", + expected: &tracepb.Status{Code: 4}, + attributes: map[string]string{ + "error": "DEADLINE_EXCEEDED", + + "http.status_message": "a description", + "http.status_code": "500", + }, + }, + + { + name: "error: invalid oc status uses http", + expected: &tracepb.Status{Code: 13, Message: "a description"}, + attributes: map[string]string{ + "error": "123", + + "http.status_message": "a description", + "http.status_code": "500", + }, + }, + + { + name: "error only: string description", + expected: &tracepb.Status{Code: 2}, + attributes: map[string]string{ + "error": "a description", + }, + }, + + { + name: "error only: true", + expected: &tracepb.Status{Code: 2}, + attributes: map[string]string{ + "error": "true", + }, + }, + + { + name: "error only: false", + expected: &tracepb.Status{Code: 2}, + attributes: map[string]string{ + "error": "false", + }, + }, + + { + name: "error only: 1", + expected: &tracepb.Status{Code: 2}, + attributes: map[string]string{ + "error": "1", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + attributes := attributesFromMap(test.attributes) + + sMapper := &statusMapper{} + for k, v := range attributes { + sMapper.fromAttribute(k, v) + } + + got := sMapper.ocStatus() + assert.EqualValues(t, test.expected, got) + }) + } +} + +func attributesFromMap(mapValues map[string]string) map[string]*tracepb.AttributeValue { + res := map[string]*tracepb.AttributeValue{} + + for k, v := range mapValues { + pbAttrib := parseAnnotationValue(v, false) + res[k] = pbAttrib + } + return res +} diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_error_batch.json b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_error_batch.json new file mode 100644 index 00000000000..372289ee72a --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_error_batch.json @@ -0,0 +1,64 @@ +[ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkAvailability", + "id": "0ed2e63cbe71f5a8", + "annotations": [ + { + "timestamp": 1544805927448081, + "value": "sr", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927450000, + "value": "custom time event", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927460102, + "value": "ss", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + } + ] + }, + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "f9ebb6e64880612a", + "parentId": "BADID", + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [ + { + "timestamp": 1544805927453923, + "value": "cs", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927457717, + "value": "cr", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + } + ] + } +] diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_local_component.json b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_local_component.json new file mode 100644 index 00000000000..10114470553 --- /dev/null +++ 
b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_local_component.json @@ -0,0 +1,37 @@ +[ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "fe351a053fbcac1f", + "parentId": "0ed2e63cbe71f5a8", + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [], + "binaryAnnotations": [ + { + "key": "lc", + "value": "myLocalComponent" + } + ] + }, + { + "traceId": "0ed2e63cbe71f5a8", + "name": "sendOrder", + "id": "fe351a053fbcac2f", + "parentId": "0ed2e63cbe71f5a8", + "timestamp": 1544805927453925, + "duration": 3740, + "annotations": [], + "binaryAnnotations": [ + { + "key": "lc", + "value": "myLocalComponent", + "endpoint": { + "ipv4": "172.31.0.7", + "port": 0, + "serviceName": "myServiceName" + } + } + ] + } +] diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_multiple_batches.json b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_multiple_batches.json new file mode 100644 index 00000000000..8e1b58f2032 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_multiple_batches.json @@ -0,0 +1,154 @@ +[ + [ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkAvailability", + "id": "0ed2e63cbe71f5a8", + "annotations": [ + { + "timestamp": 1544805927448081, + "value": "sr", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927450000, + "value": "custom time event", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927460102, + "value": "ss", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + } + ] + } + ], + [ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "f9ebb6e64880612a", + "parentId": "0ed2e63cbe71f5a8", + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [ + { + "timestamp": 1544805927453923, + "value": "cs", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927457717, + "value": "cr", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + } + ] + } + ], + [ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkAvailability", + "id": "0ed2e63cbe71f5a8", + "timestamp": 1544805927446743, + "duration": 12956, + "annotations": [ + { + "timestamp": 1544805927446743, + "value": "cs", + "endpoint": { + "ipv4": "172.31.0.2", + "port": 0, + "serviceName": "front-proxy" + } + }, + { + "timestamp": 1544805927460510, + "value": "cr", + "endpoint": { + "ipv4": "172.31.0.2", + "port": 0, + "serviceName": "front-proxy" + } + } + ] + } + ], + [ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "f9ebb6e64880612a", + "parentId": "0ed2e63cbe71f5a8", + "annotations": [ + { + "timestamp": 1544805927454487, + "value": "sr", + "endpoint": { + "ipv4": "172.31.0.7", + "port": 0, + "serviceName": "service2" + } + }, + { + "timestamp": 1544805927457320, + "value": "ss", + "endpoint": { + "ipv4": "172.31.0.7", + "port": 0, + "serviceName": "service2" + } + } + ], + "binaryAnnotations": [ + { + "key": "http.url", + "value": "http://localhost:9000/trace/2" + }, + { + "key": "http.status_code", + "value": "200" + }, + { + "key": "success", + "value": "true" + }, + { + "key": "processed", + "value": "1.5" + } + ] + } + ], + [ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "fe351a053fbcac1f", + "parentId": "0ed2e63cbe71f5a8", + "timestamp": 
1544805927453923, + "duration": 3740, + "annotations": [] + } + ] +] diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_single_batch.json b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_single_batch.json new file mode 100644 index 00000000000..c446e320952 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_single_batch.json @@ -0,0 +1,144 @@ +[ + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkAvailability", + "id": "0ed2e63cbe71f5a8", + "annotations": [ + { + "timestamp": 1544805927448081, + "value": "sr", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927450000, + "value": "custom time event", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927460102, + "value": "ss", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + } + ] + }, + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "f9ebb6e64880612a", + "parentId": "0ed2e63cbe71f5a8", + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [ + { + "timestamp": 1544805927453923, + "value": "cs", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + }, + { + "timestamp": 1544805927457717, + "value": "cr", + "endpoint": { + "ipv4": "172.31.0.4", + "port": 0, + "serviceName": "service1" + } + } + ] + }, + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkAvailability", + "id": "0ed2e63cbe71f5a8", + "timestamp": 1544805927446743, + "duration": 12956, + "annotations": [ + { + "timestamp": 1544805927446743, + "value": "cs", + "endpoint": { + "ipv4": "172.31.0.2", + "port": 0, + "serviceName": "front-proxy" + } + }, + { + "timestamp": 1544805927460510, + "value": "cr", + "endpoint": { + "ipv4": "172.31.0.2", + "port": 0, + "serviceName": "front-proxy" + } + } + ] + }, + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "f9ebb6e64880612a", + "parentId": "0ed2e63cbe71f5a8", + "annotations": [ + { + "timestamp": 1544805927454487, + "value": "sr", + "endpoint": { + "ipv4": "172.31.0.7", + "port": 0, + "serviceName": "service2" + } + }, + { + "timestamp": 1544805927457320, + "value": "ss", + "endpoint": { + "ipv4": "172.31.0.7", + "port": 0, + "serviceName": "service2" + } + } + ], + "binaryAnnotations": [ + { + "key": "http.url", + "value": "http://localhost:9000/trace/2" + }, + { + "key": "http.status_code", + "value": "200" + }, + { + "key": "success", + "value": "true" + }, + { + "key": "processed", + "value": "1.5" + } + ] + }, + { + "traceId": "0ed2e63cbe71f5a8", + "name": "checkStock", + "id": "fe351a053fbcac1f", + "parentId": "0ed2e63cbe71f5a8", + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [] + } +] diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_local_component.json b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_local_component.json new file mode 100644 index 00000000000..d6f099dbe43 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_local_component.json @@ -0,0 +1,39 @@ +[ + { + "trace_id": 1068169210207794600, + "name": "checkStock", + "id": -129168404463703009, + "parent_id": 1068169210207794600, + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [], + "binary_annotations": [ + { + "key": "lc", + "annotation_type": "STRING", + "value": 
"bXlMb2NhbENvbXBvbmVudA==" + } + ] + }, + { + "trace_id": 1068169210207794600, + "name": "sendOrder", + "id": -129168404463703007, + "parent_id": 1068169210207794600, + "timestamp": 1544805927453925, + "duration": 3740, + "annotations": [], + "binary_annotations": [ + { + "key": "lc", + "annotation_type": "STRING", + "value": "bXlMb2NhbENvbXBvbmVudA==", + "host": { + "ipv4": -1407254521, + "port": 0, + "service_name": "myServiceName" + } + } + ] + } +] diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_single_batch.json b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_single_batch.json new file mode 100644 index 00000000000..6bbd0c8778c --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_single_batch.json @@ -0,0 +1,148 @@ +[ + { + "trace_id": 1068169210207794600, + "name": "checkAvailability", + "id": 1068169210207794600, + "annotations": [ + { + "timestamp": 1544805927448081, + "value": "sr", + "host": { + "ipv4": -1407254524, + "port": 0, + "service_name": "service1" + } + }, + { + "timestamp": 1544805927450000, + "value": "custom time event", + "host": { + "ipv4": -1407254524, + "port": 0, + "service_name": "service1" + } + }, + { + "timestamp": 1544805927460102, + "value": "ss", + "host": { + "ipv4": -1407254524, + "port": 0, + "service_name": "service1" + } + } + ] + }, + { + "trace_id": 1068169210207794600, + "name": "checkStock", + "id": -438055438563385046, + "parent_id": 1068169210207794600, + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [ + { + "timestamp": 1544805927453923, + "value": "cs", + "host": { + "ipv4": -1407254524, + "port": 0, + "service_name": "service1" + } + }, + { + "timestamp": 1544805927457717, + "value": "cr", + "host": { + "ipv4": -1407254524, + "port": 0, + "service_name": "service1" + } + } + ] + }, + { + "trace_id": 1068169210207794600, + "name": "checkAvailability", + "id": 1068169210207794600, + "timestamp": 1544805927446743, + "duration": 12956, + "annotations": [ + { + "timestamp": 1544805927446743, + "value": "cs", + "host": { + "ipv4": -1407254526, + "port": 0, + "service_name": "front-proxy" + } + }, + { + "timestamp": 1544805927460510, + "value": "cr", + "host": { + "ipv4": -1407254526, + "port": 0, + "service_name": "front-proxy" + } + } + ] + }, + { + "trace_id": 1068169210207794600, + "name": "checkStock", + "id": -438055438563385046, + "parent_id": 1068169210207794600, + "annotations": [ + { + "timestamp": 1544805927454487, + "value": "sr", + "host": { + "ipv4": -1407254521, + "port": 0, + "service_name": "service2" + } + }, + { + "timestamp": 1544805927457320, + "value": "ss", + "host": { + "ipv4": -1407254521, + "port": 0, + "service_name": "service2" + } + } + ], + "binary_annotations": [ + { + "key": "http.url", + "annotation_type": "STRING", + "value": "aHR0cDovL2xvY2FsaG9zdDo5MDAwL3RyYWNlLzI=" + }, + { + "key": "http.status_code", + "annotation_type": "I64", + "value": "AAAAAAAAAMgAAA==" + }, + { + "key": "success", + "annotation_type": "BOOL", + "value": "AQ==" + }, + { + "key": "processed", + "annotation_type": "DOUBLE", + "value": "P/gAAAAAAAA=" + } + ] + }, + { + "trace_id": 1068169210207794600, + "name": "checkStock", + "id": -129168404463703009, + "parent_id": 1068169210207794600, + "timestamp": 1544805927453923, + "duration": 3740, + "annotations": [] + } +] diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_single.json 
b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_single.json new file mode 100644 index 00000000000..875eda1a0c5 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_single.json @@ -0,0 +1,38 @@ +[ + { + "traceId": "4d1e00c0db9010db86154a4ba6e91385", + "parentId": "86154a4ba6e91385", + "id": "4d1e00c0db9010db", + "kind": "CLIENT", + "name": "get", + "timestamp": 1472470996199000, + "duration": 207000, + "localEndpoint": { + "serviceName": "frontend", + "ipv6": "7::0.128.128.127" + }, + "remoteEndpoint": { + "serviceName": "backend", + "ipv4": "192.168.99.101", + "port": 9000 + }, + "annotations": [ + { + "timestamp": 1472470996238000, + "value": "foo" + }, + { + "timestamp": 1472470996403000, + "value": "bar" + } + ], + "tags": { + "http.path": "/api", + "http.status_code": "500", + "cache_hit": "true", + "ping_count": "25", + "timeout": "12.3", + "clnt/finagle.version": "6.45.0" + } + } +] diff --git a/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2.go b/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2.go new file mode 100644 index 00000000000..ad263129387 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2.go @@ -0,0 +1,351 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "strconv" + "time" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +var sampled = true + +// InternalTracesToZipkinSpans translates internal trace data into Zipkin v2 spans. +// Returns a slice of Zipkin SpanModel's. +func InternalTracesToZipkinSpans(td pdata.Traces) ([]*zipkinmodel.SpanModel, error) { + + resourceSpans := td.ResourceSpans() + if resourceSpans.Len() == 0 { + return nil, nil + } + + zSpans := make([]*zipkinmodel.SpanModel, 0, td.SpanCount()) + + for i := 0; i < resourceSpans.Len(); i++ { + batch, err := resourceSpansToZipkinSpans(resourceSpans.At(i), td.SpanCount()/resourceSpans.Len()) + if err != nil { + return zSpans, err + } + if batch != nil { + zSpans = append(zSpans, batch...) 
+ } + } + + return zSpans, nil +} + +func resourceSpansToZipkinSpans(rs pdata.ResourceSpans, estSpanCount int) ([]*zipkinmodel.SpanModel, error) { + resource := rs.Resource() + ilss := rs.InstrumentationLibrarySpans() + + if resource.Attributes().Len() == 0 && ilss.Len() == 0 { + return nil, nil + } + + localServiceName, zTags := resourceToZipkinEndpointServiceNameAndAttributeMap(resource) + + zSpans := make([]*zipkinmodel.SpanModel, 0, estSpanCount) + for i := 0; i < ilss.Len(); i++ { + ils := ilss.At(i) + extractInstrumentationLibraryTags(ils.InstrumentationLibrary(), zTags) + spans := ils.Spans() + for j := 0; j < spans.Len(); j++ { + zSpan, err := spanToZipkinSpan(spans.At(j), localServiceName, zTags) + if err != nil { + return zSpans, err + } + zSpans = append(zSpans, zSpan) + } + } + + return zSpans, nil +} + +func extractInstrumentationLibraryTags(il pdata.InstrumentationLibrary, zTags map[string]string) { + if ilName := il.Name(); ilName != "" { + zTags[tracetranslator.TagInstrumentationName] = ilName + } + if ilVer := il.Version(); ilVer != "" { + zTags[tracetranslator.TagInstrumentationVersion] = ilVer + } +} + +func spanToZipkinSpan( + span pdata.Span, + localServiceName string, + zTags map[string]string, +) (*zipkinmodel.SpanModel, error) { + + tags := aggregateSpanTags(span, zTags) + + zs := &zipkinmodel.SpanModel{} + + if !span.TraceID().IsValid() { + return zs, errors.New("TraceID is invalid") + } + zs.TraceID = convertTraceID(span.TraceID()) + if !span.SpanID().IsValid() { + return zs, errors.New("SpanID is invalid") + } + zs.ID = convertSpanID(span.SpanID()) + + if len(span.TraceState()) > 0 { + tags[tracetranslator.TagW3CTraceState] = string(span.TraceState()) + } + + if span.ParentSpanID().IsValid() { + id := convertSpanID(span.ParentSpanID()) + zs.ParentID = &id + } + + zs.Sampled = &sampled + zs.Name = span.Name() + zs.Timestamp = pdata.UnixNanoToTime(span.StartTime()) + if span.EndTime() != 0 { + zs.Duration = time.Duration(span.EndTime() - span.StartTime()) + } + zs.Kind = spanKindToZipkinKind(span.Kind()) + if span.Kind() == pdata.SpanKindINTERNAL { + tags[tracetranslator.TagSpanKind] = "internal" + } + + redundantKeys := make(map[string]bool, 8) + zs.LocalEndpoint = zipkinEndpointFromTags(tags, localServiceName, false, redundantKeys) + zs.RemoteEndpoint = zipkinEndpointFromTags(tags, "", true, redundantKeys) + + removeRedundentTags(redundantKeys, tags) + + status := span.Status() + tags[tracetranslator.TagStatusCode] = status.Code().String() + if status.Message() != "" { + tags[tracetranslator.TagStatusMsg] = status.Message() + if int32(status.Code()) > 0 { + zs.Err = fmt.Errorf("%s", status.Message()) + } + } + + if err := spanEventsToZipkinAnnotations(span.Events(), zs); err != nil { + return nil, err + } + if err := spanLinksToZipkinTags(span.Links(), tags); err != nil { + return nil, err + } + + zs.Tags = tags + + return zs, nil +} + +func aggregateSpanTags(span pdata.Span, zTags map[string]string) map[string]string { + tags := make(map[string]string) + for key, val := range zTags { + tags[key] = val + } + spanTags := attributeMapToStringMap(span.Attributes()) + for key, val := range spanTags { + tags[key] = val + } + return tags +} + +func spanEventsToZipkinAnnotations(events pdata.SpanEventSlice, zs *zipkinmodel.SpanModel) error { + if events.Len() > 0 { + zAnnos := make([]zipkinmodel.Annotation, events.Len()) + for i := 0; i < events.Len(); i++ { + event := events.At(i) + if event.Attributes().Len() == 0 && event.DroppedAttributesCount() == 0 { + zAnnos[i] = 
zipkinmodel.Annotation{ + Timestamp: pdata.UnixNanoToTime(event.Timestamp()), + Value: event.Name(), + } + } else { + jsonStr, err := json.Marshal(tracetranslator.AttributeMapToMap(event.Attributes())) + if err != nil { + return err + } + zAnnos[i] = zipkinmodel.Annotation{ + Timestamp: pdata.UnixNanoToTime(event.Timestamp()), + Value: fmt.Sprintf(tracetranslator.SpanEventDataFormat, event.Name(), jsonStr, + event.DroppedAttributesCount()), + } + } + } + zs.Annotations = zAnnos + } + return nil +} + +func spanLinksToZipkinTags(links pdata.SpanLinkSlice, zTags map[string]string) error { + for i := 0; i < links.Len(); i++ { + link := links.At(i) + key := fmt.Sprintf("otlp.link.%d", i) + jsonStr, err := json.Marshal(tracetranslator.AttributeMapToMap(link.Attributes())) + if err != nil { + return err + } + zTags[key] = fmt.Sprintf(tracetranslator.SpanLinkDataFormat, link.TraceID().HexString(), + link.SpanID().HexString(), link.TraceState(), jsonStr, link.DroppedAttributesCount()) + } + return nil +} + +func attributeMapToStringMap(attrMap pdata.AttributeMap) map[string]string { + rawMap := make(map[string]string) + attrMap.ForEach(func(k string, v pdata.AttributeValue) { + rawMap[k] = tracetranslator.AttributeValueToString(v, false) + }) + return rawMap +} + +func removeRedundentTags(redundantKeys map[string]bool, zTags map[string]string) { + for k, v := range redundantKeys { + if v { + delete(zTags, k) + } + } +} + +func resourceToZipkinEndpointServiceNameAndAttributeMap( + resource pdata.Resource, +) (serviceName string, zTags map[string]string) { + zTags = make(map[string]string) + attrs := resource.Attributes() + if attrs.Len() == 0 { + return tracetranslator.ResourceNoServiceName, zTags + } + + attrs.ForEach(func(k string, v pdata.AttributeValue) { + zTags[k] = tracetranslator.AttributeValueToString(v, false) + }) + + serviceName = extractZipkinServiceName(zTags) + return serviceName, zTags +} + +func extractZipkinServiceName(zTags map[string]string) string { + var serviceName string + if sn, ok := zTags[conventions.AttributeServiceName]; ok { + serviceName = sn + delete(zTags, conventions.AttributeServiceName) + } else if fn, ok := zTags[conventions.AttributeFaasName]; ok { + serviceName = fn + delete(zTags, conventions.AttributeFaasName) + zTags[tracetranslator.TagServiceNameSource] = conventions.AttributeFaasName + } else if fn, ok := zTags[conventions.AttributeK8sDeployment]; ok { + serviceName = fn + delete(zTags, conventions.AttributeK8sDeployment) + zTags[tracetranslator.TagServiceNameSource] = conventions.AttributeK8sDeployment + } else if fn, ok := zTags[conventions.AttributeProcessExecutableName]; ok { + serviceName = fn + delete(zTags, conventions.AttributeProcessExecutableName) + zTags[tracetranslator.TagServiceNameSource] = conventions.AttributeProcessExecutableName + } else { + serviceName = tracetranslator.ResourceNoServiceName + } + return serviceName +} + +func spanKindToZipkinKind(kind pdata.SpanKind) zipkinmodel.Kind { + switch kind { + case pdata.SpanKindCLIENT: + return zipkinmodel.Client + case pdata.SpanKindSERVER: + return zipkinmodel.Server + case pdata.SpanKindPRODUCER: + return zipkinmodel.Producer + case pdata.SpanKindCONSUMER: + return zipkinmodel.Consumer + default: + return zipkinmodel.Undetermined + } +} + +func zipkinEndpointFromTags( + zTags map[string]string, + localServiceName string, + remoteEndpoint bool, + redundantKeys map[string]bool, +) (endpoint *zipkinmodel.Endpoint) { + + serviceName := localServiceName + if peerSvc, ok := 
zTags[conventions.AttributePeerService]; ok && remoteEndpoint { + serviceName = peerSvc + redundantKeys[conventions.AttributePeerService] = true + } + + var ipKey, portKey string + if remoteEndpoint { + ipKey, portKey = conventions.AttributeNetPeerIP, conventions.AttributeNetPeerPort + } else { + ipKey, portKey = conventions.AttributeNetHostIP, conventions.AttributeNetHostPort + } + + var ip net.IP + ipv6Selected := false + if ipStr, ok := zTags[ipKey]; ok { + ipv6Selected = isIPv6Address(ipStr) + ip = net.ParseIP(ipStr) + redundantKeys[ipKey] = true + } + + var port uint64 + if portStr, ok := zTags[portKey]; ok { + port, _ = strconv.ParseUint(portStr, 10, 16) + redundantKeys[portKey] = true + } + + if serviceName == "" && ip == nil { + return nil + } + + zEndpoint := &zipkinmodel.Endpoint{ + ServiceName: serviceName, + Port: uint16(port), + } + if ipv6Selected { + zEndpoint.IPv6 = ip + } else { + zEndpoint.IPv4 = ip + } + + return zEndpoint +} + +func isIPv6Address(ipStr string) bool { + for i := 0; i < len(ipStr); i++ { + if ipStr[i] == ':' { + return true + } + } + return false +} + +func convertTraceID(t pdata.TraceID) zipkinmodel.TraceID { + h, l := tracetranslator.TraceIDToUInt64Pair(t) + return zipkinmodel.TraceID{High: h, Low: l} +} + +func convertSpanID(s pdata.SpanID) zipkinmodel.ID { + return zipkinmodel.ID(tracetranslator.BytesToUInt64SpanID(s.Bytes())) +} diff --git a/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2_test.go b/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2_test.go new file mode 100644 index 00000000000..b3d5d66fb7b --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2_test.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
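A small illustration of the tag-to-endpoint and service-name extraction above, assuming it sits in the same package; exampleEndpointFromTags is hypothetical and not part of this change. Note that the consumed net.peer.* keys are marked redundant so removeRedundentTags later deletes them from the span tags.

package zipkin

import "fmt"

// exampleEndpointFromTags shows how a remote endpoint is derived from span tags.
func exampleEndpointFromTags() {
	tags := map[string]string{
		"net.peer.ip":   "192.168.99.101",
		"net.peer.port": "9000",
	}
	redundant := make(map[string]bool)
	ep := zipkinEndpointFromTags(tags, "", true, redundant)
	fmt.Println(ep.IPv4, ep.Port) // 192.168.99.101 9000

	// Service-name precedence: "service.name" beats "faas.name".
	name := extractZipkinServiceName(map[string]string{
		"service.name": "frontend",
		"faas.name":    "checkout-fn",
	})
	fmt.Println(name) // frontend
}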
+ +package zipkin + +import ( + "errors" + "io" + "math/rand" + "testing" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/internal/goldendataset" + "go.opentelemetry.io/collector/internal/testdata" +) + +func TestInternalTracesToZipkinSpans(t *testing.T) { + tests := []struct { + name string + td pdata.Traces + zs []*zipkinmodel.SpanModel + err error + }{ + { + name: "empty", + td: testdata.GenerateTraceDataEmpty(), + err: nil, + }, + { + name: "oneEmpty", + td: testdata.GenerateTraceDataOneEmptyResourceSpans(), + zs: make([]*zipkinmodel.SpanModel, 0), + err: nil, + }, + { + name: "noLibs", + td: testdata.GenerateTraceDataNoLibraries(), + zs: make([]*zipkinmodel.SpanModel, 0), + err: nil, + }, + { + name: "oneEmptyLib", + td: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + zs: make([]*zipkinmodel.SpanModel, 0), + err: nil, + }, + { + name: "oneSpanNoResrouce", + td: testdata.GenerateTraceDataOneSpanNoResource(), + zs: make([]*zipkinmodel.SpanModel, 0), + err: errors.New("TraceID is invalid"), + }, + { + name: "oneSpan", + td: generateTraceOneSpanOneTraceID(), + zs: []*zipkinmodel.SpanModel{zipkinOneSpan()}, + err: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + zss, err := InternalTracesToZipkinSpans(test.td) + assert.EqualValues(t, test.err, err) + if test.name == "empty" { + assert.Nil(t, zss) + } else { + assert.Equal(t, len(test.zs), len(zss)) + assert.EqualValues(t, test.zs, zss) + } + }) + } +} + +func TestInternalTracesToZipkinSpansAndBack(t *testing.T) { + rscSpans, err := goldendataset.GenerateResourceSpans( + "../../../internal/goldendataset/testdata/generated_pict_pairs_traces.txt", + "../../../internal/goldendataset/testdata/generated_pict_pairs_spans.txt", + io.Reader(rand.New(rand.NewSource(2004)))) + assert.NoError(t, err) + for _, rs := range rscSpans { + orig := make([]*otlptrace.ResourceSpans, 1) + orig[0] = rs + td := pdata.TracesFromOtlp(orig) + zipkinSpans, err := InternalTracesToZipkinSpans(td) + assert.NoError(t, err) + assert.Equal(t, td.SpanCount(), len(zipkinSpans)) + tdFromZS, zErr := V2SpansToInternalTraces(zipkinSpans, false) + assert.NoError(t, zErr, zipkinSpans) + assert.NotNil(t, tdFromZS) + assert.Equal(t, td.SpanCount(), tdFromZS.SpanCount()) + } +} + +func generateTraceOneSpanOneTraceID() pdata.Traces { + td := testdata.GenerateTraceDataOneSpan() + span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) + span.SetTraceID(pdata.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10})) + span.SetSpanID(pdata.NewSpanID([8]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})) + return td +} + +func zipkinOneSpan() *zipkinmodel.SpanModel { + trueBool := true + return &zipkinmodel.SpanModel{ + SpanContext: zipkinmodel.SpanContext{ + TraceID: zipkinmodel.TraceID{High: 72623859790382856, Low: 651345242494996240}, + ID: 72623859790382856, + ParentID: nil, + Debug: false, + Sampled: &trueBool, + Err: errors.New("status-cancelled"), + }, + LocalEndpoint: &zipkinmodel.Endpoint{ + ServiceName: "OTLPResourceNoServiceName", + }, + RemoteEndpoint: nil, + Annotations: []zipkinmodel.Annotation{ + { + Timestamp: testdata.TestSpanEventTime, + Value: "event-with-attr|{\"span-event-attr\":\"span-event-attr-val\"}|2", + 
}, + { + Timestamp: testdata.TestSpanEventTime, + Value: "event|{}|2", + }, + }, + Tags: map[string]string{ + "resource-attr": "resource-attr-val-1", + "status.code": "STATUS_CODE_ERROR", + "status.message": "status-cancelled", + }, + Name: "operationA", + Timestamp: testdata.TestSpanStartTime, + Duration: 1000000468, + Shared: false, + } +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan.go new file mode 100644 index 00000000000..b1dbb23dc2c --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan.go @@ -0,0 +1,267 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "math" + "net" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + "google.golang.org/protobuf/types/known/timestamppb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +// v1ThriftBatchToOCProto converts Zipkin v1 spans to OC Proto. +func v1ThriftBatchToOCProto(zSpans []*zipkincore.Span) ([]consumerdata.TraceData, error) { + ocSpansAndParsedAnnotations := make([]ocSpanAndParsedAnnotations, 0, len(zSpans)) + for _, zSpan := range zSpans { + ocSpan, parsedAnnotations := zipkinV1ThriftToOCSpan(zSpan) + ocSpansAndParsedAnnotations = append(ocSpansAndParsedAnnotations, ocSpanAndParsedAnnotations{ + ocSpan: ocSpan, + parsedAnnotations: parsedAnnotations, + }) + } + + return zipkinToOCProtoBatch(ocSpansAndParsedAnnotations) +} + +func zipkinV1ThriftToOCSpan(zSpan *zipkincore.Span) (*tracepb.Span, *annotationParseResult) { + traceIDHigh := int64(0) + if zSpan.TraceIDHigh != nil { + traceIDHigh = *zSpan.TraceIDHigh + } + + // TODO: (@pjanotti) ideally we should error here instead of generating invalid OC proto + // however per https://go.opentelemetry.io/collector/issues/349 + // failures on the receivers in general are silent at this moment, so letting them + // proceed for now. We should validate the traceID, spanID and parentID are good with + // OC proto requirements. 
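+	// Illustration (a hypothetical note, not in the upstream source): with
+	// traceIDHigh = 0 and zSpan.TraceID = 1068169210207794600 (as in the thrift
+	// testdata), Int64ToByteTraceID yields the 16-byte big-endian trace ID
+	// 00000000000000000ed2e63cbe71f5a8, i.e. the JSON form "0ed2e63cbe71f5a8".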
+ traceID := tracetranslator.Int64ToByteTraceID(traceIDHigh, zSpan.TraceID) + spanID := tracetranslator.Int64ToByteSpanID(zSpan.ID) + var parentID []byte + if zSpan.ParentID != nil { + parentIDBytes := tracetranslator.Int64ToByteSpanID(*zSpan.ParentID) + parentID = parentIDBytes[:] + } + + parsedAnnotations := parseZipkinV1ThriftAnnotations(zSpan.Annotations) + attributes, ocStatus, localComponent := zipkinV1ThriftBinAnnotationsToOCAttributes(zSpan.BinaryAnnotations) + if parsedAnnotations.Endpoint.ServiceName == unknownServiceName && localComponent != "" { + parsedAnnotations.Endpoint.ServiceName = localComponent + } + + var startTime, endTime *timestamppb.Timestamp + if zSpan.Timestamp == nil { + startTime = parsedAnnotations.EarlyAnnotationTime + endTime = parsedAnnotations.LateAnnotationTime + } else { + startTime = epochMicrosecondsToTimestamp(*zSpan.Timestamp) + var duration int64 + if zSpan.Duration != nil { + duration = *zSpan.Duration + } + endTime = epochMicrosecondsToTimestamp(*zSpan.Timestamp + duration) + } + + ocSpan := &tracepb.Span{ + TraceId: traceID[:], + SpanId: spanID[:], + ParentSpanId: parentID, + Status: ocStatus, + Kind: parsedAnnotations.Kind, + TimeEvents: parsedAnnotations.TimeEvents, + StartTime: startTime, + EndTime: endTime, + Attributes: attributes, + } + + if zSpan.Name != "" { + ocSpan.Name = &tracepb.TruncatableString{Value: zSpan.Name} + } + + setSpanKind(ocSpan, parsedAnnotations.Kind, parsedAnnotations.ExtendedKind) + + return ocSpan, parsedAnnotations +} + +func parseZipkinV1ThriftAnnotations(ztAnnotations []*zipkincore.Annotation) *annotationParseResult { + annotations := make([]*annotation, 0, len(ztAnnotations)) + for _, ztAnnot := range ztAnnotations { + annot := &annotation{ + Timestamp: ztAnnot.Timestamp, + Value: ztAnnot.Value, + Endpoint: toTranslatorEndpoint(ztAnnot.Host), + } + annotations = append(annotations, annot) + } + return parseZipkinV1Annotations(annotations) +} + +func toTranslatorEndpoint(e *zipkincore.Endpoint) *endpoint { + if e == nil { + return nil + } + + var ipv4, ipv6 string + if e.Ipv4 != 0 { + ipv4 = net.IPv4(byte(e.Ipv4>>24), byte(e.Ipv4>>16), byte(e.Ipv4>>8), byte(e.Ipv4)).String() + } + if len(e.Ipv6) != 0 { + ipv6 = net.IP(e.Ipv6).String() + } + return &endpoint{ + ServiceName: e.ServiceName, + IPv4: ipv4, + IPv6: ipv6, + Port: int32(e.Port), + } +} + +var trueByteSlice = []byte{1} + +func zipkinV1ThriftBinAnnotationsToOCAttributes(ztBinAnnotations []*zipkincore.BinaryAnnotation) (attributes *tracepb.Span_Attributes, status *tracepb.Status, fallbackServiceName string) { + if len(ztBinAnnotations) == 0 { + return nil, nil, "" + } + + sMapper := &statusMapper{} + var localComponent string + attributeMap := make(map[string]*tracepb.AttributeValue) + for _, binaryAnnotation := range ztBinAnnotations { + pbAttrib := &tracepb.AttributeValue{} + binAnnotationType := binaryAnnotation.AnnotationType + if binaryAnnotation.Host != nil { + fallbackServiceName = binaryAnnotation.Host.ServiceName + } + switch binaryAnnotation.AnnotationType { + case zipkincore.AnnotationType_BOOL: + isTrue := bytes.Equal(binaryAnnotation.Value, trueByteSlice) + pbAttrib.Value = &tracepb.AttributeValue_BoolValue{BoolValue: isTrue} + case zipkincore.AnnotationType_BYTES: + bytesStr := base64.StdEncoding.EncodeToString(binaryAnnotation.Value) + pbAttrib.Value = &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: bytesStr}} + case zipkincore.AnnotationType_DOUBLE: + if d, err := 
bytesFloat64ToFloat64(binaryAnnotation.Value); err != nil { + pbAttrib.Value = strAttributeForError(err) + } else { + pbAttrib.Value = &tracepb.AttributeValue_DoubleValue{DoubleValue: d} + } + case zipkincore.AnnotationType_I16: + if i, err := bytesInt16ToInt64(binaryAnnotation.Value); err != nil { + pbAttrib.Value = strAttributeForError(err) + } else { + pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: i} + } + case zipkincore.AnnotationType_I32: + if i, err := bytesInt32ToInt64(binaryAnnotation.Value); err != nil { + pbAttrib.Value = strAttributeForError(err) + } else { + pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: i} + } + case zipkincore.AnnotationType_I64: + if i, err := bytesInt64ToInt64(binaryAnnotation.Value); err != nil { + pbAttrib.Value = strAttributeForError(err) + } else { + pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: i} + } + case zipkincore.AnnotationType_STRING: + pbAttrib.Value = &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: string(binaryAnnotation.Value)}} + default: + err := fmt.Errorf("unknown zipkin v1 binary annotation type (%d)", int(binAnnotationType)) + pbAttrib.Value = strAttributeForError(err) + } + + key := binaryAnnotation.Key + if key == zipkincore.LOCAL_COMPONENT { + // TODO: (@pjanotti) add reference to OpenTracing and change related tags to use them + key = "component" + localComponent = string(binaryAnnotation.Value) + } + + if drop := sMapper.fromAttribute(key, pbAttrib); drop { + continue + } + + attributeMap[key] = pbAttrib + } + + status = sMapper.ocStatus() + + if len(attributeMap) == 0 { + return nil, status, "" + } + + if fallbackServiceName == "" { + fallbackServiceName = localComponent + } + + attributes = &tracepb.Span_Attributes{ + AttributeMap: attributeMap, + } + return attributes, status, fallbackServiceName +} + +var errNotEnoughBytes = errors.New("not enough bytes representing the number") + +func bytesInt16ToInt64(b []byte) (int64, error) { + const minSliceLength = 2 + if len(b) < minSliceLength { + return 0, errNotEnoughBytes + } + return int64(binary.BigEndian.Uint16(b[:minSliceLength])), nil +} + +func bytesInt32ToInt64(b []byte) (int64, error) { + const minSliceLength = 4 + if len(b) < minSliceLength { + return 0, errNotEnoughBytes + } + return int64(binary.BigEndian.Uint32(b[:minSliceLength])), nil +} + +func bytesInt64ToInt64(b []byte) (int64, error) { + const minSliceLength = 8 + if len(b) < minSliceLength { + return 0, errNotEnoughBytes + } + return int64(binary.BigEndian.Uint64(b[:minSliceLength])), nil +} + +func bytesFloat64ToFloat64(b []byte) (float64, error) { + const minSliceLength = 8 + if len(b) < minSliceLength { + return 0.0, errNotEnoughBytes + } + bits := binary.BigEndian.Uint64(b) + return math.Float64frombits(bits), nil +} + +func strAttributeForError(err error) *tracepb.AttributeValue_StringValue { + return &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{ + Value: "<" + err.Error() + ">", + }, + } +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan_test.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan_test.go new file mode 100644 index 00000000000..101ba74e3a6 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan_test.go @@ -0,0 +1,619 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"encoding/binary"
+	"encoding/json"
+	"io/ioutil"
+	"math"
+	"sort"
+	"testing"
+
+	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+	"github.com/jaegertracing/jaeger/thrift-gen/zipkincore"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	tracetranslator "go.opentelemetry.io/collector/translator/trace"
+)
+
+func TestZipkinThriftFallbackToLocalComponent(t *testing.T) {
+	blob, err := ioutil.ReadFile("./testdata/zipkin_v1_thrift_local_component.json")
+	require.NoError(t, err, "Failed to load test data")
+
+	var ztSpans []*zipkincore.Span
+	err = json.Unmarshal(blob, &ztSpans)
+	require.NoError(t, err, "Failed to unmarshal json into zipkin v1 thrift")
+
+	reqs, err := v1ThriftBatchToOCProto(ztSpans)
+	require.NoError(t, err, "Failed to translate zipkinv1 thrift to OC proto")
+	require.Equal(t, 2, len(reqs), "Invalid trace service requests count")
+
+	// Ensure a deterministic order of nodes.
+	sort.Slice(reqs, func(i, j int) bool {
+		return reqs[i].Node.ServiceInfo.Name < reqs[j].Node.ServiceInfo.Name
+	})
+
+	// The first span didn't have a host/endpoint to provide a service name, so the local component is used.
+	got := reqs[0].Node.ServiceInfo.Name
+	require.Equal(t, "myLocalComponent", got)
+
+	// The second span has a host/endpoint to provide the service name, so the local component is not used.
+ got = reqs[1].Node.ServiceInfo.Name + require.Equal(t, "myServiceName", got) +} + +func TestV1ThriftToOCProto(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_thrift_single_batch.json") + require.NoError(t, err, "Failed to load test data") + + var ztSpans []*zipkincore.Span + err = json.Unmarshal(blob, &ztSpans) + require.NoError(t, err, "Failed to unmarshal json into zipkin v1 thrift") + + got, err := v1ThriftBatchToOCProto(ztSpans) + require.NoError(t, err, "Failed to translate zipkinv1 thrift to OC proto") + + want := ocBatchesFromZipkinV1 + sortTraceByNodeName(want) + sortTraceByNodeName(got) + + assert.EqualValues(t, want, got) +} + +func BenchmarkV1ThriftToOCProto(b *testing.B) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_thrift_single_batch.json") + require.NoError(b, err, "Failed to load test data") + + var ztSpans []*zipkincore.Span + err = json.Unmarshal(blob, &ztSpans) + require.NoError(b, err, "Failed to unmarshal json into zipkin v1 thrift") + + for n := 0; n < b.N; n++ { + v1ThriftBatchToOCProto(ztSpans) + } +} + +func TestZipkinThriftAnnotationsToOCStatus(t *testing.T) { + type test struct { + haveTags []*zipkincore.BinaryAnnotation + wantAttributes *tracepb.Span_Attributes + wantStatus *tracepb.Status + } + + cases := []test{ + // too large code for OC + { + haveTags: []*zipkincore.BinaryAnnotation{{ + Key: "status.code", + Value: uint64ToBytes(math.MaxInt64), + AnnotationType: zipkincore.AnnotationType_I64, + }}, + wantAttributes: nil, + wantStatus: nil, + }, + // only status.code tag + { + haveTags: []*zipkincore.BinaryAnnotation{{ + Key: "status.code", + Value: uint64ToBytes(5), + AnnotationType: zipkincore.AnnotationType_I64, + }}, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 5, + }, + }, + { + haveTags: []*zipkincore.BinaryAnnotation{{ + Key: "status.code", + Value: uint32ToBytes(6), + AnnotationType: zipkincore.AnnotationType_I32, + }}, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 6, + }, + }, + { + haveTags: []*zipkincore.BinaryAnnotation{{ + Key: "status.code", + Value: uint16ToBytes(7), + AnnotationType: zipkincore.AnnotationType_I16, + }}, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 7, + }, + }, + // only status.message tag + { + haveTags: []*zipkincore.BinaryAnnotation{{ + Key: "status.message", + Value: []byte("Forbidden"), + AnnotationType: zipkincore.AnnotationType_STRING, + }}, + wantAttributes: nil, + wantStatus: nil, + }, + // both status.code and status.message + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "status.code", + Value: uint32ToBytes(13), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "status.message", + Value: []byte("Forbidden"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + }, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 13, + Message: "Forbidden", + }, + }, + + // http status.code + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "http.status_code", + Value: uint32ToBytes(404), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "http.status_message", + Value: []byte("NotFound"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: 
"NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 5, + Message: "NotFound", + }, + }, + + // http and oc + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "http.status_code", + Value: uint32ToBytes(404), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "http.status_message", + Value: []byte("NotFound"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + { + Key: "status.code", + Value: uint32ToBytes(13), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "status.message", + Value: []byte("Forbidden"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 13, + Message: "Forbidden", + }, + }, + + // http and only oc code + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "http.status_code", + Value: uint32ToBytes(404), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "http.status_message", + Value: []byte("NotFound"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + { + Key: "status.code", + Value: uint32ToBytes(14), + AnnotationType: zipkincore.AnnotationType_I32, + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 14, + }, + }, + // http and only oc message + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "http.status_code", + Value: uint32ToBytes(404), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "http.status_message", + Value: []byte("NotFound"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + { + Key: "status.message", + Value: []byte("Forbidden"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 5, + Message: "NotFound", + }, + }, + + // census tags + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "census.status_code", + Value: uint32ToBytes(18), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "census.status_description", + Value: []byte("RPCError"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + }, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 18, + Message: "RPCError", + }, + }, + + // census tags priority over others + { + haveTags: []*zipkincore.BinaryAnnotation{ + { + Key: "census.status_code", + Value: uint32ToBytes(18), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "census.status_description", + Value: []byte("RPCError"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, 
+ { + Key: "http.status_code", + Value: uint32ToBytes(404), + AnnotationType: zipkincore.AnnotationType_I32, + }, + { + Key: "http.status_message", + Value: []byte("NotFound"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + { + Key: "status.message", + Value: []byte("Forbidden"), + AnnotationType: zipkincore.AnnotationType_STRING, + }, + { + Key: "status.code", + Value: uint32ToBytes(1), + AnnotationType: zipkincore.AnnotationType_I32, + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 18, + Message: "RPCError", + }, + }, + } + + for i, c := range cases { + zSpans := []*zipkincore.Span{{ + ID: 1, + TraceID: 1, + BinaryAnnotations: c.haveTags, + }} + gb, err := v1ThriftBatchToOCProto(zSpans) + if err != nil { + t.Errorf("#%d: Unexpected error: %v", i, err) + continue + } + gs := gb[0].Spans[0] + require.Equal(t, c.wantAttributes, gs.Attributes, "Unsuccessful conversion %d", i) + require.Equal(t, c.wantStatus, gs.Status, "Unsuccessful conversion %d", i) + } +} + +func TestThriftHTTPToGRPCStatusCode(t *testing.T) { + for i := int32(100); i <= 600; i++ { + wantStatus := tracetranslator.OCStatusCodeFromHTTP(i) + gb, err := v1ThriftBatchToOCProto([]*zipkincore.Span{{ + ID: 1, + TraceID: 1, + BinaryAnnotations: []*zipkincore.BinaryAnnotation{ + { + Key: "http.status_code", + Value: uint32ToBytes(uint32(i)), + AnnotationType: zipkincore.AnnotationType_I32, + }, + }, + }}) + if err != nil { + t.Errorf("#%d: Unexpected error: %v", i, err) + continue + } + gs := gb[0].Spans[0] + require.Equal(t, wantStatus, gs.Status.Code, "Unsuccessful conversion %d", i) + } +} + +func Test_bytesInt16ToInt64(t *testing.T) { + tests := []struct { + name string + bytes []byte + want int64 + wantErr error + }{ + { + name: "too short byte slice", + bytes: nil, + want: 0, + wantErr: errNotEnoughBytes, + }, + { + name: "exact size byte slice", + bytes: []byte{0, 200}, + want: 200, + wantErr: nil, + }, + { + name: "large byte slice", + bytes: []byte{0, 128, 200, 200}, + want: 128, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := bytesInt16ToInt64(tt.bytes) + if err != tt.wantErr { + t.Errorf("bytesInt16ToInt64() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("bytesInt16ToInt64() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_bytesInt32ToInt64(t *testing.T) { + tests := []struct { + name string + bytes []byte + want int64 + wantErr error + }{ + { + name: "too short byte slice", + bytes: []byte{}, + want: 0, + wantErr: errNotEnoughBytes, + }, + { + name: "exact size byte slice", + bytes: []byte{0, 0, 0, 202}, + want: 202, + wantErr: nil, + }, + { + name: "large byte slice", + bytes: []byte{0, 0, 0, 128, 0, 0, 0, 0}, + want: 128, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := bytesInt32ToInt64(tt.bytes) + if err != tt.wantErr { + t.Errorf("bytesInt32ToInt64() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("bytesInt32ToInt64() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_bytesInt64ToInt64(t *testing.T) { + tests := []struct { + name 
string + bytes []byte + want int64 + wantErr error + }{ + { + name: "too short byte slice", + bytes: []byte{0, 0, 0, 0}, + want: 0, + wantErr: errNotEnoughBytes, + }, + { + name: "exact size byte slice", + bytes: []byte{0, 0, 0, 0, 0, 0, 0, 202}, + want: 202, + wantErr: nil, + }, + { + name: "large byte slice", + bytes: []byte{0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0}, + want: 128, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := bytesInt64ToInt64(tt.bytes) + if err != tt.wantErr { + t.Errorf("bytesInt64ToInt64() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("bytesInt64ToInt64() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_bytesFloat64ToFloat64(t *testing.T) { + tests := []struct { + name string + bytes []byte + want float64 + wantErr error + }{ + { + name: "too short byte slice", + bytes: []byte{0, 0, 0, 0}, + want: 0, + wantErr: errNotEnoughBytes, + }, + { + name: "exact size byte slice", + bytes: []byte{64, 9, 33, 251, 84, 68, 45, 24}, + want: 3.141592653589793, + wantErr: nil, + }, + { + name: "large byte slice", + bytes: []byte{64, 9, 33, 251, 84, 68, 45, 24, 0, 0, 0, 0}, + want: 3.141592653589793, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := bytesFloat64ToFloat64(tt.bytes) + if err != tt.wantErr { + t.Errorf("bytesFloat64ToFloat64() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("bytesFloat64ToFloat64() = %v, want %v", got, tt.want) + } + }) + } +} + +func uint64ToBytes(i uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, i) + return b +} + +func uint32ToBytes(i uint32) []byte { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, i) + return b +} + +func uint16ToBytes(i uint16) []byte { + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, i) + return b +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go new file mode 100644 index 00000000000..7c3fa6d7fb2 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
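The base64 values in the thrift testdata decode with exactly the big-endian conventions exercised by the byte-helper tests above. A standalone sketch (hypothetical, not part of this change) reproducing two values from zipkin_v1_thrift_single_batch.json:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// "http.status_code", annotation_type I64: first 8 bytes, big-endian.
	i64, _ := base64.StdEncoding.DecodeString("AAAAAAAAAMgAAA==")
	fmt.Println(int64(binary.BigEndian.Uint64(i64[:8]))) // 200

	// "processed", annotation_type DOUBLE: IEEE-754 bits, big-endian.
	dbl, _ := base64.StdEncoding.DecodeString("P/gAAAAAAAA=")
	fmt.Println(math.Float64frombits(binary.BigEndian.Uint64(dbl[:8]))) // 1.5
}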
+ +package zipkin + +import ( + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/internaldata" +) + +func V1ThriftBatchToInternalTraces(zSpans []*zipkincore.Span) (pdata.Traces, error) { + traces := pdata.NewTraces() + + ocTraces, _ := v1ThriftBatchToOCProto(zSpans) + + for _, td := range ocTraces { + tmp := internaldata.OCToTraceData(td) + tmp.ResourceSpans().MoveAndAppendTo(traces.ResourceSpans()) + } + return traces, nil +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces_test.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces_test.go new file mode 100644 index 00000000000..c019ee7c8cb --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces_test.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "encoding/json" + "io/ioutil" + "testing" + + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestV1ThriftToTraces(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_thrift_single_batch.json") + require.NoError(t, err, "Failed to load test data") + + var ztSpans []*zipkincore.Span + err = json.Unmarshal(blob, &ztSpans) + require.NoError(t, err, "Failed to unmarshal json into zipkin v1 thrift") + + got, err := V1ThriftBatchToInternalTraces(ztSpans) + require.NoError(t, err, "Failed to translate zipkinv1 thrift to OC proto") + + assert.Equal(t, 5, got.SpanCount()) +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan.go new file mode 100644 index 00000000000..27863fe8c1c --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan.go @@ -0,0 +1,517 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
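+
+// Illustrative input shape (not part of the upstream sources): this file
+// handles a JSON array of Zipkin v1 spans matching the zipkinV1Span struct
+// below; timestamps and durations are epoch/elapsed microseconds. A minimal
+// example, with values taken from the test fixtures:
+//
+//	[{
+//	  "traceId": "00000000000000010000000000000002",
+//	  "id": "0000000000000001",
+//	  "name": "checkAvailability",
+//	  "timestamp": 1544805927446743,
+//	  "duration": 12956,
+//	  "binaryAnnotations": [{
+//	    "key": "http.status_code",
+//	    "value": "200",
+//	    "endpoint": {"serviceName": "front-proxy", "ipv4": "172.31.0.2"}
+//	  }]
+//	}]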
+ +package zipkin + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "strconv" + "time" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" + "google.golang.org/protobuf/types/known/timestamppb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + "go.opentelemetry.io/collector/consumer/pdata" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +var ( + // ZipkinV1 friendly conversion errors + msgZipkinV1JSONUnmarshalError = "zipkinv1" + msgZipkinV1TraceIDError = "zipkinV1 span traceId" + msgZipkinV1SpanIDError = "zipkinV1 span id" + msgZipkinV1ParentIDError = "zipkinV1 span parentId" + // Generic hex to ID conversion errors + errHexTraceIDWrongLen = errors.New("hex traceId span has wrong length (expected 16 or 32)") + errHexTraceIDParsing = errors.New("failed to parse hex traceId") + errHexTraceIDZero = errors.New("traceId is zero") + errHexIDWrongLen = errors.New("hex Id has wrong length (expected 16)") + errHexIDParsing = errors.New("failed to parse hex Id") + errHexIDZero = errors.New("ID is zero") +) + +// Trace translation from Zipkin V1 is a bit of special case since there is no model +// defined in golang for Zipkin V1 spans and there is no need to define one here, given +// that the zipkinV1Span defined below is as defined at: +// https://zipkin.io/zipkin-api/zipkin-api.yaml +type zipkinV1Span struct { + TraceID string `json:"traceId"` + Name string `json:"name,omitempty"` + ParentID string `json:"parentId,omitempty"` + ID string `json:"id"` + Timestamp int64 `json:"timestamp"` + Duration int64 `json:"duration"` + Debug bool `json:"debug,omitempty"` + Annotations []*annotation `json:"annotations,omitempty"` + BinaryAnnotations []*binaryAnnotation `json:"binaryAnnotations,omitempty"` +} + +// endpoint structure used by zipkinV1Span. +type endpoint struct { + ServiceName string `json:"serviceName"` + IPv4 string `json:"ipv4"` + IPv6 string `json:"ipv6"` + Port int32 `json:"port"` +} + +// annotation struct used by zipkinV1Span. +type annotation struct { + Timestamp int64 `json:"timestamp"` + Value string `json:"value"` + Endpoint *endpoint `json:"endpoint"` +} + +// binaryAnnotation used by zipkinV1Span. +type binaryAnnotation struct { + Key string `json:"key"` + Value string `json:"value"` + Endpoint *endpoint `json:"endpoint"` +} + +// v1JSONBatchToOCProto converts a JSON blob with a list of Zipkin v1 spans to OC Proto. +func v1JSONBatchToOCProto(blob []byte, parseStringTags bool) ([]consumerdata.TraceData, error) { + var zSpans []*zipkinV1Span + if err := json.Unmarshal(blob, &zSpans); err != nil { + return nil, fmt.Errorf("%s: %w", msgZipkinV1JSONUnmarshalError, err) + } + + ocSpansAndParsedAnnotations := make([]ocSpanAndParsedAnnotations, 0, len(zSpans)) + for _, zSpan := range zSpans { + ocSpan, parsedAnnotations, err := zipkinV1ToOCSpan(zSpan, parseStringTags) + if err != nil { + // error from internal package function, it already wraps the error to give better context. 
+ return nil, err + } + ocSpansAndParsedAnnotations = append(ocSpansAndParsedAnnotations, ocSpanAndParsedAnnotations{ + ocSpan: ocSpan, + parsedAnnotations: parsedAnnotations, + }) + } + + return zipkinToOCProtoBatch(ocSpansAndParsedAnnotations) +} + +type ocSpanAndParsedAnnotations struct { + ocSpan *tracepb.Span + parsedAnnotations *annotationParseResult +} + +func zipkinToOCProtoBatch(ocSpansAndParsedAnnotations []ocSpanAndParsedAnnotations) ([]consumerdata.TraceData, error) { + // Service to batch maps the service name to the trace request with the corresponding node. + svcToTD := make(map[string]*consumerdata.TraceData) + for _, curr := range ocSpansAndParsedAnnotations { + req := getOrCreateNodeRequest(svcToTD, curr.parsedAnnotations.Endpoint) + req.Spans = append(req.Spans, curr.ocSpan) + } + + tds := make([]consumerdata.TraceData, 0, len(svcToTD)) + for _, v := range svcToTD { + tds = append(tds, *v) + } + return tds, nil +} + +func zipkinV1ToOCSpan(zSpan *zipkinV1Span, parseStringTags bool) (*tracepb.Span, *annotationParseResult, error) { + traceID, err := hexTraceIDToOCTraceID(zSpan.TraceID) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", msgZipkinV1TraceIDError, err) + } + spanID, err := hexIDToOCID(zSpan.ID) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", msgZipkinV1SpanIDError, err) + } + var parentID []byte + if zSpan.ParentID != "" { + id, err := hexIDToOCID(zSpan.ParentID) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", msgZipkinV1ParentIDError, err) + } + parentID = id + } + + parsedAnnotations := parseZipkinV1Annotations(zSpan.Annotations) + attributes, ocStatus, localComponent := zipkinV1BinAnnotationsToOCAttributes(zSpan.BinaryAnnotations, parseStringTags) + if parsedAnnotations.Endpoint.ServiceName == unknownServiceName && localComponent != "" { + parsedAnnotations.Endpoint.ServiceName = localComponent + } + var startTime, endTime *timestamppb.Timestamp + if zSpan.Timestamp == 0 { + startTime = parsedAnnotations.EarlyAnnotationTime + endTime = parsedAnnotations.LateAnnotationTime + } else { + startTime = epochMicrosecondsToTimestamp(zSpan.Timestamp) + endTime = epochMicrosecondsToTimestamp(zSpan.Timestamp + zSpan.Duration) + } + + ocSpan := &tracepb.Span{ + TraceId: traceID, + SpanId: spanID, + ParentSpanId: parentID, + Status: ocStatus, + Kind: parsedAnnotations.Kind, + TimeEvents: parsedAnnotations.TimeEvents, + StartTime: startTime, + EndTime: endTime, + Attributes: attributes, + } + + if zSpan.Name != "" { + ocSpan.Name = &tracepb.TruncatableString{Value: zSpan.Name} + } + + setSpanKind(ocSpan, parsedAnnotations.Kind, parsedAnnotations.ExtendedKind) + setTimestampsIfUnset(ocSpan) + + return ocSpan, parsedAnnotations, nil +} + +func setSpanKind(ocSpan *tracepb.Span, kind tracepb.Span_SpanKind, extendedKind tracetranslator.OpenTracingSpanKind) { + if kind == tracepb.Span_SPAN_KIND_UNSPECIFIED && + extendedKind != tracetranslator.OpenTracingSpanKindUnspecified { + // Span kind has no equivalent in OC, so we cannot represent it in the Kind field. + // We will set a TagSpanKind attribute in the span. This will successfully transfer + // in the pipeline until it reaches the exporter which is responsible for + // reverse translation. 
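+		// For example (illustrative): a Zipkin "ms" annotation yields
+		// ExtendedKind producer with Kind left unspecified, so the span gets
+		// the attribute tracetranslator.TagSpanKind = "producer", which the
+		// exporter later translates back to a PRODUCER span kind.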
+ if ocSpan.Attributes == nil { + ocSpan.Attributes = &tracepb.Span_Attributes{} + } + if ocSpan.Attributes.AttributeMap == nil { + ocSpan.Attributes.AttributeMap = make(map[string]*tracepb.AttributeValue, 1) + } + ocSpan.Attributes.AttributeMap[tracetranslator.TagSpanKind] = + &tracepb.AttributeValue{Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: string(extendedKind)}, + }} + } +} + +func zipkinV1BinAnnotationsToOCAttributes(binAnnotations []*binaryAnnotation, parseStringTags bool) (attributes *tracepb.Span_Attributes, status *tracepb.Status, fallbackServiceName string) { + if len(binAnnotations) == 0 { + return nil, nil, "" + } + + sMapper := &statusMapper{} + var localComponent string + attributeMap := make(map[string]*tracepb.AttributeValue) + for _, binAnnotation := range binAnnotations { + + if binAnnotation.Endpoint != nil && binAnnotation.Endpoint.ServiceName != "" { + fallbackServiceName = binAnnotation.Endpoint.ServiceName + } + + pbAttrib := parseAnnotationValue(binAnnotation.Value, parseStringTags) + + key := binAnnotation.Key + + if key == zipkincore.LOCAL_COMPONENT { + // TODO: (@pjanotti) add reference to OpenTracing and change related tags to use them + key = "component" + localComponent = binAnnotation.Value + } + + if drop := sMapper.fromAttribute(key, pbAttrib); drop { + continue + } + + attributeMap[key] = pbAttrib + } + + status = sMapper.ocStatus() + + if len(attributeMap) == 0 { + return nil, status, "" + } + + if fallbackServiceName == "" { + fallbackServiceName = localComponent + } + + attributes = &tracepb.Span_Attributes{ + AttributeMap: attributeMap, + } + + return attributes, status, fallbackServiceName +} + +func parseAnnotationValue(value string, parseStringTags bool) *tracepb.AttributeValue { + pbAttrib := &tracepb.AttributeValue{} + + if parseStringTags { + switch tracetranslator.DetermineValueType(value, false) { + case pdata.AttributeValueINT: + iValue, _ := strconv.ParseInt(value, 10, 64) + pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: iValue} + case pdata.AttributeValueDOUBLE: + fValue, _ := strconv.ParseFloat(value, 64) + pbAttrib.Value = &tracepb.AttributeValue_DoubleValue{DoubleValue: fValue} + case pdata.AttributeValueBOOL: + bValue, _ := strconv.ParseBool(value) + pbAttrib.Value = &tracepb.AttributeValue_BoolValue{BoolValue: bValue} + default: + pbAttrib.Value = &tracepb.AttributeValue_StringValue{StringValue: &tracepb.TruncatableString{Value: value}} + } + } else { + pbAttrib.Value = &tracepb.AttributeValue_StringValue{StringValue: &tracepb.TruncatableString{Value: value}} + } + + return pbAttrib +} + +// annotationParseResult stores the results of examining the original annotations, +// this way multiple passes on the annotations are not needed. +type annotationParseResult struct { + Endpoint *endpoint + TimeEvents *tracepb.Span_TimeEvents + Kind tracepb.Span_SpanKind + ExtendedKind tracetranslator.OpenTracingSpanKind + EarlyAnnotationTime *timestamppb.Timestamp + LateAnnotationTime *timestamppb.Timestamp +} + +// Unknown service name works both as a default value and a flag to indicate that a valid endpoint was found. 
+const unknownServiceName = "unknown-service"
+
+func parseZipkinV1Annotations(annotations []*annotation) *annotationParseResult {
+	// Zipkin V1 annotations have a timestamp so they fit well with OC TimeEvent
+	earlyAnnotationTimestamp := int64(math.MaxInt64)
+	lateAnnotationTimestamp := int64(math.MinInt64)
+	res := &annotationParseResult{}
+	timeEvents := make([]*tracepb.Span_TimeEvent, 0, len(annotations))
+
+	// We want to set the span kind from the first annotation that contains information
+	// about the span kind. This flag ensures we only set span kind once, from
+	// the first annotation.
+	spanKindIsSet := false
+
+	for _, currAnnotation := range annotations {
+		if currAnnotation == nil || currAnnotation.Value == "" {
+			continue
+		}
+
+		endpointName := unknownServiceName
+		if currAnnotation.Endpoint != nil && currAnnotation.Endpoint.ServiceName != "" {
+			endpointName = currAnnotation.Endpoint.ServiceName
+		}
+
+		// Check if annotation has span kind information.
+		annotationHasSpanKind := false
+		switch currAnnotation.Value {
+		case "cs", "cr", "ms", "mr", "ss", "sr":
+			annotationHasSpanKind = true
+		}
+
+		// Populate the endpoint if it is not already populated and the current endpoint
+		// has a service name and span kind.
+		if res.Endpoint == nil && endpointName != unknownServiceName && annotationHasSpanKind {
+			res.Endpoint = currAnnotation.Endpoint
+		}
+
+		if !spanKindIsSet && annotationHasSpanKind {
+			// We have not yet populated span kind, do it now.
+			// Translate from the Zipkin span kind stored in the Value field to the
+			// Kind/ExtendedKind pair of internal fields.
+			switch currAnnotation.Value {
+			case "cs", "cr":
+				res.Kind = tracepb.Span_CLIENT
+				res.ExtendedKind = tracetranslator.OpenTracingSpanKindClient
+
+			case "ms":
+				// "ms" and "mr" are PRODUCER and CONSUMER kinds which have no equivalent
+				// representation in OC. We keep res.Kind unspecified and will use
+				// ExtendedKind for translations.
+				res.ExtendedKind = tracetranslator.OpenTracingSpanKindProducer
+
+			case "mr":
+				res.ExtendedKind = tracetranslator.OpenTracingSpanKindConsumer
+
+			case "ss", "sr":
+				res.Kind = tracepb.Span_SERVER
+				res.ExtendedKind = tracetranslator.OpenTracingSpanKindServer
+			}
+
+			// Remember that we populated the span kind, so that we don't do it again.
+			spanKindIsSet = true
+		}
+
+		ts := epochMicrosecondsToTimestamp(currAnnotation.Timestamp)
+		if currAnnotation.Timestamp < earlyAnnotationTimestamp {
+			earlyAnnotationTimestamp = currAnnotation.Timestamp
+			res.EarlyAnnotationTime = ts
+		}
+		if currAnnotation.Timestamp > lateAnnotationTimestamp {
+			lateAnnotationTimestamp = currAnnotation.Timestamp
+			res.LateAnnotationTime = ts
+		}
+
+		if annotationHasSpanKind {
+			// If this annotation is for the send/receive timestamps, no need to create the annotation
+			continue
+		}
+
+		timeEvent := &tracepb.Span_TimeEvent{
+			Time: ts,
+			// More economically we could use a tracepb.Span_TimeEvent_Message, however, it will mean the loss of some information.
+			// Using the more expensive annotation until/if something cheaper is needed.
+			Value: &tracepb.Span_TimeEvent_Annotation_{
+				Annotation: &tracepb.Span_TimeEvent_Annotation{
+					Description: &tracepb.TruncatableString{Value: currAnnotation.Value},
+				},
+			},
+		}
+
+		timeEvents = append(timeEvents, timeEvent)
+	}
+
+	if len(timeEvents) > 0 {
+		res.TimeEvents = &tracepb.Span_TimeEvents{TimeEvent: timeEvents}
+	}
+
+	if res.Endpoint == nil {
+		res.Endpoint = &endpoint{
+			ServiceName: unknownServiceName,
+		}
+	}
+
+	return res
+}
+
+func hexTraceIDToOCTraceID(hex string) ([]byte, error) {
+	// Per info at https://zipkin.io/zipkin-api/zipkin-api.yaml it should be 16 or 32 characters
+	hexLen := len(hex)
+	if hexLen != 16 && hexLen != 32 {
+		return nil, errHexTraceIDWrongLen
+	}
+
+	var high, low uint64
+	var err error
+	if hexLen == 32 {
+		if high, err = strconv.ParseUint(hex[:16], 16, 64); err != nil {
+			return nil, errHexTraceIDParsing
+		}
+	}
+
+	if low, err = strconv.ParseUint(hex[hexLen-16:], 16, 64); err != nil {
+		return nil, errHexTraceIDParsing
+	}
+
+	if high == 0 && low == 0 {
+		return nil, errHexTraceIDZero
+	}
+
+	tidBytes := tracetranslator.UInt64ToByteTraceID(high, low)
+	return tidBytes[:], nil
+}
+
+func hexIDToOCID(hex string) ([]byte, error) {
+	// Per info at https://zipkin.io/zipkin-api/zipkin-api.yaml it should be 16 characters
+	if len(hex) != 16 {
+		return nil, errHexIDWrongLen
+	}
+
+	idValue, err := strconv.ParseUint(hex, 16, 64)
+	if err != nil {
+		return nil, errHexIDParsing
+	}
+
+	if idValue == 0 {
+		return nil, errHexIDZero
+	}
+
+	idBytes := tracetranslator.UInt64ToByteSpanID(idValue)
+	return idBytes[:], nil
+}
+
+func epochMicrosecondsToTimestamp(msecs int64) *timestamppb.Timestamp {
+	if msecs <= 0 {
+		return nil
+	}
+	t := &timestamppb.Timestamp{}
+	t.Seconds = msecs / 1e6
+	t.Nanos = int32(msecs%1e6) * 1e3
+	return t
+}
+
+func getOrCreateNodeRequest(m map[string]*consumerdata.TraceData, endpoint *endpoint) *consumerdata.TraceData {
+	// this private function assumes that the caller never passes a nil endpoint
+	nodeKey := endpoint.string()
+	req := m[nodeKey]
+
+	if req != nil {
+		return req
+	}
+
+	req = &consumerdata.TraceData{
+		Node: &commonpb.Node{
+			ServiceInfo: &commonpb.ServiceInfo{Name: endpoint.ServiceName},
+		},
+	}
+
+	if attributeMap := endpoint.createAttributeMap(); attributeMap != nil {
+		req.Node.Attributes = attributeMap
+	}
+
+	m[nodeKey] = req
+
+	return req
+}
+
+func (ep *endpoint) string() string {
+	return fmt.Sprintf("%s-%s-%s-%d", ep.ServiceName, ep.IPv4, ep.IPv6, ep.Port)
+}
+
+func (ep *endpoint) createAttributeMap() map[string]string {
+	if ep.IPv4 == "" && ep.IPv6 == "" && ep.Port == 0 {
+		return nil
+	}
+
+	attributeMap := make(map[string]string, 3)
+	if ep.IPv4 != "" {
+		attributeMap["ipv4"] = ep.IPv4
+	}
+	if ep.IPv6 != "" {
+		attributeMap["ipv6"] = ep.IPv6
+	}
+	if ep.Port != 0 {
+		attributeMap["port"] = strconv.Itoa(int(ep.Port))
+	}
+	return attributeMap
+}
+
+func setTimestampsIfUnset(span *tracepb.Span) {
+	// zipkin allows timestamp to be unset, but opentelemetry-collector expects it to have a value.
+ // If this is unset, the conversion from open census to the internal trace format breaks + // what should be an identity transformation oc -> internal -> oc + if span.StartTime == nil { + now := timestamppb.New(time.Now()) + span.StartTime = now + span.EndTime = now + + if span.Attributes == nil { + span.Attributes = &tracepb.Span_Attributes{} + } + if span.Attributes.AttributeMap == nil { + span.Attributes.AttributeMap = make(map[string]*tracepb.AttributeValue, 1) + } + span.Attributes.AttributeMap[StartTimeAbsent] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_BoolValue{ + BoolValue: true, + }} + } +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan_test.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan_test.go new file mode 100644 index 00000000000..44455959187 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan_test.go @@ -0,0 +1,790 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "encoding/json" + "io/ioutil" + "sort" + "strconv" + "testing" + "time" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/google/go-cmp/cmp" + zipkinmodel "github.com/openzipkin/zipkin-go/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/timestamppb" + + "go.opentelemetry.io/collector/consumer/consumerdata" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +func Test_hexIDToOCID(t *testing.T) { + tests := []struct { + name string + hexStr string + want []byte + wantErr error + }{ + { + name: "empty hex string", + hexStr: "", + want: nil, + wantErr: errHexIDWrongLen, + }, + { + name: "wrong length", + hexStr: "0000", + want: nil, + wantErr: errHexIDWrongLen, + }, + { + name: "parse error", + hexStr: "000000000000000-", + want: nil, + wantErr: errHexIDParsing, + }, + { + name: "all zero", + hexStr: "0000000000000000", + want: nil, + wantErr: errHexIDZero, + }, + { + name: "happy path", + hexStr: "0706050400010203", + want: []byte{7, 6, 5, 4, 0, 1, 2, 3}, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := hexIDToOCID(tt.hexStr) + require.Equal(t, tt.wantErr, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_hexTraceIDToOCTraceID(t *testing.T) { + tests := []struct { + name string + hexStr string + want []byte + wantErr error + }{ + { + name: "empty hex string", + hexStr: "", + want: nil, + wantErr: errHexTraceIDWrongLen, + }, + { + name: "wrong length", + hexStr: "000000000000000010", + want: nil, + wantErr: errHexTraceIDWrongLen, + }, + { + name: "parse error", + hexStr: "000000000000000X0000000000000000", + want: nil, + wantErr: errHexTraceIDParsing, + }, + { + name: "all zero", 
+ hexStr: "00000000000000000000000000000000", + want: nil, + wantErr: errHexTraceIDZero, + }, + { + name: "happy path", + hexStr: "00000000000000010000000000000002", + want: []byte{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2}, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := hexTraceIDToOCTraceID(tt.hexStr) + require.Equal(t, tt.wantErr, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestZipkinJSONFallbackToLocalComponent(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_local_component.json") + require.NoError(t, err, "Failed to load test data") + + reqs, err := v1JSONBatchToOCProto(blob, false) + require.NoError(t, err, "Failed to translate zipkinv1 to OC proto") + require.Equal(t, 2, len(reqs), "Invalid trace service requests count") + + // Ensure the order of nodes + sort.Slice(reqs, func(i, j int) bool { + return reqs[i].Node.ServiceInfo.Name < reqs[j].Node.ServiceInfo.Name + }) + + // First span didn't have a host/endpoint to give service name, use the local component. + got := reqs[0].Node.ServiceInfo.Name + require.Equal(t, "myLocalComponent", got) + + // Second span have a host/endpoint to give service name, do not use local component. + got = reqs[1].Node.ServiceInfo.Name + require.Equal(t, "myServiceName", got) +} + +func TestSingleJSONV1BatchToOCProto(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_single_batch.json") + require.NoError(t, err, "Failed to load test data") + + parseStringTags := true // This test relies on parsing int/bool to the typed span attributes + got, err := v1JSONBatchToOCProto(blob, parseStringTags) + require.NoError(t, err, "Failed to translate zipkinv1 to OC proto") + + want := ocBatchesFromZipkinV1 + sortTraceByNodeName(want) + sortTraceByNodeName(got) + + assert.EqualValues(t, got, want) +} + +func TestMultipleJSONV1BatchesToOCProto(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_multiple_batches.json") + require.NoError(t, err, "Failed to load test data") + + var batches []interface{} + err = json.Unmarshal(blob, &batches) + require.NoError(t, err, "Failed to load the batches") + + nodeToTraceReqs := make(map[string]*consumerdata.TraceData) + var got []consumerdata.TraceData + for _, batch := range batches { + jsonBatch, err := json.Marshal(batch) + require.NoError(t, err, "Failed to marshal interface back to blob") + + parseStringTags := true // This test relies on parsing int/bool to the typed span attributes + g, err := v1JSONBatchToOCProto(jsonBatch, parseStringTags) + require.NoError(t, err, "Failed to translate zipkinv1 to OC proto") + + // Coalesce the nodes otherwise they will differ due to multiple + // nodes representing same logical service + for i := range g { + key := g[i].Node.String() + if pTsr, ok := nodeToTraceReqs[key]; ok { + pTsr.Spans = append(pTsr.Spans, g[i].Spans...) 
+ } else { + nodeToTraceReqs[key] = &g[i] + } + } + } + + for _, tsr := range nodeToTraceReqs { + got = append(got, *tsr) + } + + want := ocBatchesFromZipkinV1 + sortTraceByNodeName(want) + sortTraceByNodeName(got) + + if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" { + t.Errorf("Unexpected difference:\n%v", diff) + } +} + +func sortTraceByNodeName(trace []consumerdata.TraceData) { + sort.Slice(trace, func(i, j int) bool { + return trace[i].Node.ServiceInfo.Name < trace[j].Node.ServiceInfo.Name + }) +} + +func TestZipkinAnnotationsToOCStatus(t *testing.T) { + type test struct { + name string + haveTags []*binaryAnnotation + wantAttributes *tracepb.Span_Attributes + wantStatus *tracepb.Status + } + + cases := []test{ + { + name: "only status.code tag", + haveTags: []*binaryAnnotation{{ + Key: "status.code", + Value: "13", + }}, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 13, + }, + }, + + { + name: "only status.message tag", + haveTags: []*binaryAnnotation{{ + Key: "status.message", + Value: "Forbidden", + }}, + wantAttributes: nil, + wantStatus: nil, + }, + + { + name: "both status.code and status.message", + haveTags: []*binaryAnnotation{ + { + Key: "status.code", + Value: "13", + }, + { + Key: "status.message", + Value: "Forbidden", + }, + }, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 13, + Message: "Forbidden", + }, + }, + + { + name: "http status.code", + haveTags: []*binaryAnnotation{ + { + Key: "http.status_code", + Value: "404", + }, + { + Key: "http.status_message", + Value: "NotFound", + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 5, + Message: "NotFound", + }, + }, + + { + name: "http and oc", + haveTags: []*binaryAnnotation{ + { + Key: "http.status_code", + Value: "404", + }, + { + Key: "http.status_message", + Value: "NotFound", + }, + { + Key: "status.code", + Value: "13", + }, + { + Key: "status.message", + Value: "Forbidden", + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 13, + Message: "Forbidden", + }, + }, + + { + name: "http and only oc code", + haveTags: []*binaryAnnotation{ + { + Key: "http.status_code", + Value: "404", + }, + { + Key: "http.status_message", + Value: "NotFound", + }, + { + Key: "status.code", + Value: "14", + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 14, + }, + }, + + { + name: "http and only oc message", + haveTags: []*binaryAnnotation{ + { + Key: "http.status_code", + Value: "404", + }, + { + 
Key: "http.status_message", + Value: "NotFound", + }, + { + Key: "status.message", + Value: "Forbidden", + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 5, + Message: "NotFound", + }, + }, + + { + name: "census tags", + haveTags: []*binaryAnnotation{ + { + Key: "census.status_code", + Value: "10", + }, + { + Key: "census.status_description", + Value: "RPCError", + }, + }, + wantAttributes: nil, + wantStatus: &tracepb.Status{ + Code: 10, + Message: "RPCError", + }, + }, + + { + name: "census tags priority over others", + haveTags: []*binaryAnnotation{ + { + Key: "census.status_code", + Value: "10", + }, + { + Key: "census.status_description", + Value: "RPCError", + }, + { + Key: "http.status_code", + Value: "404", + }, + { + Key: "http.status_message", + Value: "NotFound", + }, + { + Key: "status.message", + Value: "Forbidden", + }, + { + Key: "status.code", + Value: "7", + }, + }, + wantAttributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + tracetranslator.TagHTTPStatusCode: { + Value: &tracepb.AttributeValue_IntValue{ + IntValue: 404, + }, + }, + tracetranslator.TagHTTPStatusMsg: { + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: "NotFound"}, + }, + }, + }, + }, + wantStatus: &tracepb.Status{ + Code: 10, + Message: "RPCError", + }, + }, + } + + fakeTraceID := "00000000000000010000000000000002" + fakeSpanID := "0000000000000001" + + for i, c := range cases { + t.Run(c.name, func(t *testing.T) { + zSpans := []*zipkinV1Span{{ + ID: fakeSpanID, + TraceID: fakeTraceID, + BinaryAnnotations: c.haveTags, + Timestamp: 1, + }} + zBytes, err := json.Marshal(zSpans) + if err != nil { + t.Errorf("#%d: Unexpected error: %v", i, err) + return + } + + parseStringTags := true // This test relies on parsing int/bool to the typed span attributes + gb, err := v1JSONBatchToOCProto(zBytes, parseStringTags) + if err != nil { + t.Errorf("#%d: Unexpected error: %v", i, err) + return + } + gs := gb[0].Spans[0] + require.Equal(t, c.wantAttributes, gs.Attributes, "Unsuccessful conversion %d", i) + require.Equal(t, c.wantStatus, gs.Status, "Unsuccessful conversion %d", i) + }) + } +} + +func TestSpanWithoutTimestampGetsTag(t *testing.T) { + fakeTraceID := "00000000000000010000000000000002" + fakeSpanID := "0000000000000001" + zSpans := []*zipkinV1Span{ + { + ID: fakeSpanID, + TraceID: fakeTraceID, + Timestamp: 0, // no timestamp field + }, + } + zBytes, err := json.Marshal(zSpans) + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + testStart := time.Now() + + gb, err := v1JSONBatchToOCProto(zBytes, false) + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + gs := gb[0].Spans[0] + assert.NotNil(t, gs.StartTime) + assert.NotNil(t, gs.EndTime) + + assert.True(t, gs.StartTime.AsTime().Sub(testStart) >= 0) + + wantAttributes := &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + StartTimeAbsent: { + Value: &tracepb.AttributeValue_BoolValue{ + BoolValue: true, + }, + }, + }, + } + + assert.EqualValues(t, gs.Attributes, wantAttributes) +} + +func TestJSONHTTPToGRPCStatusCode(t *testing.T) { + fakeTraceID 
:= "00000000000000010000000000000002" + fakeSpanID := "0000000000000001" + for i := int32(100); i <= 600; i++ { + wantStatus := tracetranslator.OCStatusCodeFromHTTP(i) + zBytes, err := json.Marshal([]*zipkinV1Span{{ + ID: fakeSpanID, + TraceID: fakeTraceID, + BinaryAnnotations: []*binaryAnnotation{ + { + Key: "http.status_code", + Value: strconv.Itoa(int(i)), + }, + }, + }}) + if err != nil { + t.Errorf("#%d: Unexpected error: %v", i, err) + continue + } + gb, err := v1JSONBatchToOCProto(zBytes, false) + if err != nil { + t.Errorf("#%d: Unexpected error: %v", i, err) + continue + } + + gs := gb[0].Spans[0] + require.Equal(t, wantStatus, gs.Status.Code, "Unsuccessful conversion %d", i) + } +} + +// ocBatches has the OpenCensus proto batches used in the test. They are hard coded because +// structs like tracepb.AttributeMap cannot be ready from JSON. +var ocBatchesFromZipkinV1 = []consumerdata.TraceData{ + { + Node: &commonpb.Node{ + ServiceInfo: &commonpb.ServiceInfo{Name: "front-proxy"}, + Attributes: map[string]string{"ipv4": "172.31.0.2"}, + }, + Spans: []*tracepb.Span{ + { + TraceId: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + SpanId: []byte{0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + ParentSpanId: nil, + Name: &tracepb.TruncatableString{Value: "checkAvailability"}, + Kind: tracepb.Span_CLIENT, + StartTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 446743000}, + EndTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 459699000}, + TimeEvents: nil, + }, + }, + }, + { + Node: &commonpb.Node{ + ServiceInfo: &commonpb.ServiceInfo{Name: "service1"}, + Attributes: map[string]string{"ipv4": "172.31.0.4"}, + }, + Spans: []*tracepb.Span{ + { + TraceId: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + SpanId: []byte{0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + ParentSpanId: nil, + Name: &tracepb.TruncatableString{Value: "checkAvailability"}, + Kind: tracepb.Span_SERVER, + StartTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 448081000}, + EndTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 460102000}, + TimeEvents: &tracepb.Span_TimeEvents{ + TimeEvent: []*tracepb.Span_TimeEvent{ + { + Time: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 450000000}, + Value: &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: &tracepb.TruncatableString{Value: "custom time event"}, + }, + }, + }, + }, + }, + }, + { + TraceId: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + SpanId: []byte{0xf9, 0xeb, 0xb6, 0xe6, 0x48, 0x80, 0x61, 0x2a}, + ParentSpanId: []byte{0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + Name: &tracepb.TruncatableString{Value: "checkStock"}, + Kind: tracepb.Span_CLIENT, + StartTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 453923000}, + EndTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 457663000}, + TimeEvents: nil, + }, + }, + }, + { + Node: &commonpb.Node{ + ServiceInfo: &commonpb.ServiceInfo{Name: "service2"}, + Attributes: map[string]string{"ipv4": "172.31.0.7"}, + }, + Spans: []*tracepb.Span{ + { + TraceId: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + SpanId: []byte{0xf9, 0xeb, 0xb6, 0xe6, 0x48, 0x80, 0x61, 0x2a}, + ParentSpanId: []byte{0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + Name: &tracepb.TruncatableString{Value: "checkStock"}, + Kind: tracepb.Span_SERVER, + 
StartTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 454487000}, + EndTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 457320000}, + Status: &tracepb.Status{ + Code: 0, + }, + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "http.status_code": { + Value: &tracepb.AttributeValue_IntValue{IntValue: 200}, + }, + "http.url": { + Value: &tracepb.AttributeValue_StringValue{StringValue: &tracepb.TruncatableString{Value: "http://localhost:9000/trace/2"}}, + }, + "success": { + Value: &tracepb.AttributeValue_BoolValue{BoolValue: true}, + }, + "processed": { + Value: &tracepb.AttributeValue_DoubleValue{DoubleValue: 1.5}, + }, + }, + }, + TimeEvents: nil, + }, + }, + }, + { + Node: &commonpb.Node{ + ServiceInfo: &commonpb.ServiceInfo{Name: "unknown-service"}, + }, + Spans: []*tracepb.Span{ + { + TraceId: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + SpanId: []byte{0xfe, 0x35, 0x1a, 0x05, 0x3f, 0xbc, 0xac, 0x1f}, + ParentSpanId: []byte{0x0e, 0xd2, 0xe6, 0x3c, 0xbe, 0x71, 0xf5, 0xa8}, + Name: &tracepb.TruncatableString{Value: "checkStock"}, + Kind: tracepb.Span_SPAN_KIND_UNSPECIFIED, + StartTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 453923000}, + EndTime: ×tamppb.Timestamp{Seconds: 1544805927, Nanos: 457663000}, + Attributes: nil, + }, + }, + }, +} + +func TestSpanKindTranslation(t *testing.T) { + tests := []struct { + zipkinV1Kind string + zipkinV2Kind zipkinmodel.Kind + ocKind tracepb.Span_SpanKind + ocAttrSpanKind tracetranslator.OpenTracingSpanKind + jaegerSpanKind string + }{ + { + zipkinV1Kind: "cr", + zipkinV2Kind: zipkinmodel.Client, + ocKind: tracepb.Span_CLIENT, + jaegerSpanKind: "client", + }, + + { + zipkinV1Kind: "sr", + zipkinV2Kind: zipkinmodel.Server, + ocKind: tracepb.Span_SERVER, + jaegerSpanKind: "server", + }, + + { + zipkinV1Kind: "ms", + zipkinV2Kind: zipkinmodel.Producer, + ocKind: tracepb.Span_SPAN_KIND_UNSPECIFIED, + ocAttrSpanKind: tracetranslator.OpenTracingSpanKindProducer, + jaegerSpanKind: "producer", + }, + + { + zipkinV1Kind: "mr", + zipkinV2Kind: zipkinmodel.Consumer, + ocKind: tracepb.Span_SPAN_KIND_UNSPECIFIED, + ocAttrSpanKind: tracetranslator.OpenTracingSpanKindConsumer, + jaegerSpanKind: "consumer", + }, + } + + for _, test := range tests { + t.Run(test.zipkinV1Kind, func(t *testing.T) { + // Create Zipkin V1 span. + zSpan := &zipkinV1Span{ + TraceID: "1234567890123456", + ID: "0123456789123456", + Annotations: []*annotation{ + {Value: test.zipkinV1Kind}, // note that only first annotation matters. + {Value: "cr"}, // this will have no effect. + }, + } + + // Translate to OC and verify that span kind is correctly translated. + ocSpan, parsedAnnotations, err := zipkinV1ToOCSpan(zSpan, false) + assert.NoError(t, err) + assert.EqualValues(t, test.ocKind, ocSpan.Kind) + assert.NotNil(t, parsedAnnotations) + if test.ocAttrSpanKind != "" { + require.NotNil(t, ocSpan.Attributes) + // This is a special case, verify that TagSpanKind attribute is set. 
+ expected := &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: string(test.ocAttrSpanKind)}, + }, + } + assert.EqualValues(t, expected, ocSpan.Attributes.AttributeMap[tracetranslator.TagSpanKind]) + } + }) + } +} + +func TestZipkinV1ToOCSpanInvalidTraceId(t *testing.T) { + zSpan := &zipkinV1Span{ + TraceID: "abc", + ID: "0123456789123456", + Annotations: []*annotation{ + {Value: "cr"}, + }, + } + _, _, err := zipkinV1ToOCSpan(zSpan, false) + assert.EqualError(t, err, "zipkinV1 span traceId: hex traceId span has wrong length (expected 16 or 32)") +} + +func TestZipkinV1ToOCSpanInvalidSpanId(t *testing.T) { + zSpan := &zipkinV1Span{ + TraceID: "1234567890123456", + ID: "abc", + Annotations: []*annotation{ + {Value: "cr"}, + }, + } + _, _, err := zipkinV1ToOCSpan(zSpan, false) + assert.EqualError(t, err, "zipkinV1 span id: hex Id has wrong length (expected 16)") +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go new file mode 100644 index 00000000000..a323123764c --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/internaldata" +) + +func V1JSONBatchToInternalTraces(blob []byte, parseStringTags bool) (pdata.Traces, error) { + traces := pdata.NewTraces() + + ocTraces, err := v1JSONBatchToOCProto(blob, parseStringTags) + if err != nil { + return traces, err + } + + for _, td := range ocTraces { + tmp := internaldata.OCToTraceData(td) + tmp.ResourceSpans().MoveAndAppendTo(traces.ResourceSpans()) + } + return traces, nil +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces_test.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces_test.go new file mode 100644 index 00000000000..e91d2b1c734 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces_test.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
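+
+// Illustrative usage sketch (not part of the upstream sources) for the JSON
+// entry point defined in zipkinv1_to_traces.go; the second argument controls
+// whether string tag values such as "404" or "true" are coerced into typed
+// attributes:
+//
+//	blob, err := ioutil.ReadFile("./testdata/zipkin_v1_single_batch.json")
+//	if err != nil { /* handle */ }
+//	traces, err := V1JSONBatchToInternalTraces(blob, true)
+//	if err != nil { /* handle */ }
+//	fmt.Println(traces.SpanCount()) // 5 spans in this fixture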
+ +package zipkin + +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSingleJSONV1BatchToTraces(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_single_batch.json") + require.NoError(t, err, "Failed to load test data") + + got, err := V1JSONBatchToInternalTraces(blob, false) + require.NoError(t, err, "Failed to translate zipkinv1 to OC proto") + + assert.Equal(t, 5, got.SpanCount()) +} + +func TestErrorSpanToTraces(t *testing.T) { + blob, err := ioutil.ReadFile("./testdata/zipkin_v1_error_batch.json") + require.NoError(t, err, "Failed to load test data") + + td, err := V1JSONBatchToInternalTraces(blob, false) + assert.Error(t, err, "Should have generated error") + assert.NotNil(t, td) +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces.go b/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces.go new file mode 100644 index 00000000000..91f18efe667 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces.go @@ -0,0 +1,427 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data" + otlptrace "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/trace/v1" + "go.opentelemetry.io/collector/translator/conventions" + tracetranslator "go.opentelemetry.io/collector/translator/trace" +) + +var nonSpanAttributes = getNonSpanAttributes() + +func getNonSpanAttributes() map[string]struct{} { + attrs := make(map[string]struct{}) + for _, key := range conventions.GetResourceSemanticConventionAttributeNames() { + attrs[key] = struct{}{} + } + attrs[tracetranslator.TagServiceNameSource] = struct{}{} + attrs[tracetranslator.TagInstrumentationName] = struct{}{} + attrs[tracetranslator.TagInstrumentationVersion] = struct{}{} + attrs[conventions.OCAttributeProcessStartTime] = struct{}{} + attrs[conventions.OCAttributeExporterVersion] = struct{}{} + attrs[conventions.OCAttributeProcessID] = struct{}{} + attrs[conventions.OCAttributeResourceType] = struct{}{} + return attrs +} + +// Custom Sort on +type byOTLPTypes []*zipkinmodel.SpanModel + +func (b byOTLPTypes) Len() int { + return len(b) +} + +func (b byOTLPTypes) Less(i, j int) bool { + diff := strings.Compare(extractLocalServiceName(b[i]), extractLocalServiceName(b[j])) + if diff != 0 { + return diff <= 0 + } + diff = strings.Compare(extractInstrumentationLibrary(b[i]), extractInstrumentationLibrary(b[j])) + return diff <= 0 +} + +func (b byOTLPTypes) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +// V2SpansToInternalTraces translates Zipkin v2 spans into internal trace data. 
+func V2SpansToInternalTraces(zipkinSpans []*zipkinmodel.SpanModel, parseStringTags bool) (pdata.Traces, error) { + traceData := pdata.NewTraces() + if len(zipkinSpans) == 0 { + return traceData, nil + } + + sort.Sort(byOTLPTypes(zipkinSpans)) + + rss := traceData.ResourceSpans() + prevServiceName := "" + prevInstrLibName := "" + rsCount := rss.Len() + ilsCount := 0 + spanCount := 0 + var curRscSpans pdata.ResourceSpans + var curILSpans pdata.InstrumentationLibrarySpans + var curSpans pdata.SpanSlice + for _, zspan := range zipkinSpans { + if zspan == nil { + continue + } + tags := copySpanTags(zspan.Tags) + localServiceName := extractLocalServiceName(zspan) + if localServiceName != prevServiceName { + prevServiceName = localServiceName + rss.Resize(rsCount + 1) + curRscSpans = rss.At(rsCount) + rsCount++ + populateResourceFromZipkinSpan(tags, localServiceName, curRscSpans.Resource()) + prevInstrLibName = "" + ilsCount = 0 + } + instrLibName := extractInstrumentationLibrary(zspan) + if instrLibName != prevInstrLibName || ilsCount == 0 { + prevInstrLibName = instrLibName + curRscSpans.InstrumentationLibrarySpans().Resize(ilsCount + 1) + curILSpans = curRscSpans.InstrumentationLibrarySpans().At(ilsCount) + ilsCount++ + populateILFromZipkinSpan(tags, instrLibName, curILSpans.InstrumentationLibrary()) + spanCount = 0 + curSpans = curILSpans.Spans() + } + curSpans.Resize(spanCount + 1) + err := zSpanToInternal(zspan, tags, curSpans.At(spanCount), parseStringTags) + if err != nil { + return traceData, err + } + spanCount++ + } + + return traceData, nil +} + +func zSpanToInternal(zspan *zipkinmodel.SpanModel, tags map[string]string, dest pdata.Span, parseStringTags bool) error { + dest.SetTraceID(tracetranslator.UInt64ToTraceID(zspan.TraceID.High, zspan.TraceID.Low)) + dest.SetSpanID(tracetranslator.UInt64ToSpanID(uint64(zspan.ID))) + if value, ok := tags[tracetranslator.TagW3CTraceState]; ok { + dest.SetTraceState(pdata.TraceState(value)) + delete(tags, tracetranslator.TagW3CTraceState) + } + parentID := zspan.ParentID + if parentID != nil && *parentID != zspan.ID { + dest.SetParentSpanID(tracetranslator.UInt64ToSpanID(uint64(*parentID))) + } + + dest.SetName(zspan.Name) + startNano := zspan.Timestamp.UnixNano() + dest.SetStartTime(pdata.TimestampUnixNano(startNano)) + dest.SetEndTime(pdata.TimestampUnixNano(startNano + zspan.Duration.Nanoseconds())) + dest.SetKind(zipkinKindToSpanKind(zspan.Kind, tags)) + + populateSpanStatus(tags, dest.Status()) + if err := zTagsToSpanLinks(tags, dest.Links()); err != nil { + return err + } + + attrs := dest.Attributes() + attrs.InitEmptyWithCapacity(len(tags)) + if err := zTagsToInternalAttrs(zspan, tags, attrs, parseStringTags); err != nil { + return err + } + + err := populateSpanEvents(zspan, dest.Events()) + return err +} + +func populateSpanStatus(tags map[string]string, status pdata.SpanStatus) { + if value, ok := tags[tracetranslator.TagStatusCode]; ok { + status.SetCode(pdata.StatusCode(otlptrace.Status_StatusCode_value[value])) + delete(tags, tracetranslator.TagStatusCode) + if value, ok := tags[tracetranslator.TagStatusMsg]; ok { + status.SetMessage(value) + delete(tags, tracetranslator.TagStatusMsg) + } + } +} + +func zipkinKindToSpanKind(kind zipkinmodel.Kind, tags map[string]string) pdata.SpanKind { + switch kind { + case zipkinmodel.Client: + return pdata.SpanKindCLIENT + case zipkinmodel.Server: + return pdata.SpanKindSERVER + case zipkinmodel.Producer: + return pdata.SpanKindPRODUCER + case zipkinmodel.Consumer: + return pdata.SpanKindCONSUMER 
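+	// Illustrative note: when Zipkin supplies no kind, the default branch below
+	// falls back to the tracetranslator.TagSpanKind tag written by the reverse
+	// translator; a tag value of "internal" becomes SpanKindINTERNAL, and any
+	// other value leaves the kind UNSPECIFIED (the tag is consumed either way).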
+ default: + if value, ok := tags[tracetranslator.TagSpanKind]; ok { + delete(tags, tracetranslator.TagSpanKind) + if value == "internal" { + return pdata.SpanKindINTERNAL + } + } + return pdata.SpanKindUNSPECIFIED + } +} + +func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { + index := 0 + for i := 0; i < 128; i++ { + key := fmt.Sprintf("otlp.link.%d", i) + val, ok := tags[key] + if !ok { + return nil + } + delete(tags, key) + + parts := strings.Split(val, "|") + partCnt := len(parts) + if partCnt < 5 { + continue + } + dest.Resize(index + 1) + link := dest.At(index) + index++ + + // Convert trace id. + rawTrace := data.TraceID{} + errTrace := rawTrace.UnmarshalJSON([]byte(parts[0])) + if errTrace != nil { + return errTrace + } + link.SetTraceID(pdata.TraceID(rawTrace)) + + // Convert span id. + rawSpan := data.SpanID{} + errSpan := rawSpan.UnmarshalJSON([]byte(parts[1])) + if errSpan != nil { + return errSpan + } + link.SetSpanID(pdata.SpanID(rawSpan)) + + link.SetTraceState(pdata.TraceState(parts[2])) + + var jsonStr string + if partCnt == 5 { + jsonStr = parts[3] + } else { + jsonParts := parts[3 : partCnt-1] + jsonStr = strings.Join(jsonParts, "|") + } + var attrs map[string]interface{} + if err := json.Unmarshal([]byte(jsonStr), &attrs); err != nil { + return err + } + if err := jsonMapToAttributeMap(attrs, link.Attributes()); err != nil { + return err + } + + dropped, errDropped := strconv.ParseUint(parts[partCnt-1], 10, 32) + if errDropped != nil { + return errDropped + } + link.SetDroppedAttributesCount(uint32(dropped)) + } + return nil +} + +func populateSpanEvents(zspan *zipkinmodel.SpanModel, events pdata.SpanEventSlice) error { + events.Resize(len(zspan.Annotations)) + for ix, anno := range zspan.Annotations { + event := events.At(ix) + startNano := anno.Timestamp.UnixNano() + event.SetTimestamp(pdata.TimestampUnixNano(startNano)) + + parts := strings.Split(anno.Value, "|") + partCnt := len(parts) + event.SetName(parts[0]) + if partCnt < 3 { + continue + } + + var jsonStr string + if partCnt == 3 { + jsonStr = parts[1] + } else { + jsonParts := parts[1 : partCnt-1] + jsonStr = strings.Join(jsonParts, "|") + } + var attrs map[string]interface{} + if err := json.Unmarshal([]byte(jsonStr), &attrs); err != nil { + return err + } + if err := jsonMapToAttributeMap(attrs, event.Attributes()); err != nil { + return err + } + + dropped, errDropped := strconv.ParseUint(parts[partCnt-1], 10, 32) + if errDropped != nil { + return errDropped + } + event.SetDroppedAttributesCount(uint32(dropped)) + } + return nil +} + +func jsonMapToAttributeMap(attrs map[string]interface{}, dest pdata.AttributeMap) error { + for key, val := range attrs { + if s, ok := val.(string); ok { + dest.InsertString(key, s) + } else if d, ok := val.(float64); ok { + if math.Mod(d, 1.0) == 0.0 { + dest.InsertInt(key, int64(d)) + } else { + dest.InsertDouble(key, d) + } + } else if b, ok := val.(bool); ok { + dest.InsertBool(key, b) + } + } + return nil +} + +func zTagsToInternalAttrs(zspan *zipkinmodel.SpanModel, tags map[string]string, dest pdata.AttributeMap, parseStringTags bool) error { + parseErr := tagsToAttributeMap(tags, dest, parseStringTags) + if zspan.LocalEndpoint != nil { + if zspan.LocalEndpoint.IPv4 != nil { + dest.InsertString(conventions.AttributeNetHostIP, zspan.LocalEndpoint.IPv4.String()) + } + if zspan.LocalEndpoint.IPv6 != nil { + dest.InsertString(conventions.AttributeNetHostIP, zspan.LocalEndpoint.IPv6.String()) + } + if zspan.LocalEndpoint.Port > 0 { + 
dest.UpsertInt(conventions.AttributeNetHostPort, int64(zspan.LocalEndpoint.Port)) + } + } + if zspan.RemoteEndpoint != nil { + if zspan.RemoteEndpoint.ServiceName != "" { + dest.InsertString(conventions.AttributePeerService, zspan.RemoteEndpoint.ServiceName) + } + if zspan.RemoteEndpoint.IPv4 != nil { + dest.InsertString(conventions.AttributeNetPeerIP, zspan.RemoteEndpoint.IPv4.String()) + } + if zspan.RemoteEndpoint.IPv6 != nil { + dest.InsertString(conventions.AttributeNetPeerIP, zspan.RemoteEndpoint.IPv6.String()) + } + if zspan.RemoteEndpoint.Port > 0 { + dest.UpsertInt(conventions.AttributeNetPeerPort, int64(zspan.RemoteEndpoint.Port)) + } + } + return parseErr +} + +func tagsToAttributeMap(tags map[string]string, dest pdata.AttributeMap, parseStringTags bool) error { + var parseErr error + for key, val := range tags { + if _, ok := nonSpanAttributes[key]; ok { + continue + } + + if parseStringTags { + switch tracetranslator.DetermineValueType(val, false) { + case pdata.AttributeValueINT: + iValue, _ := strconv.ParseInt(val, 10, 64) + dest.UpsertInt(key, iValue) + case pdata.AttributeValueDOUBLE: + fValue, _ := strconv.ParseFloat(val, 64) + dest.UpsertDouble(key, fValue) + case pdata.AttributeValueBOOL: + bValue, _ := strconv.ParseBool(val) + dest.UpsertBool(key, bValue) + default: + dest.UpsertString(key, val) + } + } else { + dest.UpsertString(key, val) + } + } + return parseErr +} + +func populateResourceFromZipkinSpan(tags map[string]string, localServiceName string, resource pdata.Resource) { + if localServiceName == tracetranslator.ResourceNoServiceName { + return + } + + if len(tags) == 0 { + resource.Attributes().InsertString(conventions.AttributeServiceName, localServiceName) + return + } + + snSource := tags[tracetranslator.TagServiceNameSource] + if snSource == "" { + resource.Attributes().InsertString(conventions.AttributeServiceName, localServiceName) + } else { + resource.Attributes().InsertString(snSource, localServiceName) + } + delete(tags, tracetranslator.TagServiceNameSource) + + for key := range getNonSpanAttributes() { + if key == tracetranslator.TagInstrumentationName || key == tracetranslator.TagInstrumentationVersion { + continue + } + if value, ok := tags[key]; ok { + resource.Attributes().UpsertString(key, value) + delete(tags, key) + } + } +} + +func populateILFromZipkinSpan(tags map[string]string, instrLibName string, library pdata.InstrumentationLibrary) { + if instrLibName == "" { + return + } + if value, ok := tags[tracetranslator.TagInstrumentationName]; ok { + library.SetName(value) + delete(tags, tracetranslator.TagInstrumentationName) + } + if value, ok := tags[tracetranslator.TagInstrumentationVersion]; ok { + library.SetVersion(value) + delete(tags, tracetranslator.TagInstrumentationVersion) + } +} + +func copySpanTags(tags map[string]string) map[string]string { + dest := make(map[string]string, len(tags)) + for key, val := range tags { + dest[key] = val + } + return dest +} + +func extractLocalServiceName(zspan *zipkinmodel.SpanModel) string { + if zspan == nil || zspan.LocalEndpoint == nil || zspan.LocalEndpoint.ServiceName == "" { + return tracetranslator.ResourceNoServiceName + } + return zspan.LocalEndpoint.ServiceName +} + +func extractInstrumentationLibrary(zspan *zipkinmodel.SpanModel) string { + if zspan == nil || len(zspan.Tags) == 0 { + return "" + } + return zspan.Tags[tracetranslator.TagInstrumentationName] +} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces_test.go 
b/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces_test.go new file mode 100644 index 00000000000..eb81a52866a --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces_test.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "testing" + "time" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/translator/conventions" +) + +func TestZipkinSpansToInternalTraces(t *testing.T) { + tests := []struct { + name string + zs []*zipkinmodel.SpanModel + td pdata.Traces + err error + }{ + { + name: "empty", + zs: make([]*zipkinmodel.SpanModel, 0), + td: testdata.GenerateTraceDataEmpty(), + err: nil, + }, + { + name: "nilSpan", + zs: generateNilSpan(), + td: testdata.GenerateTraceDataEmpty(), + err: nil, + }, + { + name: "minimalSpan", + zs: generateSpanNoEndpoints(), + td: generateTraceSingleSpanNoResourceOrInstrLibrary(), + err: nil, + }, + { + name: "onlyLocalEndpointSpan", + zs: generateSpanNoTags(), + td: generateTraceSingleSpanMinmalResource(), + err: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + td, err := V2SpansToInternalTraces(test.zs, false) + assert.EqualValues(t, test.err, err) + if test.name != "nilSpan" { + assert.Equal(t, len(test.zs), td.SpanCount()) + } + assert.EqualValues(t, test.td, td) + }) + } +} + +func generateNilSpan() []*zipkinmodel.SpanModel { + return make([]*zipkinmodel.SpanModel, 1) +} + +func generateSpanNoEndpoints() []*zipkinmodel.SpanModel { + spans := make([]*zipkinmodel.SpanModel, 1) + spans[0] = &zipkinmodel.SpanModel{ + SpanContext: zipkinmodel.SpanContext{ + TraceID: convertTraceID( + pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + ID: convertSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + }, + Name: "MinimalData", + Kind: zipkinmodel.Client, + Timestamp: time.Unix(1596911098, 294000000), + Duration: 1000000, + Shared: false, + LocalEndpoint: nil, + RemoteEndpoint: nil, + Annotations: nil, + Tags: nil, + } + return spans +} + +func generateSpanNoTags() []*zipkinmodel.SpanModel { + spans := generateSpanNoEndpoints() + spans[0].LocalEndpoint = &zipkinmodel.Endpoint{ServiceName: "SoleAttr"} + return spans +} + +func generateTraceSingleSpanNoResourceOrInstrLibrary() pdata.Traces { + td := pdata.NewTraces() + td.ResourceSpans().Resize(1) + rs := td.ResourceSpans().At(0) + rs.InstrumentationLibrarySpans().Resize(1) + ils := rs.InstrumentationLibrarySpans().At(0) + ils.Spans().Resize(1) + span := ils.Spans().At(0) + span.SetTraceID( + pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) + 
span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) + span.SetName("MinimalData") + span.SetKind(pdata.SpanKindCLIENT) + span.SetStartTime(1596911098294000000) + span.SetEndTime(1596911098295000000) + span.Attributes().InitEmptyWithCapacity(0) + return td +} + +func generateTraceSingleSpanMinmalResource() pdata.Traces { + td := generateTraceSingleSpanNoResourceOrInstrLibrary() + rs := td.ResourceSpans().At(0) + rsc := rs.Resource() + rsc.Attributes().InitEmptyWithCapacity(1) + rsc.Attributes().UpsertString(conventions.AttributeServiceName, "SoleAttr") + return td +} diff --git a/main.go b/main.go index 9b21eba3b5d..e81afade953 100644 --- a/main.go +++ b/main.go @@ -18,6 +18,7 @@ package main //go:generate go run model/modeldecoder/generator/cmd/main.go +//go:generate bash script/vendor_otel.sh import ( "os" diff --git a/script/vendor_otel.sh b/script/vendor_otel.sh new file mode 100644 index 00000000000..1c39c86f8b0 --- /dev/null +++ b/script/vendor_otel.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +set -xe + +go mod edit -dropreplace go.opentelemetry.io/collector +go mod download go.opentelemetry.io/collector + +REPO_ROOT=$(go list -m -f {{.Dir}} github.com/elastic/apm-server) +MIXIN_DIR=$REPO_ROOT/internal/.otel_collector_mixin +TARGET_DIR=$REPO_ROOT/internal/otel_collector +MODULE_DIR=$(go list -m -f {{.Dir}} go.opentelemetry.io/collector) + +rm -fr $TARGET_DIR +mkdir $TARGET_DIR +rsync -cr --no-perms --no-group --chmod=ugo=rwX --delete $MODULE_DIR/* $TARGET_DIR +rsync -cr --no-perms --no-group --chmod=ugo=rwX $MIXIN_DIR/* $TARGET_DIR + +go mod edit -replace go.opentelemetry.io/collector=./internal/otel_collector diff --git a/tests/Dockerfile b/tests/Dockerfile index 20698ec57ed..4cd96dc63d0 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -27,6 +27,7 @@ WORKDIR $HOME COPY go.mod go.sum ./ COPY approvaltest/go.mod approvaltest/go.sum ./approvaltest/ COPY systemtest/go.mod systemtest/go.sum ./systemtest/ +COPY internal/otel_collector/go.mod internal/otel_collector/go.sum ./internal/otel_collector/ RUN go mod download RUN cd approvaltest && go mod download RUN cd systemtest && go mod download diff --git a/tools.go b/tools.go index 64bcf2f3c55..e48cf3d26ff 100644 --- a/tools.go +++ b/tools.go @@ -27,6 +27,7 @@ import ( _ "github.com/jstemmer/go-junit-report" _ "github.com/reviewdog/reviewdog" _ "github.com/t-yuki/gocover-cobertura" + _ "go.elastic.co/go-licence-detector" _ "github.com/elastic/go-licenser" )
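
For context, a minimal sketch of driving the vendored Zipkin v2 translator from application code. This assumes the replace directive above is in effect, so go.opentelemetry.io/collector resolves to internal/otel_collector; the span values are invented for illustration.

package main

import (
	"fmt"
	"time"

	zipkinmodel "github.com/openzipkin/zipkin-go/model"

	"go.opentelemetry.io/collector/translator/trace/zipkin"
)

func main() {
	// A single made-up Zipkin v2 span; the endpoint supplies the resource's
	// service name, and the kind maps directly to a pdata span kind.
	span := &zipkinmodel.SpanModel{
		SpanContext: zipkinmodel.SpanContext{
			TraceID: zipkinmodel.TraceID{High: 1, Low: 2},
			ID:      zipkinmodel.ID(3),
		},
		Name:          "checkAvailability",
		Kind:          zipkinmodel.Client,
		Timestamp:     time.Now(),
		Duration:      time.Millisecond,
		LocalEndpoint: &zipkinmodel.Endpoint{ServiceName: "front-proxy"},
	}

	// false: keep all tag values as string attributes (no int/bool/double coercion).
	traces, err := zipkin.V2SpansToInternalTraces([]*zipkinmodel.SpanModel{span}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("translated spans:", traces.SpanCount())
}

Passing true for parseStringTags would instead coerce string tag values such as "404" or "true" into typed attributes, as exercised by the tests above.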