diff --git a/Makefile b/Makefile index cab7722c87a..0538e2ac77f 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ ocb: # Definitions for ProtoBuf generation. # The source directory for OTLP ProtoBufs. -OPENTELEMETRY_PROTO_SRC_DIR=pdata/internal/opentelemetry-proto +OPENTELEMETRY_PROTO_SRC_DIR?=pdata/internal/opentelemetry-proto # The branch matching the current version of the proto to use OPENTELEMETRY_PROTO_VERSION=v1.3.1 @@ -200,7 +200,7 @@ genproto: genproto-cleanup # Call a sub-make to ensure OPENTELEMETRY_PROTO_FILES is populated $(MAKE) genproto_sub $(MAKE) fmt - $(MAKE) genproto-cleanup + # $(MAKE) genproto-cleanup genproto_sub: @echo Generating code for the following files: @@ -234,8 +234,8 @@ genproto_sub: cp -R $(PROTO_INTERMEDIATE_DIR)/$(PROTO_PACKAGE)/* $(PROTO_TARGET_GEN_DIR)/ rm -rf $(PROTO_INTERMEDIATE_DIR)/go.opentelemetry.io - @rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/* - @rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/.* > /dev/null 2>&1 || true + #@rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/* + #@rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/.* > /dev/null 2>&1 || true # Generate structs, functions and tests for pdata package. Must be used after any changes # to proto and after running `make genproto` diff --git a/cmd/mdatagen/internal/loader.go b/cmd/mdatagen/internal/loader.go index b51fc86f141..e0e04216e4f 100644 --- a/cmd/mdatagen/internal/loader.go +++ b/cmd/mdatagen/internal/loader.go @@ -269,6 +269,15 @@ func (t telemetry) Levels() map[string]interface{} { return levels } +type Entity struct { + // Type of the entity. + Type string `mapstructure:"type"` + // Identifying attributes of the entity. + IDAttributes []AttributeName `mapstructure:"id_attributes"` + // Descriptive attributes of the entity. + DescriptiveAttributes []AttributeName `mapstructure:"descriptive_attributes"` +} + type Metadata struct { // Type of the component. Type string `mapstructure:"type"` @@ -284,6 +293,8 @@ type Metadata struct { SemConvVersion string `mapstructure:"sem_conv_version"` // ResourceAttributes that can be emitted by the component. ResourceAttributes map[AttributeName]Attribute `mapstructure:"resource_attributes"` + // Entities associated with the emitted resource attributes. + Entities []Entity `mapstructure:"entities"` // Attributes emitted by one or more metrics. Attributes map[AttributeName]Attribute `mapstructure:"attributes"` // Metrics that can be emitted by the component. diff --git a/cmd/mdatagen/internal/templates/resource.go.tmpl b/cmd/mdatagen/internal/templates/resource.go.tmpl index aa6b8e8b685..4ee3112d224 100644 --- a/cmd/mdatagen/internal/templates/resource.go.tmpl +++ b/cmd/mdatagen/internal/templates/resource.go.tmpl @@ -46,6 +46,22 @@ func (rb *ResourceBuilder) Set{{ $name.Render }}(val {{ $attr.Type.Primitive }}) // Emit returns the built resource and resets the internal builder state. 
func (rb *ResourceBuilder) Emit() pcommon.Resource { r := rb.res + {{- range $entity := .Entities }} + {{- range $attr := .IDAttributes }} + _, found{{ $attr.Render }} := r.Attributes().Get("{{ $attr }}") + {{- end }} + if {{ range $i, $attr := .IDAttributes }}{{ if $i }}&& {{ end }}found{{ $attr.Render }} {{ end }} { + ref := pcommon.NewResourceEntityRef() + ref.SetType("{{ $entity.Type }}") + ref.IdAttrKeys().Append({{ range $i, $attr := .IDAttributes }}{{ if $i }}, {{ end }}"{{ $attr }}"{{ end }}) + {{- range $attr := .DescriptiveAttributes }} + if _, ok := r.Attributes().Get("{{ $attr }}"); ok { + ref.DescrAttrKeys().Append("{{ $attr }}") + } + {{- end }} + ref.CopyTo(r.Entities().AppendEmpty()) + } + {{- end }} rb.res = pcommon.NewResource() return r } diff --git a/cmd/mdatagen/internal/validate.go b/cmd/mdatagen/internal/validate.go index b784a94fbba..76b4aa5b7fd 100644 --- a/cmd/mdatagen/internal/validate.go +++ b/cmd/mdatagen/internal/validate.go @@ -25,6 +25,9 @@ func (md *Metadata) Validate() error { if err := md.validateMetrics(); err != nil { errs = errors.Join(errs, err) } + if err := md.validateEntities(); err != nil { + errs = errors.Join(errs, err) + } return errs } @@ -96,6 +99,7 @@ func (s *Status) validateStability() error { c != "traces" && c != "logs" && c != "profiles" && + c != "entities" && c != "traces_to_traces" && c != "traces_to_metrics" && c != "traces_to_logs" && @@ -143,6 +147,18 @@ func (md *Metadata) validateMetrics() error { return errs } +func (md *Metadata) validateEntities() error { + var errs error + for _, entity := range md.Entities { + for _, attr := range append(entity.IDAttributes, entity.DescriptiveAttributes...) { + if _, ok := md.ResourceAttributes[attr]; !ok { + errs = errors.Join(errs, fmt.Errorf("undefined resource attribute: %v", attr)) + } + } + } + return errs +} + func (m *Metric) validate() error { var errs error if m.Description == "" { diff --git a/cmd/mdatagen/metadata-schema.yaml b/cmd/mdatagen/metadata-schema.yaml index afd1f09b62a..23662584b99 100644 --- a/cmd/mdatagen/metadata-schema.yaml +++ b/cmd/mdatagen/metadata-schema.yaml @@ -59,6 +59,13 @@ resource_attributes: # Should be used for deprecated optional resource_attributes that will be removed soon. if_configured: +# Optional: list of entities associated with the produced resource. +entities: + - type: string + # Array of attribute names that are used to identify the entity. + id_attributes: [string] + # Optional: array of attribute names that are used to describe the entity. + descriptive_attributes: [string] # Optional: map of attribute definitions with the key being the attribute name and value # being described below. diff --git a/connector/connector.go b/connector/connector.go index 2ae78f26a6b..495830e3ca0 100644 --- a/connector/connector.go +++ b/connector/connector.go @@ -61,6 +61,21 @@ type Logs interface { consumer.Logs } +// An Entities connector acts as an exporter from a logs pipeline and a receiver +// to one or more traces, metrics, or logs pipelines. +// Entities feeds a consumer.Traces, consumer.Metrics, or consumer.Entities with data. +// +// Examples: +// - Structured logs containing span information could be consumed and emitted as traces. +// - Metrics could be extracted from structured logs that contain numeric data. +// - Entities could be collected in one pipeline and routed to another logs pipeline +// based on criteria such as attributes or other content of the log. 
The second +// pipeline can then process and export the log to the appropriate backend. +type Entities interface { + component.Component + consumer.Entities +} + // Settings configures Connector creators. type Settings struct { // ID returns the ID of the component that will be created. @@ -91,26 +106,42 @@ type Factory interface { CreateTracesToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) CreateTracesToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Traces, error) CreateTracesToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Traces, error) + CreateTracesToEntities(ctx context.Context, set Settings, cfg component.Config, next consumer.Entities) (Traces, error) CreateMetricsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Metrics, error) CreateMetricsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) CreateMetricsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Metrics, error) + CreateMetricsToEntities(ctx context.Context, set Settings, cfg component.Config, next consumer.Entities) (Metrics, error) CreateLogsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Logs, error) CreateLogsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Logs, error) CreateLogsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) + CreateLogsToEntities(ctx context.Context, set Settings, cfg component.Config, next consumer.Entities) (Logs, error) + + CreateEntitiesToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Entities, error) + CreateEntitiesToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Entities, error) + CreateEntitiesToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Entities, error) + CreateEntitiesToEntities(ctx context.Context, set Settings, cfg component.Config, next consumer.Entities) (Entities, error) TracesToTracesStability() component.StabilityLevel TracesToMetricsStability() component.StabilityLevel TracesToLogsStability() component.StabilityLevel + TracesToEntitiesStability() component.StabilityLevel MetricsToTracesStability() component.StabilityLevel MetricsToMetricsStability() component.StabilityLevel MetricsToLogsStability() component.StabilityLevel + MetricsToEntitiesStability() component.StabilityLevel LogsToTracesStability() component.StabilityLevel LogsToMetricsStability() component.StabilityLevel LogsToLogsStability() component.StabilityLevel + LogsToEntitiesStability() component.StabilityLevel + + EntitiesToTracesStability() component.StabilityLevel + EntitiesToMetricsStability() component.StabilityLevel + EntitiesToLogsStability() component.StabilityLevel + EntitiesToEntitiesStability() component.StabilityLevel unexportedFactoryFunc() } @@ -163,6 +194,18 @@ func (f CreateTracesToLogsFunc) CreateTracesToLogs(ctx context.Context, set Sett return f(ctx, set, cfg, next) } +// CreateTracesToEntitiesFunc is the equivalent of Factory.CreateTracesToEntities(). +type CreateTracesToEntitiesFunc func(context.Context, Settings, component.Config, consumer.Entities) (Traces, error) + +// CreateTracesToEntities implements Factory.CreateTracesToEntities(). 
+func (f CreateTracesToEntitiesFunc) CreateTracesToEntities(ctx context.Context, set Settings, cfg component.Config, + next consumer.Entities) (Traces, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalEntities) + } + return f(ctx, set, cfg, next) +} + // CreateMetricsToTracesFunc is the equivalent of Factory.CreateMetricsToTraces(). type CreateMetricsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Metrics, error) @@ -196,6 +239,18 @@ func (f CreateMetricsToLogsFunc) CreateMetricsToLogs(ctx context.Context, set Se return f(ctx, set, cfg, next) } +// CreateMetricsToEntitiesFunc is the equivalent of Factory.CreateMetricsToEntities(). +type CreateMetricsToEntitiesFunc func(context.Context, Settings, component.Config, consumer.Entities) (Metrics, error) + +// CreateMetricsToEntities implements Factory.CreateMetricsToEntities(). +func (f CreateMetricsToEntitiesFunc) CreateMetricsToEntities(ctx context.Context, set Settings, cfg component.Config, + next consumer.Entities) (Metrics, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalEntities) + } + return f(ctx, set, cfg, next) +} + // CreateLogsToTracesFunc is the equivalent of Factory.CreateLogsToTraces(). type CreateLogsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Logs, error) @@ -229,6 +284,66 @@ func (f CreateLogsToLogsFunc) CreateLogsToLogs(ctx context.Context, set Settings return f(ctx, set, cfg, next) } +// CreateLogsToEntitiesFunc is the equivalent of Factory.CreateLogsToEntities(). +type CreateLogsToEntitiesFunc func(context.Context, Settings, component.Config, consumer.Entities) (Logs, error) + +// CreateLogsToEntities implements Factory.CreateLogsToEntities(). +func (f CreateLogsToEntitiesFunc) CreateLogsToEntities(ctx context.Context, set Settings, cfg component.Config, + next consumer.Entities) (Logs, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalEntities) + } + return f(ctx, set, cfg, next) +} + +// CreateEntitiesToTracesFunc is the equivalent of Factory.CreateEntitiesToTraces(). +type CreateEntitiesToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Entities, error) + +// CreateEntitiesToTraces implements Factory.CreateEntitiesToTraces(). +func (f CreateEntitiesToTracesFunc) CreateEntitiesToTraces(ctx context.Context, set Settings, cfg component.Config, + next consumer.Traces) (Entities, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalEntities, pipeline.SignalTraces) + } + return f(ctx, set, cfg, next) +} + +// CreateEntitiesToMetricsFunc is the equivalent of Factory.CreateEntitiesToMetrics(). +type CreateEntitiesToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Entities, error) + +// CreateEntitiesToMetrics implements Factory.CreateEntitiesToMetrics(). +func (f CreateEntitiesToMetricsFunc) CreateEntitiesToMetrics(ctx context.Context, set Settings, cfg component.Config, + next consumer.Metrics) (Entities, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalEntities, pipeline.SignalMetrics) + } + return f(ctx, set, cfg, next) +} + +// CreateEntitiesToLogsFunc is the equivalent of Factory.CreateEntitiesToLogs(). 
+type CreateEntitiesToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Entities, error) + +// CreateEntitiesToLogs implements Factory.CreateEntitiesToLogs(). +func (f CreateEntitiesToLogsFunc) CreateEntitiesToLogs(ctx context.Context, set Settings, cfg component.Config, + next consumer.Logs) (Entities, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalEntities, pipeline.SignalLogs) + } + return f(ctx, set, cfg, next) +} + +// CreateEntitiesToEntitiesFunc is the equivalent of Factory.CreateEntitiesToEntities(). +type CreateEntitiesToEntitiesFunc func(context.Context, Settings, component.Config, consumer.Entities) (Entities, error) + +// CreateEntitiesToEntities implements Factory.CreateEntitiesToEntities(). +func (f CreateEntitiesToEntitiesFunc) CreateEntitiesToEntities(ctx context.Context, set Settings, cfg component.Config, + next consumer.Entities) (Entities, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalEntities, pipeline.SignalEntities) + } + return f(ctx, set, cfg, next) +} + // WithTracesToTraces overrides the default "error not supported" implementation for WithTracesToTraces and the default "undefined" stability level. func WithTracesToTraces(createTracesToTraces CreateTracesToTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { @@ -309,26 +424,42 @@ type factory struct { CreateTracesToTracesFunc CreateTracesToMetricsFunc CreateTracesToLogsFunc + CreateTracesToEntitiesFunc CreateMetricsToTracesFunc CreateMetricsToMetricsFunc CreateMetricsToLogsFunc + CreateMetricsToEntitiesFunc CreateLogsToTracesFunc CreateLogsToMetricsFunc CreateLogsToLogsFunc + CreateLogsToEntitiesFunc + + CreateEntitiesToTracesFunc + CreateEntitiesToMetricsFunc + CreateEntitiesToLogsFunc + CreateEntitiesToEntitiesFunc + + tracesToTracesStabilityLevel component.StabilityLevel + tracesToMetricsStabilityLevel component.StabilityLevel + tracesToLogsStabilityLevel component.StabilityLevel + tracesToEntitiesStabilityLevel component.StabilityLevel - tracesToTracesStabilityLevel component.StabilityLevel - tracesToMetricsStabilityLevel component.StabilityLevel - tracesToLogsStabilityLevel component.StabilityLevel + metricsToTracesStabilityLevel component.StabilityLevel + metricsToMetricsStabilityLevel component.StabilityLevel + metricsToLogsStabilityLevel component.StabilityLevel + metricsToEntitiesStabilityLevel component.StabilityLevel - metricsToTracesStabilityLevel component.StabilityLevel - metricsToMetricsStabilityLevel component.StabilityLevel - metricsToLogsStabilityLevel component.StabilityLevel + logsToTracesStabilityLevel component.StabilityLevel + logsToMetricsStabilityLevel component.StabilityLevel + logsToLogsStabilityLevel component.StabilityLevel + logsToEntitiesStabilityLevel component.StabilityLevel - logsToTracesStabilityLevel component.StabilityLevel - logsToMetricsStabilityLevel component.StabilityLevel - logsToLogsStabilityLevel component.StabilityLevel + entitiesToTracesStabilityLevel component.StabilityLevel + entitiesToMetricsStabilityLevel component.StabilityLevel + entitiesToLogsStabilityLevel component.StabilityLevel + entitiesToEntitiesStabilityLevel component.StabilityLevel } // Type returns the type of component. 
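For orientation, the following is a minimal sketch, not part of this patch, of a component that would satisfy the new connector Entities contract (component.Component plus consumer.Entities) and could be returned by a CreateEntitiesToEntities factory function; the package, type, and constructor names are illustrative assumptions only.

// Minimal sketch (not part of this patch): an entities-to-entities pass-through
// connector. The names passthrough/newPassthrough are hypothetical.
package passthrough

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/pentity"
)

type passthrough struct {
	next consumer.Entities // the next consumer.Entities in the downstream pipeline(s)
}

func newPassthrough(next consumer.Entities) *passthrough {
	return &passthrough{next: next}
}

// Start and Shutdown provide the component.Component half of the contract.
func (p *passthrough) Start(context.Context, component.Host) error { return nil }
func (p *passthrough) Shutdown(context.Context) error              { return nil }

// Capabilities reports that this connector does not mutate incoming data.
func (p *passthrough) Capabilities() consumer.Capabilities {
	return consumer.Capabilities{MutatesData: false}
}

// ConsumeEntities forwards the entities unchanged to the next consumer,
// matching the shape expected of a CreateEntitiesToEntities implementation.
func (p *passthrough) ConsumeEntities(ctx context.Context, ed pentity.Entities) error {
	return p.next.ConsumeEntities(ctx, ed)
}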
@@ -350,6 +481,10 @@ func (f *factory) TracesToLogsStability() component.StabilityLevel { return f.tracesToLogsStabilityLevel } +func (f *factory) TracesToEntitiesStability() component.StabilityLevel { + return f.tracesToEntitiesStabilityLevel +} + func (f *factory) MetricsToTracesStability() component.StabilityLevel { return f.metricsToTracesStabilityLevel } @@ -362,6 +497,10 @@ func (f *factory) MetricsToLogsStability() component.StabilityLevel { return f.metricsToLogsStabilityLevel } +func (f *factory) MetricsToEntitiesStability() component.StabilityLevel { + return f.metricsToEntitiesStabilityLevel +} + func (f *factory) LogsToTracesStability() component.StabilityLevel { return f.logsToTracesStabilityLevel } @@ -374,6 +513,26 @@ func (f *factory) LogsToLogsStability() component.StabilityLevel { return f.logsToLogsStabilityLevel } +func (f *factory) LogsToEntitiesStability() component.StabilityLevel { + return f.logsToEntitiesStabilityLevel +} + +func (f *factory) EntitiesToTracesStability() component.StabilityLevel { + return f.entitiesToTracesStabilityLevel +} + +func (f *factory) EntitiesToMetricsStability() component.StabilityLevel { + return f.entitiesToMetricsStabilityLevel +} + +func (f *factory) EntitiesToLogsStability() component.StabilityLevel { + return f.entitiesToLogsStabilityLevel +} + +func (f *factory) EntitiesToEntitiesStability() component.StabilityLevel { + return f.entitiesToEntitiesStabilityLevel +} + // NewFactory returns a Factory. func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { f := &factory{ diff --git a/connector/entities_router.go b/connector/entities_router.go new file mode 100644 index 00000000000..0c117730a4c --- /dev/null +++ b/connector/entities_router.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package connector // import "go.opentelemetry.io/collector/connector" + +import ( + "fmt" + + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/connector/internal" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" +) + +// EntitiesRouterAndConsumer feeds the first consumer.Entities in each of the specified pipelines. 
+type EntitiesRouterAndConsumer interface { + consumer.Entities + Consumer(...pipeline.ID) (consumer.Entities, error) + PipelineIDs() []pipeline.ID + privateFunc() +} + +type entitiesRouter struct { + consumer.Entities + internal.BaseRouter[consumer.Entities] +} + +func NewEntitiesRouter(cm map[pipeline.ID]consumer.Entities) EntitiesRouterAndConsumer { + consumers := make([]consumer.Entities, 0, len(cm)) + for _, cons := range cm { + consumers = append(consumers, cons) + } + return &entitiesRouter{ + Entities: fanoutconsumer.NewEntities(consumers), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewEntities, cm), + } +} + +func (r *entitiesRouter) PipelineIDs() []pipeline.ID { + ids := make([]pipeline.ID, 0, len(r.Consumers)) + for id := range r.Consumers { + ids = append(ids, id) + } + return ids +} + +func (r *entitiesRouter) Consumer(pipelineIDs ...pipeline.ID) (consumer.Entities, error) { + if len(pipelineIDs) == 0 { + return nil, fmt.Errorf("missing consumers") + } + consumers := make([]consumer.Entities, 0, len(pipelineIDs)) + var errors error + for _, pipelineID := range pipelineIDs { + c, ok := r.Consumers[pipelineID] + if ok { + consumers = append(consumers, c) + } else { + errors = multierr.Append(errors, fmt.Errorf("missing consumer: %q", pipelineID)) + } + } + if errors != nil { + // TODO potentially this could return a NewEntities with the valid consumers + return nil, errors + } + return fanoutconsumer.NewEntities(consumers), nil +} + +func (r *entitiesRouter) privateFunc() {} diff --git a/consumer/consumererror/internal/retryable.go b/consumer/consumererror/internal/retryable.go index feed1bc5bc7..f4faa418088 100644 --- a/consumer/consumererror/internal/retryable.go +++ b/consumer/consumererror/internal/retryable.go @@ -4,13 +4,14 @@ package internal // import "go.opentelemetry.io/collector/consumer/consumererror/internal" import ( + "go.opentelemetry.io/collector/pdata/pentity" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) -type Retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs | pprofile.Profiles] struct { +type Retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs | pprofile.Profiles | pentity.Entities] struct { Err error Value V } diff --git a/consumer/consumererror/signalerrors.go b/consumer/consumererror/signalerrors.go index 69af253dae7..d0b34e75f29 100644 --- a/consumer/consumererror/signalerrors.go +++ b/consumer/consumererror/signalerrors.go @@ -5,6 +5,7 @@ package consumererror // import "go.opentelemetry.io/collector/consumer/consumer import ( "go.opentelemetry.io/collector/consumer/consumererror/internal" + "go.opentelemetry.io/collector/pdata/pentity" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" @@ -57,3 +58,19 @@ func NewMetrics(err error, data pmetric.Metrics) error { }, } } + +// Entities is an error that may carry associated Entities data for a subset of received data +// that failed to be processed or sent. +type Entities struct { + internal.Retryable[pentity.Entities] +} + +// NewEntities creates an Entities that can encapsulate received data that failed to be processed or sent. 
+func NewEntities(err error, data pentity.Entities) error { + return Entities{ + Retryable: internal.Retryable[pentity.Entities]{ + Err: err, + Value: data, + }, + } +} diff --git a/consumer/consumertest/consumer.go b/consumer/consumertest/consumer.go index 4b699b9449a..78f3bf3d500 100644 --- a/consumer/consumertest/consumer.go +++ b/consumer/consumertest/consumer.go @@ -8,6 +8,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumerprofiles" + "go.opentelemetry.io/collector/pdata/pentity" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pprofile" @@ -34,6 +35,9 @@ type Consumer interface { // ConsumeProfiles to implement the consumerprofiles.Profiles. ConsumeProfiles(context.Context, pprofile.Profiles) error + // ConsumeEntities to implement the consumer.Entities. + ConsumeEntities(context.Context, pentity.Entities) error + unexported() } @@ -41,6 +45,7 @@ var _ consumer.Logs = (Consumer)(nil) var _ consumer.Metrics = (Consumer)(nil) var _ consumer.Traces = (Consumer)(nil) var _ consumerprofiles.Profiles = (Consumer)(nil) +var _ consumer.Entities = (Consumer)(nil) type nonMutatingConsumer struct{} @@ -55,6 +60,7 @@ type baseConsumer struct { consumer.ConsumeMetricsFunc consumer.ConsumeLogsFunc consumerprofiles.ConsumeProfilesFunc + consumer.ConsumeEntitiesFunc } func (bc baseConsumer) unexported() {} diff --git a/consumer/consumertest/nop.go b/consumer/consumertest/nop.go index 25b898a7751..a36e5014333 100644 --- a/consumer/consumertest/nop.go +++ b/consumer/consumertest/nop.go @@ -6,6 +6,7 @@ package consumertest // import "go.opentelemetry.io/collector/consumer/consumert import ( "context" + "go.opentelemetry.io/collector/pdata/pentity" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pprofile" @@ -19,5 +20,6 @@ func NewNop() Consumer { ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return nil }, + ConsumeEntitiesFunc: func(context.Context, pentity.Entities) error { return nil }, } } diff --git a/consumer/entities.go b/consumer/entities.go new file mode 100644 index 00000000000..284488c8bce --- /dev/null +++ b/consumer/entities.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumer // import "go.opentelemetry.io/collector/consumer" + +import ( + "context" + + "go.opentelemetry.io/collector/consumer/internal" + "go.opentelemetry.io/collector/pdata/pentity" +) + +// Entities is an interface that receives pentity.Entities, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type Entities interface { + internal.BaseConsumer + // ConsumeEntities receives pentity.Entities for consumption. + ConsumeEntities(ctx context.Context, td pentity.Entities) error +} + +// ConsumeEntitiesFunc is a helper function that is similar to ConsumeEntities. +type ConsumeEntitiesFunc func(ctx context.Context, td pentity.Entities) error + +// ConsumeEntities calls f(ctx, td). 
+func (f ConsumeEntitiesFunc) ConsumeEntities(ctx context.Context, td pentity.Entities) error { + return f(ctx, td) +} + +type baseEntities struct { + *internal.BaseImpl + ConsumeEntitiesFunc +} + +// NewEntities returns a Entities configured with the provided options. +func NewEntities(consume ConsumeEntitiesFunc, options ...Option) (Entities, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseEntities{ + BaseImpl: internal.NewBaseImpl(options...), + ConsumeEntitiesFunc: consume, + }, nil +} diff --git a/exporter/debugexporter/README.md b/exporter/debugexporter/README.md index d9cb1304b39..b0d916b4483 100644 --- a/exporter/debugexporter/README.md +++ b/exporter/debugexporter/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [development]: traces, metrics, logs | +| Stability | [development]: traces, metrics, logs, entities | | Distributions | [core], [contrib], [k8s] | | Warnings | [Unstable Output Format](#warnings) | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fdebug%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fdebug) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fdebug%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fdebug) | diff --git a/exporter/debugexporter/exporter.go b/exporter/debugexporter/exporter.go index a4ffe6f16f2..5a465a4802f 100644 --- a/exporter/debugexporter/exporter.go +++ b/exporter/debugexporter/exporter.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/exporter/debugexporter/internal/normal" "go.opentelemetry.io/collector/exporter/debugexporter/internal/otlptext" + "go.opentelemetry.io/collector/pdata/pentity" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" @@ -22,12 +23,15 @@ type debugExporter struct { logsMarshaler plog.Marshaler metricsMarshaler pmetric.Marshaler tracesMarshaler ptrace.Marshaler + entityMarshaler pentity.Marshaler } func newDebugExporter(logger *zap.Logger, verbosity configtelemetry.Level) *debugExporter { var logsMarshaler plog.Marshaler var metricsMarshaler pmetric.Marshaler var tracesMarshaler ptrace.Marshaler + // TODO: Implement separate entity marshalers + entityMarshaler := otlptext.NewTextEntitiesMarshaler() if verbosity == configtelemetry.LevelDetailed { logsMarshaler = otlptext.NewTextLogsMarshaler() metricsMarshaler = otlptext.NewTextMetricsMarshaler() @@ -43,6 +47,7 @@ func newDebugExporter(logger *zap.Logger, verbosity configtelemetry.Level) *debu logsMarshaler: logsMarshaler, metricsMarshaler: metricsMarshaler, tracesMarshaler: tracesMarshaler, + entityMarshaler: entityMarshaler, } } @@ -95,3 +100,20 @@ func (s *debugExporter) pushLogs(_ context.Context, ld plog.Logs) error { s.logger.Info(string(buf)) return nil } + +func (s *debugExporter) pushEntities(_ context.Context, ld pentity.Entities) error { + s.logger.Info("Entities", + zap.Int("resource entities", ld.ResourceEntities().Len()), + zap.Int("entities", ld.EntityCount())) + + if s.verbosity == configtelemetry.LevelBasic { + return nil + } 
+ + buf, err := s.entityMarshaler.MarshalEntities(ld) + if err != nil { + return err + } + s.logger.Info(string(buf)) + return nil +} diff --git a/exporter/debugexporter/factory.go b/exporter/debugexporter/factory.go index 15f6407e928..0e985af6f28 100644 --- a/exporter/debugexporter/factory.go +++ b/exporter/debugexporter/factory.go @@ -35,6 +35,7 @@ func NewFactory() exporter.Factory { exporter.WithTraces(createTraces, metadata.TracesStability), exporter.WithMetrics(createMetrics, metadata.MetricsStability), exporter.WithLogs(createLogs, metadata.LogsStability), + exporter.WithEntities(createEntities, metadata.EntitiesStability), ) } @@ -83,6 +84,18 @@ func createLogs(ctx context.Context, set exporter.Settings, config component.Con ) } +func createEntities(ctx context.Context, set exporter.Settings, config component.Config) (exporter.Entities, error) { + cfg := config.(*Config) + exporterLogger := createLogger(cfg, set.TelemetrySettings.Logger) + debug := newDebugExporter(exporterLogger, cfg.Verbosity) + return exporterhelper.NewEntities(ctx, set, config, + debug.pushEntities, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + exporterhelper.WithShutdown(otlptext.LoggerSync(exporterLogger)), + ) +} + func createLogger(cfg *Config, logger *zap.Logger) *zap.Logger { var exporterLogger *zap.Logger if cfg.UseInternalLogger { diff --git a/exporter/debugexporter/internal/metadata/generated_status.go b/exporter/debugexporter/internal/metadata/generated_status.go index b4588f75200..a27472c9ce6 100644 --- a/exporter/debugexporter/internal/metadata/generated_status.go +++ b/exporter/debugexporter/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - TracesStability = component.StabilityLevelDevelopment - MetricsStability = component.StabilityLevelDevelopment - LogsStability = component.StabilityLevelDevelopment + TracesStability = component.StabilityLevelDevelopment + MetricsStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelDevelopment + EntitiesStability = component.StabilityLevelDevelopment ) diff --git a/exporter/debugexporter/internal/otlptext/databuffer.go b/exporter/debugexporter/internal/otlptext/databuffer.go index 4e73794da7a..ef46dc3df7a 100644 --- a/exporter/debugexporter/internal/otlptext/databuffer.go +++ b/exporter/debugexporter/internal/otlptext/databuffer.go @@ -440,6 +440,22 @@ func linkTableToMap(ls pprofile.LinkSlice) pcommon.Map { return m } +func (b *dataBuffer) logResourceEntities(rers pcommon.ResourceEntityRefSlice) { + if rers.Len() == 0 { + return + } + + b.logEntry("Entities:") + for i := 0; i < rers.Len(); i++ { + rer := rers.At(i) + b.logEntry("Entity Ref #%d", i) + b.logEntry(" -> Entity Type: %s", rer.Type()) + b.logEntry(" -> SchemaURL: %s", rer.SchemaUrl()) + b.logEntry(" -> Identifying Attributes: %s", strings.Join(rer.IdAttrKeys().AsRaw(), ", ")) + b.logEntry(" -> Descriptive Attributes: %s", strings.Join(rer.DescrAttrKeys().AsRaw(), ", ")) + } +} + func valueToString(v pcommon.Value) string { return fmt.Sprintf("%s(%s)", v.Type().String(), v.AsString()) } diff --git a/exporter/debugexporter/internal/otlptext/entities.go b/exporter/debugexporter/internal/otlptext/entities.go new file mode 100644 index 00000000000..c32ae1a4e9e --- /dev/null +++ b/exporter/debugexporter/internal/otlptext/entities.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
otlptext // import "go.opentelemetry.io/collector/exporter/debugexporter/internal/otlptext" + +import "go.opentelemetry.io/collector/pdata/pentity" + +// NewTextEntitiesMarshaler returns a pentity.Marshaler to encode to OTLP text bytes. +func NewTextEntitiesMarshaler() pentity.Marshaler { + return textEntitiesMarshaler{} +} + +type textEntitiesMarshaler struct{} + +// MarshalEntities pentity.Entities to OTLP text. +func (textEntitiesMarshaler) MarshalEntities(ld pentity.Entities) ([]byte, error) { + buf := dataBuffer{} + rls := ld.ResourceEntities() + for i := 0; i < rls.Len(); i++ { + buf.logEntry("ResourceEntity #%d", i) + rl := rls.At(i) + buf.logEntry("Resource SchemaURL: %s", rl.SchemaUrl()) + marshalResource(rl.Resource(), &buf) + ills := rl.ScopeEntities() + for j := 0; j < ills.Len(); j++ { + buf.logEntry("ScopeEntities #%d", j) + ils := ills.At(j) + buf.logEntry("ScopeEntities SchemaURL: %s", ils.SchemaUrl()) + buf.logInstrumentationScope(ils.Scope()) + + logs := ils.EntityEvents() + for k := 0; k < logs.Len(); k++ { + buf.logEntry("EntityEvent #%d", k) + e := logs.At(k) + buf.logEntry("EntityType: %s", e.EntityType()) + buf.logEntry("Timestamp: %s", e.Timestamp()) + buf.logAttributes("IDAttributes", e.Id()) + buf.logEntry("Event Type: %s", e.Type()) + if e.Type() == pentity.EventTypeEntityState { + buf.logAttributes("Attributes", e.EntityState().Attributes()) + } + } + } + } + + return buf.buf.Bytes(), nil +} diff --git a/exporter/debugexporter/internal/otlptext/logs.go b/exporter/debugexporter/internal/otlptext/logs.go index 399e9f00d39..7d7aa875780 100644 --- a/exporter/debugexporter/internal/otlptext/logs.go +++ b/exporter/debugexporter/internal/otlptext/logs.go @@ -22,7 +22,7 @@ func (textLogsMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { buf.logEntry("ResourceLog #%d", i) rl := rls.At(i) buf.logEntry("Resource SchemaURL: %s", rl.SchemaUrl()) - buf.logAttributes("Resource attributes", rl.Resource().Attributes()) + marshalResource(rl.Resource(), &buf) ills := rl.ScopeLogs() for j := 0; j < ills.Len(); j++ { buf.logEntry("ScopeLogs #%d", j) diff --git a/exporter/debugexporter/internal/otlptext/metrics.go b/exporter/debugexporter/internal/otlptext/metrics.go index 489d70925b5..870ece5075a 100644 --- a/exporter/debugexporter/internal/otlptext/metrics.go +++ b/exporter/debugexporter/internal/otlptext/metrics.go @@ -20,7 +20,7 @@ func (textMetricsMarshaler) MarshalMetrics(md pmetric.Metrics) ([]byte, error) { buf.logEntry("ResourceMetrics #%d", i) rm := rms.At(i) buf.logEntry("Resource SchemaURL: %s", rm.SchemaUrl()) - buf.logAttributes("Resource attributes", rm.Resource().Attributes()) + marshalResource(rm.Resource(), &buf) ilms := rm.ScopeMetrics() for j := 0; j < ilms.Len(); j++ { buf.logEntry("ScopeMetrics #%d", j) diff --git a/exporter/debugexporter/internal/otlptext/resource.go b/exporter/debugexporter/internal/otlptext/resource.go new file mode 100644 index 00000000000..549aa1cbc8a --- /dev/null +++ b/exporter/debugexporter/internal/otlptext/resource.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptext // import "go.opentelemetry.io/collector/exporter/debugexporter/internal/otlptext" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func marshalResource(res pcommon.Resource, buf *dataBuffer) { + buf.logAttributes("Resource attributes", res.Attributes()) + buf.logResourceEntities(res.Entities()) +} diff --git a/exporter/debugexporter/internal/otlptext/traces.go 
b/exporter/debugexporter/internal/otlptext/traces.go index 90d5c400a4f..898aa7b72d5 100644 --- a/exporter/debugexporter/internal/otlptext/traces.go +++ b/exporter/debugexporter/internal/otlptext/traces.go @@ -22,7 +22,7 @@ func (textTracesMarshaler) MarshalTraces(td ptrace.Traces) ([]byte, error) { buf.logEntry("ResourceSpans #%d", i) rs := rss.At(i) buf.logEntry("Resource SchemaURL: %s", rs.SchemaUrl()) - buf.logAttributes("Resource attributes", rs.Resource().Attributes()) + marshalResource(rs.Resource(), &buf) ilss := rs.ScopeSpans() for j := 0; j < ilss.Len(); j++ { buf.logEntry("ScopeSpans #%d", j) diff --git a/exporter/debugexporter/metadata.yaml b/exporter/debugexporter/metadata.yaml index 2d927eff8dc..2c53346c2f4 100644 --- a/exporter/debugexporter/metadata.yaml +++ b/exporter/debugexporter/metadata.yaml @@ -4,6 +4,6 @@ github_project: open-telemetry/opentelemetry-collector status: class: exporter stability: - development: [traces, metrics, logs] + development: [traces, metrics, logs, entities] distributions: [core, contrib, k8s] warnings: [Unstable Output Format] diff --git a/exporter/exporter.go b/exporter/exporter.go index 98329fdcf05..9c3c04028d7 100644 --- a/exporter/exporter.go +++ b/exporter/exporter.go @@ -30,6 +30,12 @@ type Logs interface { consumer.Logs } +// Entities is an exporter that can consume entities. +type Entities interface { + component.Component + consumer.Entities +} + // Settings configures exporter creators. type Settings struct { // ID returns the ID of the component that will be created. @@ -90,6 +96,14 @@ type Factory interface { // Deprecated: [v0.112.0] use LogsStability. LogsExporterStability() component.StabilityLevel + // CreateEntities creates an EntitiesExporter based on the config. + // If the exporter type does not support entities, + // this function returns the error [pipeline.ErrSignalNotSupported]. + CreateEntities(ctx context.Context, set Settings, cfg component.Config) (Entities, error) + + // EntitiesStability gets the stability level of the EntityExporter. + EntitiesStability() component.StabilityLevel + unexportedFactoryFunc() } @@ -156,6 +170,17 @@ func (f CreateLogsFunc) CreateLogsExporter(ctx context.Context, set Settings, cf return f.CreateLogs(ctx, set, cfg) } +// CreateEntitiesFunc is the equivalent of Factory.CreateEntities. +type CreateEntitiesFunc func(context.Context, Settings, component.Config) (Entities, error) + +// CreateEntities implements Factory.CreateEntities. +func (f CreateEntitiesFunc) CreateEntities(ctx context.Context, set Settings, cfg component.Config) (Entities, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg) +} + type factory struct { cfgType component.Type component.CreateDefaultConfigFunc @@ -165,6 +190,8 @@ type factory struct { metricsStabilityLevel component.StabilityLevel CreateLogsFunc logsStabilityLevel component.StabilityLevel + CreateEntitiesFunc + entitiesStabilityLevel component.StabilityLevel } func (f *factory) Type() component.Type { @@ -200,7 +227,11 @@ func (f *factory) LogsExporterStability() component.StabilityLevel { return f.logsStabilityLevel } -// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. +func (f *factory) EntitiesStability() component.StabilityLevel { + return f.entitiesStabilityLevel +} + +// WithTraces overrides the default "error not supported" implementation for CreateTracesExporter and the default "undefined" stability level. 
func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.tracesStabilityLevel = sl @@ -224,6 +255,14 @@ func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOpt }) } +// WithEntities overrides the default "error not supported" implementation for CreateEntities and the default "undefined" stability level. +func WithEntities(createEntities CreateEntitiesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factory) { + o.entitiesStabilityLevel = sl + o.CreateEntitiesFunc = createEntities + }) +} + // NewFactory returns a Factory. func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { f := &factory{ diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index b849b3a05e6..1a26368ad8b 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -26,8 +26,10 @@ func TestNewFactory(t *testing.T) { require.Error(t, err) _, err = factory.CreateMetrics(context.Background(), Settings{}, &defaultCfg) require.Error(t, err) - _, err = factory.CreateLogs(context.Background(), Settings{}, &defaultCfg) - assert.Error(t, err) + _, err = factory.CreateLogsExporter(context.Background(), Settings{}, &defaultCfg) + require.Error(t, err) + _, err = factory.CreateEntities(context.Background(), Settings{}, &defaultCfg) + require.Error(t, err) } func TestNewFactoryWithOptions(t *testing.T) { diff --git a/exporter/exporterhelper/constants.go b/exporter/exporterhelper/constants.go index 57829f08c04..946394cd618 100644 --- a/exporter/exporterhelper/constants.go +++ b/exporter/exporterhelper/constants.go @@ -18,10 +18,14 @@ var ( errNilPushMetricsData = errors.New("nil PushMetrics") // errNilPushLogsData is returned when a nil PushLogs is given. errNilPushLogsData = errors.New("nil PushLogs") + // errNilPushEntitiesData is returned when a nil PushEntities is given. + errNilPushEntitiesData = errors.New("nil PushEntities") // errNilTracesConverter is returned when a nil RequestFromTracesFunc is given. errNilTracesConverter = errors.New("nil RequestFromTracesFunc") // errNilMetricsConverter is returned when a nil RequestFromMetricsFunc is given. errNilMetricsConverter = errors.New("nil RequestFromMetricsFunc") // errNilLogsConverter is returned when a nil RequestFromLogsFunc is given. errNilLogsConverter = errors.New("nil RequestFromLogsFunc") + // errNilEntitiesConverter is returned when a nil RequestFromEntitiesFunc is given. 
+ errNilEntitiesConverter = errors.New("nil RequestFromEntitiesFunc") ) diff --git a/exporter/exporterhelper/entities.go b/exporter/exporterhelper/entities.go new file mode 100644 index 00000000000..4f6b8c22304 --- /dev/null +++ b/exporter/exporterhelper/entities.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" + +import ( + "context" + "errors" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal/queue" + "go.opentelemetry.io/collector/pdata/pentity" + "go.opentelemetry.io/collector/pipeline" +) + +var entitiesMarshaler = &pentity.ProtoMarshaler{} +var entitiesUnmarshaler = &pentity.ProtoUnmarshaler{} + +type entitiesRequest struct { + ld pentity.Entities + pusher consumer.ConsumeEntitiesFunc +} + +func newEntitiesRequest(ld pentity.Entities, pusher consumer.ConsumeEntitiesFunc) Request { + return &entitiesRequest{ + ld: ld, + pusher: pusher, + } +} + +// Merge merges the provided entities request into the current request and returns the merged request. +func (req *entitiesRequest) Merge(context.Context, Request) (Request, error) { + // TODO: Implement this method + return req, nil +} + +// MergeSplit splits and/or merges the provided entities request and the current request into one or more requests +// conforming with the MaxSizeConfig. +func (req *entitiesRequest) MergeSplit(context.Context, exporterbatcher.MaxSizeConfig, Request) ([]Request, error) { + // TODO: Implement this method + return nil, nil +} + +func newEntitiesRequestUnmarshalerFunc(pusher consumer.ConsumeEntitiesFunc) exporterqueue.Unmarshaler[Request] { + return func(bytes []byte) (Request, error) { + entities, err := entitiesUnmarshaler.UnmarshalEntities(bytes) + if err != nil { + return nil, err + } + return newEntitiesRequest(entities, pusher), nil + } +} + +func entitiesRequestMarshaler(req Request) ([]byte, error) { + return entitiesMarshaler.MarshalEntities(req.(*entitiesRequest).ld) +} + +func (req *entitiesRequest) OnError(err error) Request { + var eError consumererror.Entities + if errors.As(err, &eError) { + return newEntitiesRequest(eError.Data(), req.pusher) + } + return req +} + +func (req *entitiesRequest) Export(ctx context.Context) error { + return req.pusher(ctx, req.ld) +} + +func (req *entitiesRequest) ItemsCount() int { + return req.ld.EntityCount() +} + +type entitiesExporter struct { + *internal.BaseExporter + consumer.Entities +} + +// NewEntities creates an exporter.Entities that records observability metrics and wraps every request with a Span. 
+func NewEntities( + ctx context.Context, + set exporter.Settings, + cfg component.Config, + pusher consumer.ConsumeEntitiesFunc, + options ...Option, +) (exporter.Entities, error) { + if cfg == nil { + return nil, errNilConfig + } + if pusher == nil { + return nil, errNilPushEntitiesData + } + entitiesOpts := []Option{ + internal.WithMarshaler(entitiesRequestMarshaler), internal.WithUnmarshaler(newEntitiesRequestUnmarshalerFunc(pusher)), + } + return NewEntitiesRequest(ctx, set, requestFromEntities(pusher), append(entitiesOpts, options...)...) +} + +// RequestFromEntitiesFunc converts pentity.Entities data into a user-defined request. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +type RequestFromEntitiesFunc func(context.Context, pentity.Entities) (Request, error) + +// requestFromEntities returns a RequestFromEntitiesFunc that converts pentity.Entities into a Request. +func requestFromEntities(pusher consumer.ConsumeEntitiesFunc) RequestFromEntitiesFunc { + return func(_ context.Context, ld pentity.Entities) (Request, error) { + return newEntitiesRequest(ld, pusher), nil + } +} + +// NewEntitiesRequest creates new entities exporter based on custom EntitiesConverter and RequestSender. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func NewEntitiesRequest( + _ context.Context, + set exporter.Settings, + converter RequestFromEntitiesFunc, + options ...Option, +) (exporter.Entities, error) { + if set.Logger == nil { + return nil, errNilLogger + } + + if converter == nil { + return nil, errNilEntitiesConverter + } + + be, err := internal.NewBaseExporter(set, pipeline.SignalEntities, newEntitiesWithObservability, options...) + if err != nil { + return nil, err + } + + lc, err := consumer.NewEntities(func(ctx context.Context, ld pentity.Entities) error { + req, cErr := converter(ctx, ld) + if cErr != nil { + set.Logger.Error("Failed to convert entities. Dropping data.", + zap.Int("dropped_log_records", ld.EntityCount()), + zap.Error(err)) + return consumererror.NewPermanent(cErr) + } + sErr := be.Send(ctx, req) + if errors.Is(sErr, queue.ErrQueueIsFull) { + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalEntities, int64(req.ItemsCount())) + } + return sErr + }, be.ConsumerOptions...) 
+ + return &entitiesExporter{ + BaseExporter: be, + Entities: lc, + }, err +} + +type entitiesExporterWithObservability struct { + internal.BaseRequestSender + obsrep *internal.ObsReport +} + +func newEntitiesWithObservability(obsrep *internal.ObsReport) internal.RequestSender { + return &entitiesExporterWithObservability{obsrep: obsrep} +} + +func (lewo *entitiesExporterWithObservability) Send(ctx context.Context, req Request) error { + c := lewo.obsrep.StartEntitiesOp(ctx) + numLogRecords := req.ItemsCount() + err := lewo.NextSender.Send(c, req) + lewo.obsrep.EndEntitiesOp(c, numLogRecords, err) + return err +} diff --git a/exporter/exporterhelper/internal/obsexporter.go b/exporter/exporterhelper/internal/obsexporter.go index 004e5c48248..e081b1ddf79 100644 --- a/exporter/exporterhelper/internal/obsexporter.go +++ b/exporter/exporterhelper/internal/obsexporter.go @@ -107,6 +107,19 @@ func (or *ObsReport) EndProfilesOp(ctx context.Context, numSpans int, err error) endSpan(ctx, err, numSent, numFailedToSend, SentSamplesKey, FailedToSendSamplesKey) } +// StartEntitiesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartEntitiesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) +} + +// EndEntitiesOp completes the export operation that was started with startEntitiesOp. +func (or *ObsReport) EndEntitiesOp(ctx context.Context, numEntities int, err error) { + numSent, numFailedToSend := toNumItems(numEntities, err) + endSpan(ctx, err, numSent, numFailedToSend, SentSamplesKey, FailedToSendSamplesKey) +} + // startOp creates the span used to trace the operation. Returning // the updated context and the created span. func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { diff --git a/exporter/exporterprofiles/exporter.go b/exporter/exporterprofiles/exporter.go index ca49317f016..a8c5ed03eb8 100644 --- a/exporter/exporterprofiles/exporter.go +++ b/exporter/exporterprofiles/exporter.go @@ -99,6 +99,13 @@ func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel }) } +// WithEntities overrides the default "error not supported" implementation for CreateEntities and the default "undefined" stability level. 
+func WithEntities(createEntities exporter.CreateEntitiesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, exporter.WithEntities(createEntities, sl)) + }) +} + type factory struct { exporter.Factory CreateProfilesFunc diff --git a/exporter/exportertest/nop_exporter.go b/exporter/exportertest/nop_exporter.go index 6d6435ac43c..37c9123448f 100644 --- a/exporter/exportertest/nop_exporter.go +++ b/exporter/exportertest/nop_exporter.go @@ -35,6 +35,7 @@ func NewNopFactory() exporter.Factory { exporterprofiles.WithMetrics(createMetrics, component.StabilityLevelStable), exporterprofiles.WithLogs(createLogs, component.StabilityLevelStable), exporterprofiles.WithProfiles(createProfiles, component.StabilityLevelAlpha), + exporterprofiles.WithEntities(createEntities, component.StabilityLevelStable), ) } @@ -54,6 +55,10 @@ func createProfiles(context.Context, exporter.Settings, component.Config) (expor return nopInstance, nil } +func createEntities(context.Context, exporter.Settings, component.Config) (exporter.Entities, error) { + return nopInstance, nil +} + type nopConfig struct{} var nopInstance = &nop{ diff --git a/exporter/internal/entities.go b/exporter/internal/entities.go new file mode 100644 index 00000000000..b7cfb5076fa --- /dev/null +++ b/exporter/internal/entities.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/internal" + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" +) + +// Entities is an exporter that can consume entities. +type Entities interface { + component.Component + consumer.Entities +} diff --git a/exporter/nopexporter/README.md b/exporter/nopexporter/README.md index 53ed9ac4c14..51e4a82a150 100644 --- a/exporter/nopexporter/README.md +++ b/exporter/nopexporter/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: traces, metrics, logs | +| Stability | [beta]: traces, metrics, logs, entities | | Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fnop%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fnop) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fnop%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fnop) | diff --git a/exporter/nopexporter/internal/metadata/generated_status.go b/exporter/nopexporter/internal/metadata/generated_status.go index d7336115c61..60fac80efcb 100644 --- a/exporter/nopexporter/internal/metadata/generated_status.go +++ b/exporter/nopexporter/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - TracesStability = component.StabilityLevelBeta - MetricsStability = component.StabilityLevelBeta - LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelBeta + MetricsStability = component.StabilityLevelBeta + LogsStability = component.StabilityLevelBeta + EntitiesStability = component.StabilityLevelBeta ) diff --git a/exporter/nopexporter/metadata.yaml 
b/exporter/nopexporter/metadata.yaml index 352312a44c4..d2437ec3ce0 100644 --- a/exporter/nopexporter/metadata.yaml +++ b/exporter/nopexporter/metadata.yaml @@ -4,5 +4,5 @@ github_project: open-telemetry/opentelemetry-collector status: class: exporter stability: - beta: [traces, metrics, logs] + beta: [traces, metrics, logs, entities] distributions: [core, contrib, k8s] diff --git a/internal/fanoutconsumer/entities.go b/internal/fanoutconsumer/entities.go new file mode 100644 index 00000000000..40628618c50 --- /dev/null +++ b/internal/fanoutconsumer/entities.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package fanoutconsumer contains implementations of Traces/Metrics/Entities consumers +// that fan out the data to multiple other consumers. +package fanoutconsumer // import "go.opentelemetry.io/collector/internal/fanoutconsumer" + +import ( + "context" + + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pentity" +) + +// NewEntities wraps multiple log consumers in a single one. +// It fans out the incoming data to all the consumers, and does smart routing: +// - Clones only to the consumer that needs to mutate the data. +// - If all consumers needs to mutate the data one will get the original mutable data. +func NewEntities(lcs []consumer.Entities) consumer.Entities { + // Don't wrap if there is only one non-mutating consumer. + if len(lcs) == 1 && !lcs[0].Capabilities().MutatesData { + return lcs[0] + } + + lc := &entitiesConsumer{} + for i := 0; i < len(lcs); i++ { + if lcs[i].Capabilities().MutatesData { + lc.mutable = append(lc.mutable, lcs[i]) + } else { + lc.readonly = append(lc.readonly, lcs[i]) + } + } + return lc +} + +type entitiesConsumer struct { + mutable []consumer.Entities + readonly []consumer.Entities +} + +func (lsc *entitiesConsumer) Capabilities() consumer.Capabilities { + // If all consumers are mutating, then the original data will be passed to one of them. + return consumer.Capabilities{MutatesData: len(lsc.mutable) > 0 && len(lsc.readonly) == 0} +} + +// ConsumeEntities exports the pentity.Entities to all consumers wrapped by the current one. +func (lsc *entitiesConsumer) ConsumeEntities(ctx context.Context, ld pentity.Entities) error { + var errs error + + if len(lsc.mutable) > 0 { + // Clone the data before sending to all mutating consumers except the last one. + for i := 0; i < len(lsc.mutable)-1; i++ { + errs = multierr.Append(errs, lsc.mutable[i].ConsumeEntities(ctx, cloneEntities(ld))) + } + // Send data as is to the last mutating consumer only if there are no other non-mutating consumers and the + // data is mutable. Never share the same data between a mutating and a non-mutating consumer since the + // non-mutating consumer may process data async and the mutating consumer may change the data before that. + lastConsumer := lsc.mutable[len(lsc.mutable)-1] + if len(lsc.readonly) == 0 && !ld.IsReadOnly() { + errs = multierr.Append(errs, lastConsumer.ConsumeEntities(ctx, ld)) + } else { + errs = multierr.Append(errs, lastConsumer.ConsumeEntities(ctx, cloneEntities(ld))) + } + } + + // Mark the data as read-only if it will be sent to more than one read-only consumer. 
+ if len(lsc.readonly) > 1 && !ld.IsReadOnly() { + ld.MarkReadOnly() + } + for _, lc := range lsc.readonly { + errs = multierr.Append(errs, lc.ConsumeEntities(ctx, ld)) + } + + return errs +} + +func cloneEntities(ld pentity.Entities) pentity.Entities { + clonedEntities := pentity.NewEntities() + ld.CopyTo(clonedEntities) + return clonedEntities +} diff --git a/pdata/internal/cmd/pdatagen/internal/packages.go b/pdata/internal/cmd/pdatagen/internal/packages.go index 47708bc975e..7bec8f0e053 100644 --- a/pdata/internal/cmd/pdatagen/internal/packages.go +++ b/pdata/internal/cmd/pdatagen/internal/packages.go @@ -18,6 +18,8 @@ const header = `// Copyright The OpenTelemetry Authors // AllPackages is a list of all packages that needs to be generated. var AllPackages = []*Package{ pcommon, + pentity, + pentityotlp, plog, plogotlp, pmetric, diff --git a/pdata/internal/cmd/pdatagen/internal/pcommon_package.go b/pdata/internal/cmd/pdatagen/internal/pcommon_package.go index d6085334f56..cb9ce82f495 100644 --- a/pdata/internal/cmd/pdatagen/internal/pcommon_package.go +++ b/pdata/internal/cmd/pdatagen/internal/pcommon_package.go @@ -23,6 +23,8 @@ var pcommon = &Package{ structs: []baseStruct{ scope, resource, + entityRefSlice, + entityRef, byteSlice, float64Slice, uInt64Slice, @@ -159,6 +161,10 @@ var resource = &messageValueStruct{ fields: []baseField{ attributes, droppedAttributesCount, + &sliceField{ + fieldName: "Entities", + returnSlice: entityRefSlice, + }, }, } @@ -167,6 +173,35 @@ var resourceField = &messageValueField{ returnMessage: resource, } +var entityRefSlice = &sliceOfPtrs{ + structName: "ResourceEntityRefSlice", + packageName: "pcommon", + element: entityRef, +} + +var entityRef = &messageValueStruct{ + structName: "ResourceEntityRef", + packageName: "pcommon", + originFullName: "otlpresource.ResourceEntityRef", + fields: []baseField{ + schemaURLField, + &primitiveField{ + fieldName: "Type", + returnType: "string", + defaultVal: `""`, + testVal: `"host"`, + }, + &sliceField{ + fieldName: "IdAttrKeys", + returnSlice: stringSlice, + }, + &sliceField{ + fieldName: "DescrAttrKeys", + returnSlice: stringSlice, + }, + }, +} + var byteSlice = &primitiveSliceStruct{ structName: "ByteSlice", packageName: "pcommon", diff --git a/pdata/internal/cmd/pdatagen/internal/pentity_package.go b/pdata/internal/cmd/pdatagen/internal/pentity_package.go new file mode 100644 index 00000000000..7ed69de8013 --- /dev/null +++ b/pdata/internal/cmd/pdatagen/internal/pentity_package.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal/cmd/pdatagen/internal" + +var pentity = &Package{ + info: &PackageInfo{ + name: "pentity", + path: "pentity", + imports: []string{ + `"sort"`, + ``, + `"go.opentelemetry.io/collector/pdata/internal"`, + `"go.opentelemetry.io/collector/pdata/internal/data"`, + `otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1"`, + `"go.opentelemetry.io/collector/pdata/pcommon"`, + }, + testImports: []string{ + `"testing"`, + `"unsafe"`, + ``, + `"github.com/stretchr/testify/assert"`, + ``, + `"go.opentelemetry.io/collector/pdata/internal"`, + `"go.opentelemetry.io/collector/pdata/internal/data"`, + `otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1"`, + `"go.opentelemetry.io/collector/pdata/pcommon"`, + }, + }, + structs: []baseStruct{ + resourceEntitiesSlice, + resourceEntities, + scopeEntitiesSlice, + 
scopeEntities, + entityEventSlice, + entityEvent, + entityState, + entityDelete, + }, +} + +var resourceEntitiesSlice = &sliceOfPtrs{ + structName: "ResourceEntitiesSlice", + element: resourceEntities, +} + +var resourceEntities = &messageValueStruct{ + structName: "ResourceEntities", + description: "// ResourceEntities is a collection of entities from a Resource.", + originFullName: "otlpentities.ResourceEntities", + fields: []baseField{ + resourceField, + schemaURLField, + &sliceField{ + fieldName: "ScopeEntities", + returnSlice: scopeEntitiesSlice, + }, + }, +} + +var scopeEntitiesSlice = &sliceOfPtrs{ + structName: "ScopeEntitiesSlice", + element: scopeEntities, +} + +var scopeEntities = &messageValueStruct{ + structName: "ScopeEntities", + description: "// ScopeEntities is a collection of entities from a LibraryInstrumentation.", + originFullName: "otlpentities.ScopeEntities", + fields: []baseField{ + scopeField, + schemaURLField, + &sliceField{ + fieldName: "EntityEvents", + returnSlice: entityEventSlice, + }, + }, +} + +var entityEventSlice = &sliceOfPtrs{ + structName: "EntityEventSlice", + element: entityEvent, +} + +var entityEvent = &messageValueStruct{ + structName: "EntityEvent", + description: "// EntityEvent are experimental implementation of OpenTelemetry Entity Data Model.\n", + originFullName: "otlpentities.EntityEvent", + fields: []baseField{ + &primitiveTypedField{ + fieldName: "Timestamp", + originFieldName: "TimeUnixNano", + returnType: timestampType, + }, + &primitiveField{ + fieldName: "EntityType", + returnType: "string", + defaultVal: `""`, + testVal: `"service"`, + }, + entityID, + &oneOfField{ + typeName: "EventType", + originFieldName: "Data", + testValueIdx: 1, // Delete + omitOriginFieldNameInNames: true, + values: []oneOfValue{ + &oneOfMessageValue{ + fieldName: "EntityState", + originFieldPackageName: "otlpentities", + returnMessage: entityState, + }, + &oneOfMessageValue{ + fieldName: "EntityDelete", + originFieldPackageName: "otlpentities", + returnMessage: entityDelete, + }, + }, + }, + }, +} + +var entityID = &sliceField{ + fieldName: "Id", + returnSlice: mapStruct, +} + +var entityState = &messageValueStruct{ + structName: "EntityState", + description: "// EntityState are experimental implementation of OpenTelemetry Entity Data Model.\n", + originFullName: "otlpentities.EntityState", + fields: []baseField{ + attributes, + droppedAttributesCount, + }, +} + +var entityDelete = &messageValueStruct{ + structName: "EntityDelete", + description: "// EntityDelete are experimental implementation of OpenTelemetry Entity Data Model.\n", + originFullName: "otlpentities.EntityDelete", + fields: []baseField{}, +} diff --git a/pdata/internal/cmd/pdatagen/internal/pentityotlp_package.go b/pdata/internal/cmd/pdatagen/internal/pentityotlp_package.go new file mode 100644 index 00000000000..ff41202d3b9 --- /dev/null +++ b/pdata/internal/cmd/pdatagen/internal/pentityotlp_package.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal/cmd/pdatagen/internal" +import ( + "path/filepath" +) + +var pentityotlp = &Package{ + info: &PackageInfo{ + name: "pentityotlp", + path: filepath.Join("pentity", "pentityotlp"), + imports: []string{ + `otlpcollectorentity "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + }, + }, + structs: 
[]baseStruct{ + exportEntitiesPartialSuccess, + }, +} + +var exportEntitiesPartialSuccess = &messageValueStruct{ + structName: "ExportPartialSuccess", + description: "// ExportPartialSuccess represents the details of a partially successful export request.", + originFullName: "otlpcollectorentity.ExportEntitiesPartialSuccess", + fields: []baseField{ + &primitiveField{ + fieldName: "RejectedEntities", + returnType: "int64", + defaultVal: `int64(0)`, + testVal: `int64(13)`, + }, + &primitiveField{ + fieldName: "ErrorMessage", + returnType: "string", + defaultVal: `""`, + testVal: `"error message"`, + }, + }, +} diff --git a/pdata/internal/data/protogen/collector/entities/v1/entities_service.pb.go b/pdata/internal/data/protogen/collector/entities/v1/entities_service.pb.go new file mode 100644 index 00000000000..0e2756660c5 --- /dev/null +++ b/pdata/internal/data/protogen/collector/entities/v1/entities_service.pb.go @@ -0,0 +1,840 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/entities/v1/entities_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportEntitiesServiceRequest struct { + // An array of ResourceEntities. 
+ ResourceEntities []*v1.ResourceEntities `protobuf:"bytes,1,rep,name=resource_entities,json=resourceEntities,proto3" json:"resource_entities,omitempty"` +} + +func (m *ExportEntitiesServiceRequest) Reset() { *m = ExportEntitiesServiceRequest{} } +func (m *ExportEntitiesServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportEntitiesServiceRequest) ProtoMessage() {} +func (*ExportEntitiesServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fbd01640b6ae8848, []int{0} +} +func (m *ExportEntitiesServiceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportEntitiesServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportEntitiesServiceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportEntitiesServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportEntitiesServiceRequest.Merge(m, src) +} +func (m *ExportEntitiesServiceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExportEntitiesServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportEntitiesServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportEntitiesServiceRequest proto.InternalMessageInfo + +func (m *ExportEntitiesServiceRequest) GetResourceEntities() []*v1.ResourceEntities { + if m != nil { + return m.ResourceEntities + } + return nil +} + +type ExportEntitiesServiceResponse struct { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. 
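The partial-success contract spelled out in the comment above reduces to a simple rule for servers, sketched below with made-up values. This is an illustration only; ExportEntitiesServiceResponse and ExportEntitiesPartialSuccess are the messages defined in this file (the latter appears a little further down).

// Sketch only: a server that accepts most of a request but rejects three entity events.
resp := &ExportEntitiesServiceResponse{
    PartialSuccess: ExportEntitiesPartialSuccess{
        RejectedEntities: 3, // number of rejected entity events
        ErrorMessage:     "entity events without identifying attributes were dropped",
    },
}

// Full success: leave PartialSuccess at its zero value (RejectedEntities == 0 and
// ErrorMessage == ""), which the data sender should treat the same as the field being unset.
ok := &ExportEntitiesServiceResponse{}
_, _ = resp, ok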
+ PartialSuccess ExportEntitiesPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"` +} + +func (m *ExportEntitiesServiceResponse) Reset() { *m = ExportEntitiesServiceResponse{} } +func (m *ExportEntitiesServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportEntitiesServiceResponse) ProtoMessage() {} +func (*ExportEntitiesServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fbd01640b6ae8848, []int{1} +} +func (m *ExportEntitiesServiceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportEntitiesServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportEntitiesServiceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportEntitiesServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportEntitiesServiceResponse.Merge(m, src) +} +func (m *ExportEntitiesServiceResponse) XXX_Size() int { + return m.Size() +} +func (m *ExportEntitiesServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportEntitiesServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportEntitiesServiceResponse proto.InternalMessageInfo + +func (m *ExportEntitiesServiceResponse) GetPartialSuccess() ExportEntitiesPartialSuccess { + if m != nil { + return m.PartialSuccess + } + return ExportEntitiesPartialSuccess{} +} + +type ExportEntitiesPartialSuccess struct { + // The number of rejected EntityEvents. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + RejectedEntities int64 `protobuf:"varint,1,opt,name=rejected_entities,json=rejectedEntities,proto3" json:"rejected_entities,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. 
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (m *ExportEntitiesPartialSuccess) Reset() { *m = ExportEntitiesPartialSuccess{} } +func (m *ExportEntitiesPartialSuccess) String() string { return proto.CompactTextString(m) } +func (*ExportEntitiesPartialSuccess) ProtoMessage() {} +func (*ExportEntitiesPartialSuccess) Descriptor() ([]byte, []int) { + return fileDescriptor_fbd01640b6ae8848, []int{2} +} +func (m *ExportEntitiesPartialSuccess) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportEntitiesPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportEntitiesPartialSuccess.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportEntitiesPartialSuccess) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportEntitiesPartialSuccess.Merge(m, src) +} +func (m *ExportEntitiesPartialSuccess) XXX_Size() int { + return m.Size() +} +func (m *ExportEntitiesPartialSuccess) XXX_DiscardUnknown() { + xxx_messageInfo_ExportEntitiesPartialSuccess.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportEntitiesPartialSuccess proto.InternalMessageInfo + +func (m *ExportEntitiesPartialSuccess) GetRejectedEntities() int64 { + if m != nil { + return m.RejectedEntities + } + return 0 +} + +func (m *ExportEntitiesPartialSuccess) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func init() { + proto.RegisterType((*ExportEntitiesServiceRequest)(nil), "opentelemetry.proto.collector.entities.v1.ExportEntitiesServiceRequest") + proto.RegisterType((*ExportEntitiesServiceResponse)(nil), "opentelemetry.proto.collector.entities.v1.ExportEntitiesServiceResponse") + proto.RegisterType((*ExportEntitiesPartialSuccess)(nil), "opentelemetry.proto.collector.entities.v1.ExportEntitiesPartialSuccess") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/entities/v1/entities_service.proto", fileDescriptor_fbd01640b6ae8848) +} + +var fileDescriptor_fbd01640b6ae8848 = []byte{ + // 424 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xcd, 0xaa, 0xd3, 0x40, + 0x14, 0xce, 0xb4, 0x52, 0x70, 0xaa, 0x56, 0x43, 0x17, 0xa5, 0x68, 0x2c, 0x71, 0x93, 0xa2, 0x4c, + 0x48, 0x7d, 0x01, 0xa9, 0x14, 0xdd, 0x88, 0x21, 0x15, 0x17, 0x2e, 0x0c, 0x31, 0x3d, 0x84, 0x48, + 0x9a, 0x19, 0x67, 0xa6, 0x41, 0x37, 0x3e, 0x83, 0x3b, 0x57, 0xbe, 0x80, 0x3b, 0xdf, 0xa2, 0xcb, + 0x2e, 0x5d, 0x88, 0x5c, 0xda, 0x17, 0xb9, 0x24, 0xd3, 0x84, 0x26, 0xe4, 0x42, 0xb9, 0x77, 0x37, + 0xf9, 0x72, 0xbe, 0x9f, 0xf9, 0x86, 0x83, 0x5f, 0x50, 0x06, 0xa9, 0x84, 0x04, 0xd6, 0x20, 0xf9, + 0x37, 0x9b, 0x71, 0x2a, 0xa9, 0x1d, 0xd2, 0x24, 0x81, 0x50, 0x52, 0x6e, 0x43, 0x2a, 0x63, 0x19, + 0x83, 0xb0, 0x33, 0xa7, 0x3a, 0xfb, 0x02, 0x78, 0x16, 0x87, 0x40, 0x8a, 0x61, 0x7d, 0x5a, 0x53, + 0x50, 0x20, 0xa9, 0x14, 0x48, 0xc9, 0x22, 0x99, 0x33, 0x1e, 0x46, 0x34, 0xa2, 0xca, 0x22, 0x3f, + 0xa9, 0xd9, 0x31, 0x69, 0x8b, 0xd0, 0x66, 0xac, 0xe6, 0xcd, 0xef, 0xf8, 0xe1, 0xe2, 0x2b, 0xa3, + 0x5c, 0x2e, 0x8e, 0xf8, 0x52, 0xe5, 0xf1, 0xe0, 0xcb, 0x06, 0x84, 0xd4, 0x3f, 0xe2, 0x07, 0x1c, + 0x04, 0xdd, 0xf0, 0x10, 0xfc, 0x92, 0x3a, 0x42, 0x93, 0xae, 0xd5, 0x9f, 0x39, 0xa4, 0x2d, 0xec, + 0x49, 0x44, 0xe2, 0x1d, 0x99, 0xa5, 0xb6, 0x77, 0x9f, 0x37, 0x10, 0xf3, 0x27, 0xc2, 0x8f, 0xae, + 0x08, 
0x20, 0x18, 0x4d, 0x05, 0xe8, 0x19, 0x1e, 0xb0, 0x80, 0xcb, 0x38, 0x48, 0x7c, 0xb1, 0x09, + 0x43, 0x10, 0xb9, 0x3f, 0xb2, 0xfa, 0xb3, 0x57, 0xe4, 0xec, 0xb2, 0x48, 0xdd, 0xc2, 0x55, 0x7a, + 0x4b, 0x25, 0x37, 0xbf, 0xb5, 0xfd, 0xff, 0x58, 0xf3, 0xee, 0xb1, 0x1a, 0x6a, 0xb2, 0x66, 0x33, + 0x75, 0x96, 0xfe, 0x34, 0x6f, 0xe6, 0x33, 0x84, 0x12, 0x56, 0xa7, 0xcd, 0x20, 0xab, 0x9b, 0x5f, + 0x53, 0xfd, 0x28, 0xa9, 0xfa, 0x13, 0x7c, 0x17, 0x38, 0xa7, 0xdc, 0x5f, 0x83, 0x10, 0x41, 0x04, + 0xa3, 0xce, 0x04, 0x59, 0xb7, 0xbd, 0x3b, 0x05, 0xf8, 0x46, 0x61, 0xb3, 0x3f, 0x08, 0x0f, 0x1a, + 0x2d, 0xe8, 0xbf, 0x10, 0xee, 0xa9, 0x18, 0xfa, 0xf5, 0xef, 0x5b, 0x7f, 0xd3, 0xf1, 0xeb, 0x9b, + 0x0b, 0xa9, 0xb7, 0x31, 0xb5, 0xf9, 0x3f, 0xb4, 0xdd, 0x1b, 0x68, 0xb7, 0x37, 0xd0, 0xc5, 0xde, + 0x40, 0x3f, 0x0e, 0x86, 0xb6, 0x3b, 0x18, 0xda, 0xdf, 0x83, 0xa1, 0xe1, 0x67, 0x31, 0x3d, 0xdf, + 0x68, 0x3e, 0x6c, 0x78, 0xb8, 0xf9, 0xac, 0x8b, 0x3e, 0xb8, 0x51, 0x53, 0x25, 0x3e, 0xdd, 0x29, + 0xb6, 0x0a, 0x64, 0x60, 0xc7, 0xa9, 0x04, 0x9e, 0x06, 0x89, 0x5d, 0x7c, 0x15, 0x36, 0x11, 0xa4, + 0xed, 0xab, 0xf7, 0xbb, 0x33, 0x7d, 0xcb, 0x20, 0x7d, 0x57, 0xe9, 0x15, 0x4e, 0xe4, 0x65, 0x95, + 0xaa, 0x0c, 0x42, 0xde, 0x3b, 0x9f, 0x7a, 0x85, 0xd6, 0xf3, 0xcb, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xc4, 0xc3, 0xd2, 0xe0, 0xda, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// EntitiesServiceClient is the client API for EntitiesService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type EntitiesServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportEntitiesServiceRequest, opts ...grpc.CallOption) (*ExportEntitiesServiceResponse, error) +} + +type entitiesServiceClient struct { + cc *grpc.ClientConn +} + +func NewEntitiesServiceClient(cc *grpc.ClientConn) EntitiesServiceClient { + return &entitiesServiceClient{cc} +} + +func (c *entitiesServiceClient) Export(ctx context.Context, in *ExportEntitiesServiceRequest, opts ...grpc.CallOption) (*ExportEntitiesServiceResponse, error) { + out := new(ExportEntitiesServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.entities.v1.EntitiesService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EntitiesServiceServer is the server API for EntitiesService service. +type EntitiesServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportEntitiesServiceRequest) (*ExportEntitiesServiceResponse, error) +} + +// UnimplementedEntitiesServiceServer can be embedded to have forward compatible implementations. 
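The comment above describes the embedding pattern for forward-compatible servers. Below is a minimal, illustration-only sketch of a backend that uses only what this file declares (UnimplementedEntitiesServiceServer and RegisterEntitiesServiceServer appear just below); the backend type and its storage step are hypothetical.

// Sketch only: a minimal EntitiesService backend.
type entitiesBackend struct {
    // Embedding keeps the implementation forward compatible with new service methods.
    UnimplementedEntitiesServiceServer
}

func (b *entitiesBackend) Export(_ context.Context, req *ExportEntitiesServiceRequest) (*ExportEntitiesServiceResponse, error) {
    // A real backend would persist req.GetResourceEntities() here; this sketch accepts everything.
    _ = req.GetResourceEntities()
    return &ExportEntitiesServiceResponse{}, nil
}

// Wiring the backend into a gRPC server (grpc is already imported by this file).
func registerEntitiesBackend(s *grpc.Server) {
    RegisterEntitiesServiceServer(s, &entitiesBackend{})
}

On the client side, the matching call is NewEntitiesServiceClient(conn).Export(ctx, req), as declared by the EntitiesServiceClient interface above.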
+type UnimplementedEntitiesServiceServer struct { +} + +func (*UnimplementedEntitiesServiceServer) Export(ctx context.Context, req *ExportEntitiesServiceRequest) (*ExportEntitiesServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterEntitiesServiceServer(s *grpc.Server, srv EntitiesServiceServer) { + s.RegisterService(&_EntitiesService_serviceDesc, srv) +} + +func _EntitiesService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportEntitiesServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntitiesServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.entities.v1.EntitiesService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntitiesServiceServer).Export(ctx, req.(*ExportEntitiesServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _EntitiesService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.entities.v1.EntitiesService", + HandlerType: (*EntitiesServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _EntitiesService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/entities/v1/entities_service.proto", +} + +func (m *ExportEntitiesServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportEntitiesServiceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportEntitiesServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceEntities) > 0 { + for iNdEx := len(m.ResourceEntities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceEntities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntitiesService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExportEntitiesServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportEntitiesServiceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportEntitiesServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntitiesService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExportEntitiesPartialSuccess) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportEntitiesPartialSuccess) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *ExportEntitiesPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintEntitiesService(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x12 + } + if m.RejectedEntities != 0 { + i = encodeVarintEntitiesService(dAtA, i, uint64(m.RejectedEntities)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEntitiesService(dAtA []byte, offset int, v uint64) int { + offset -= sovEntitiesService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExportEntitiesServiceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceEntities) > 0 { + for _, e := range m.ResourceEntities { + l = e.Size() + n += 1 + l + sovEntitiesService(uint64(l)) + } + } + return n +} + +func (m *ExportEntitiesServiceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PartialSuccess.Size() + n += 1 + l + sovEntitiesService(uint64(l)) + return n +} + +func (m *ExportEntitiesPartialSuccess) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RejectedEntities != 0 { + n += 1 + sovEntitiesService(uint64(m.RejectedEntities)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovEntitiesService(uint64(l)) + } + return n +} + +func sovEntitiesService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEntitiesService(x uint64) (n int) { + return sovEntitiesService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExportEntitiesServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportEntitiesServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportEntitiesServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceEntities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntitiesService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntitiesService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceEntities = append(m.ResourceEntities, &v1.ResourceEntities{}) + if err := m.ResourceEntities[len(m.ResourceEntities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntitiesService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntitiesService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportEntitiesServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportEntitiesServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportEntitiesServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntitiesService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntitiesService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntitiesService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntitiesService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportEntitiesPartialSuccess) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportEntitiesPartialSuccess: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportEntitiesPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectedEntities", wireType) + } + m.RejectedEntities = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectedEntities |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntitiesService + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthEntitiesService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntitiesService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntitiesService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntitiesService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntitiesService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEntitiesService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEntitiesService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEntitiesService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEntitiesService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntitiesService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEntitiesService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/pdata/internal/data/protogen/entities/v1/entities.pb.go b/pdata/internal/data/protogen/entities/v1/entities.pb.go new file mode 100644 index 00000000000..46f9caa6478 --- /dev/null +++ b/pdata/internal/data/protogen/entities/v1/entities.pb.go @@ -0,0 +1,1763 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/entities/v1/entities.proto + +package v1 + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EntitiesData represents the entities data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP entities data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type EntitiesData struct { + ResourceEntities []*ResourceEntities `protobuf:"bytes,1,rep,name=resource_entities,json=resourceEntities,proto3" json:"resource_entities,omitempty"` +} + +func (m *EntitiesData) Reset() { *m = EntitiesData{} } +func (m *EntitiesData) String() string { return proto.CompactTextString(m) } +func (*EntitiesData) ProtoMessage() {} +func (*EntitiesData) Descriptor() ([]byte, []int) { + return fileDescriptor_23658fae9436c0cd, []int{0} +} +func (m *EntitiesData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntitiesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntitiesData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EntitiesData) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntitiesData.Merge(m, src) +} +func (m *EntitiesData) XXX_Size() int { + return m.Size() +} +func (m *EntitiesData) XXX_DiscardUnknown() { + xxx_messageInfo_EntitiesData.DiscardUnknown(m) +} + +var xxx_messageInfo_EntitiesData proto.InternalMessageInfo + +func (m *EntitiesData) GetResourceEntities() []*ResourceEntities { + if m != nil { + return m.ResourceEntities + } + return nil +} + +// A collection of ScopeEntities from a Resource. +type ResourceEntities struct { + // The resource represents a parent entity for all entities in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of ScopeEntities that originate from a resource. + ScopeEntities []*ScopeEntities `protobuf:"bytes,2,rep,name=scope_entities,json=scopeEntities,proto3" json:"scope_entities,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_entities" field which have their own schema_url field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ResourceEntities) Reset() { *m = ResourceEntities{} } +func (m *ResourceEntities) String() string { return proto.CompactTextString(m) } +func (*ResourceEntities) ProtoMessage() {} +func (*ResourceEntities) Descriptor() ([]byte, []int) { + return fileDescriptor_23658fae9436c0cd, []int{1} +} +func (m *ResourceEntities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceEntities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceEntities.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceEntities) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceEntities.Merge(m, src) +} +func (m *ResourceEntities) XXX_Size() int { + return m.Size() +} +func (m *ResourceEntities) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceEntities.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceEntities proto.InternalMessageInfo + +func (m *ResourceEntities) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceEntities) GetScopeEntities() []*ScopeEntities { + if m != nil { + return m.ScopeEntities + } + return nil +} + +func (m *ResourceEntities) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A collection of Entities produced by a Scope. +type ScopeEntities struct { + // The instrumentation scope information for the entities in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` + EntityEvents []*EntityEvent `protobuf:"bytes,2,rep,name=entity_events,json=entityEvents,proto3" json:"entity_events,omitempty"` + // This schema_url applies to all entities in the "entity_events" field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ScopeEntities) Reset() { *m = ScopeEntities{} } +func (m *ScopeEntities) String() string { return proto.CompactTextString(m) } +func (*ScopeEntities) ProtoMessage() {} +func (*ScopeEntities) Descriptor() ([]byte, []int) { + return fileDescriptor_23658fae9436c0cd, []int{2} +} +func (m *ScopeEntities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeEntities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScopeEntities.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScopeEntities) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeEntities.Merge(m, src) +} +func (m *ScopeEntities) XXX_Size() int { + return m.Size() +} +func (m *ScopeEntities) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeEntities.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeEntities proto.InternalMessageInfo + +func (m *ScopeEntities) GetScope() v11.InstrumentationScope { + if m != nil { + return m.Scope + } + return v11.InstrumentationScope{} +} + +func (m *ScopeEntities) GetEntityEvents() []*EntityEvent { + if m != nil { + return m.EntityEvents + } + return nil +} + +func (m *ScopeEntities) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// Entity event, describes something that happened with the entity. +type EntityEvent struct { + // Time when this state was observed. + TimeUnixNano uint64 `protobuf:"varint,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Type of the entity, e.g. "service", "host", etc. + EntityType string `protobuf:"bytes,2,opt,name=entity_type,json=entityType,proto3" json:"entity_type,omitempty"` + // Set of attributes that identify the entity. + Id []v11.KeyValue `protobuf:"bytes,3,rep,name=id,proto3" json:"id"` + // One of the following event types must be set. 
+ // + // Types that are valid to be assigned to Data: + // *EntityEvent_EntityState + // *EntityEvent_EntityDelete + Data isEntityEvent_Data `protobuf_oneof:"data"` +} + +func (m *EntityEvent) Reset() { *m = EntityEvent{} } +func (m *EntityEvent) String() string { return proto.CompactTextString(m) } +func (*EntityEvent) ProtoMessage() {} +func (*EntityEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_23658fae9436c0cd, []int{3} +} +func (m *EntityEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntityEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntityEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EntityEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityEvent.Merge(m, src) +} +func (m *EntityEvent) XXX_Size() int { + return m.Size() +} +func (m *EntityEvent) XXX_DiscardUnknown() { + xxx_messageInfo_EntityEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityEvent proto.InternalMessageInfo + +type isEntityEvent_Data interface { + isEntityEvent_Data() + MarshalTo([]byte) (int, error) + Size() int +} + +type EntityEvent_EntityState struct { + EntityState *EntityState `protobuf:"bytes,4,opt,name=entity_state,json=entityState,proto3,oneof" json:"entity_state,omitempty"` +} +type EntityEvent_EntityDelete struct { + EntityDelete *EntityDelete `protobuf:"bytes,5,opt,name=entity_delete,json=entityDelete,proto3,oneof" json:"entity_delete,omitempty"` +} + +func (*EntityEvent_EntityState) isEntityEvent_Data() {} +func (*EntityEvent_EntityDelete) isEntityEvent_Data() {} + +func (m *EntityEvent) GetData() isEntityEvent_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *EntityEvent) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *EntityEvent) GetEntityType() string { + if m != nil { + return m.EntityType + } + return "" +} + +func (m *EntityEvent) GetId() []v11.KeyValue { + if m != nil { + return m.Id + } + return nil +} + +func (m *EntityEvent) GetEntityState() *EntityState { + if x, ok := m.GetData().(*EntityEvent_EntityState); ok { + return x.EntityState + } + return nil +} + +func (m *EntityEvent) GetEntityDelete() *EntityDelete { + if x, ok := m.GetData().(*EntityEvent_EntityDelete); ok { + return x.EntityDelete + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*EntityEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*EntityEvent_EntityState)(nil), + (*EntityEvent_EntityDelete)(nil), + } +} + +// The full state of the Entity. +type EntityState struct { + // Set of non-identifying attributes only. 
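The comments above describe the event model: an EntityEvent carries an observation timestamp, an entity type, identifying attributes in Id, and exactly one of EntityState or EntityDelete in the Data oneof. Below is an illustration-only sketch of a single state event wrapped in the EntitiesData, ResourceEntities, and ScopeEntities hierarchy defined in this file. The attribute keys and values are invented, the KeyValue/AnyValue construction assumes the existing common/v1 bindings imported as v11 above, and EntityState's Attributes field is declared just below.

// Sketch only: one EntityState event describing a "service" entity.
data := &EntitiesData{
    ResourceEntities: []*ResourceEntities{{
        // Resource is left unset here, meaning no parent-resource info is known.
        SchemaUrl: "https://opentelemetry.io/schemas/1.26.0",
        ScopeEntities: []*ScopeEntities{{
            EntityEvents: []*EntityEvent{{
                TimeUnixNano: 1718000000000000000, // observation time in nanoseconds
                EntityType:   "service",
                Id: []v11.KeyValue{{
                    Key:   "service.instance.id",
                    Value: v11.AnyValue{Value: &v11.AnyValue_StringValue{StringValue: "627cc493-f310-47de-96bd-71410b7dec09"}},
                }},
                Data: &EntityEvent_EntityState{EntityState: &EntityState{
                    Attributes: []v11.KeyValue{{
                        Key:   "service.version",
                        Value: v11.AnyValue{Value: &v11.AnyValue_StringValue{StringValue: "1.2.3"}},
                    }},
                }},
            }},
        }},
    }},
}
_ = data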
+ Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *EntityState) Reset() { *m = EntityState{} } +func (m *EntityState) String() string { return proto.CompactTextString(m) } +func (*EntityState) ProtoMessage() {} +func (*EntityState) Descriptor() ([]byte, []int) { + return fileDescriptor_23658fae9436c0cd, []int{4} +} +func (m *EntityState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntityState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntityState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EntityState) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityState.Merge(m, src) +} +func (m *EntityState) XXX_Size() int { + return m.Size() +} +func (m *EntityState) XXX_DiscardUnknown() { + xxx_messageInfo_EntityState.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityState proto.InternalMessageInfo + +func (m *EntityState) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *EntityState) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// Deletion event. No additional information is recorded. +type EntityDelete struct { +} + +func (m *EntityDelete) Reset() { *m = EntityDelete{} } +func (m *EntityDelete) String() string { return proto.CompactTextString(m) } +func (*EntityDelete) ProtoMessage() {} +func (*EntityDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_23658fae9436c0cd, []int{5} +} +func (m *EntityDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntityDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntityDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EntityDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityDelete.Merge(m, src) +} +func (m *EntityDelete) XXX_Size() int { + return m.Size() +} +func (m *EntityDelete) XXX_DiscardUnknown() { + xxx_messageInfo_EntityDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityDelete proto.InternalMessageInfo + +func init() { + proto.RegisterType((*EntitiesData)(nil), "opentelemetry.proto.entities.v1.EntitiesData") + proto.RegisterType((*ResourceEntities)(nil), "opentelemetry.proto.entities.v1.ResourceEntities") + proto.RegisterType((*ScopeEntities)(nil), "opentelemetry.proto.entities.v1.ScopeEntities") + proto.RegisterType((*EntityEvent)(nil), "opentelemetry.proto.entities.v1.EntityEvent") + proto.RegisterType((*EntityState)(nil), "opentelemetry.proto.entities.v1.EntityState") + proto.RegisterType((*EntityDelete)(nil), "opentelemetry.proto.entities.v1.EntityDelete") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/entities/v1/entities.proto", fileDescriptor_23658fae9436c0cd) +} + +var fileDescriptor_23658fae9436c0cd = []byte{ + // 622 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4d, 0x6b, 0xd4, 0x40, + 0x18, 0xde, 0xd9, 0x6e, 0xbf, 0x66, 0xbb, 0xa5, 0x0e, 0x22, 0xa1, 0xe0, 0x6e, 
0x09, 0x82, 0x55, + 0x34, 0xcb, 0xb6, 0x17, 0x2f, 0x1e, 0xac, 0xad, 0x54, 0x8b, 0xb6, 0x4d, 0x3f, 0x0e, 0x1e, 0x0c, + 0xd3, 0xcd, 0xcb, 0x3a, 0x90, 0xcc, 0x84, 0xc9, 0x64, 0x69, 0xfe, 0x85, 0x27, 0x7f, 0x84, 0x7f, + 0xc0, 0x3f, 0xe0, 0xa1, 0xc7, 0xe2, 0xc9, 0x8b, 0x22, 0xed, 0x45, 0xff, 0x85, 0x64, 0x92, 0x49, + 0xd3, 0xb2, 0x90, 0xe2, 0xed, 0xfd, 0x78, 0xde, 0xe7, 0x7d, 0xde, 0x27, 0x61, 0xb0, 0x23, 0x22, + 0xe0, 0x0a, 0x02, 0x08, 0x41, 0xc9, 0xb4, 0x1f, 0x49, 0xa1, 0x44, 0x1f, 0xb8, 0x62, 0x8a, 0x41, + 0xdc, 0x1f, 0x0f, 0xca, 0xd8, 0xd1, 0x2d, 0xd2, 0xbb, 0x86, 0xcf, 0x8b, 0x4e, 0x89, 0x19, 0x0f, + 0x96, 0xef, 0x8e, 0xc4, 0x48, 0xe4, 0x34, 0x59, 0x94, 0x23, 0x96, 0x1f, 0x4f, 0x5a, 0x33, 0x14, + 0x61, 0x28, 0x78, 0xb6, 0x24, 0x8f, 0x0a, 0xec, 0x44, 0x49, 0x12, 0x62, 0x91, 0xc8, 0x21, 0x64, + 0x68, 0x13, 0xe7, 0x78, 0x9b, 0xe3, 0x85, 0xad, 0x42, 0xc0, 0x26, 0x55, 0x94, 0x7c, 0xc0, 0x77, + 0x0c, 0xc2, 0x33, 0xca, 0x2c, 0xb4, 0x32, 0xb5, 0xda, 0x5e, 0x1b, 0x38, 0x35, 0xf2, 0x1d, 0xb7, + 0x98, 0x34, 0x8c, 0xee, 0x92, 0xbc, 0x51, 0xb1, 0x7f, 0x22, 0xbc, 0x74, 0x13, 0x46, 0x76, 0xf0, + 0x9c, 0x01, 0x5a, 0x68, 0x05, 0xad, 0xb6, 0xd7, 0x1e, 0x4d, 0xdc, 0x55, 0x6a, 0xaf, 0xec, 0xda, + 0x68, 0x9d, 0xfd, 0xea, 0x35, 0xdc, 0x92, 0x80, 0x1c, 0xe1, 0xc5, 0x78, 0x28, 0xa2, 0x8a, 0xfc, + 0xa6, 0x96, 0xef, 0xd4, 0xca, 0x3f, 0xc8, 0xc6, 0x4a, 0xed, 0x9d, 0xb8, 0x9a, 0x92, 0xfb, 0x18, + 0xc7, 0xc3, 0x8f, 0x10, 0x52, 0x2f, 0x91, 0x81, 0x35, 0xb5, 0x82, 0x56, 0xe7, 0xdd, 0xf9, 0xbc, + 0x72, 0x24, 0x83, 0x37, 0x33, 0x73, 0x7f, 0x66, 0x97, 0xfe, 0xce, 0xda, 0xdf, 0x11, 0xee, 0x5c, + 0xe3, 0x21, 0xbb, 0x78, 0x5a, 0x33, 0x15, 0x97, 0xad, 0x4f, 0x94, 0x51, 0x7c, 0xc3, 0xf1, 0xc0, + 0x79, 0xcd, 0x63, 0x25, 0x93, 0x10, 0xb8, 0xa2, 0x8a, 0x09, 0xae, 0xb9, 0x8a, 0x1b, 0x73, 0x1e, + 0xb2, 0x8f, 0x3b, 0x5a, 0x75, 0xea, 0xc1, 0x18, 0xb8, 0x32, 0xf7, 0x3d, 0xa9, 0xbd, 0x4f, 0x4b, + 0x4a, 0xb7, 0xb2, 0x21, 0x77, 0x01, 0xae, 0x92, 0xba, 0xe3, 0xec, 0x6f, 0x4d, 0xdc, 0xae, 0x0c, + 0x93, 0x07, 0x78, 0x51, 0xb1, 0x10, 0xbc, 0x84, 0xb3, 0x53, 0x8f, 0x53, 0x2e, 0xf4, 0x6d, 0x2d, + 0x77, 0x21, 0xab, 0x1e, 0x71, 0x76, 0xfa, 0x8e, 0x72, 0x41, 0x7a, 0xb8, 0x5d, 0xe8, 0x54, 0x69, + 0x04, 0x56, 0x53, 0xb3, 0xe2, 0xbc, 0x74, 0x98, 0x46, 0x40, 0x9e, 0xe3, 0x26, 0xf3, 0xad, 0x29, + 0xad, 0xfe, 0x61, 0x8d, 0x2d, 0x3b, 0x90, 0x1e, 0xd3, 0x20, 0x31, 0x56, 0x34, 0x99, 0x4f, 0xf6, + 0x71, 0x71, 0x84, 0x17, 0x2b, 0xaa, 0xc0, 0x6a, 0x69, 0x7f, 0x6f, 0x6b, 0xc3, 0x41, 0x36, 0xb3, + 0xdd, 0x70, 0x0b, 0x8d, 0x3a, 0x25, 0x87, 0xa5, 0xb5, 0x3e, 0x04, 0xa0, 0xc0, 0x9a, 0xd6, 0x9c, + 0x4f, 0x6f, 0xc9, 0xb9, 0xa9, 0x87, 0xb6, 0x1b, 0xc6, 0xdd, 0x3c, 0xdf, 0x98, 0xc1, 0x2d, 0x9f, + 0x2a, 0x6a, 0x7f, 0x46, 0xc6, 0xc6, 0x7c, 0xdb, 0x5b, 0x8c, 0xa9, 0x52, 0x92, 0x9d, 0x24, 0x0a, + 0x62, 0xab, 0xf5, 0x3f, 0x3e, 0x54, 0x08, 0xc8, 0x33, 0x6c, 0xf9, 0x52, 0x44, 0x11, 0xf8, 0xde, + 0x55, 0xd5, 0x1b, 0x8a, 0x84, 0x2b, 0x7d, 0x47, 0xc7, 0xbd, 0x57, 0xf4, 0x5f, 0x94, 0xed, 0x97, + 0x59, 0xd7, 0x5e, 0x2c, 0x1e, 0x01, 0x23, 0xf8, 0x2b, 0x3a, 0xbb, 0xe8, 0xa2, 0xf3, 0x8b, 0x2e, + 0xfa, 0x7d, 0xd1, 0x45, 0x9f, 0x2e, 0xbb, 0x8d, 0xf3, 0xcb, 0x6e, 0xe3, 0xc7, 0x65, 0xb7, 0x81, + 0x6d, 0x26, 0xea, 0xcc, 0xd8, 0xe8, 0x98, 0x7f, 0x7f, 0x2f, 0x6b, 0xed, 0xa1, 0xf7, 0xaf, 0x46, + 0x37, 0x87, 0x58, 0xf6, 0x80, 0x05, 0x01, 0x0c, 0x95, 0x90, 0xfd, 0x28, 0x73, 0xa7, 0xcf, 0xb8, + 0x02, 0xc9, 0x69, 0xd0, 0xd7, 0x99, 0x66, 0x1d, 0x01, 0xaf, 0x3e, 0xa7, 0x5f, 0x9a, 0xbd, 0xdd, + 0x08, 0xf8, 0x61, 0xc9, 0xa2, 0xf9, 0x1d, 0xb3, 0xcd, 0x39, 0x1e, 0x9c, 0xcc, 0xe8, 0xb9, 0xf5, + 0x7f, 
0x01, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xa9, 0xf6, 0x47, 0x9a, 0x05, 0x00, 0x00, +} + +func (m *EntitiesData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EntitiesData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntitiesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceEntities) > 0 { + for iNdEx := len(m.ResourceEntities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceEntities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceEntities) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceEntities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceEntities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintEntities(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeEntities) > 0 { + for iNdEx := len(m.ScopeEntities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeEntities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeEntities) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeEntities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeEntities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintEntities(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.EntityEvents) > 0 { + for iNdEx := len(m.EntityEvents) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EntityEvents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EntityEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EntityEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntityEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + { + size := m.Data.Size() + i -= size + if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Id) > 0 { + for iNdEx := len(m.Id) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Id[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.EntityType) > 0 { + i -= len(m.EntityType) + copy(dAtA[i:], m.EntityType) + i = encodeVarintEntities(dAtA, i, uint64(len(m.EntityType))) + i-- + dAtA[i] = 0x12 + } + if m.TimeUnixNano != 0 { + i = encodeVarintEntities(dAtA, i, uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EntityEvent_EntityState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntityEvent_EntityState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EntityState != nil { + { + size, err := m.EntityState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *EntityEvent_EntityDelete) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntityEvent_EntityDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EntityDelete != nil { + { + size, err := m.EntityDelete.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *EntityState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EntityState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntityState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintEntities(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x28 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntities(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +func (m *EntityDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EntityDelete) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntityDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintEntities(dAtA []byte, offset int, v uint64) int { + offset -= sovEntities(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + 
return base +} +func (m *EntitiesData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceEntities) > 0 { + for _, e := range m.ResourceEntities { + l = e.Size() + n += 1 + l + sovEntities(uint64(l)) + } + } + return n +} + +func (m *ResourceEntities) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovEntities(uint64(l)) + if len(m.ScopeEntities) > 0 { + for _, e := range m.ScopeEntities { + l = e.Size() + n += 1 + l + sovEntities(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovEntities(uint64(l)) + } + return n +} + +func (m *ScopeEntities) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Scope.Size() + n += 1 + l + sovEntities(uint64(l)) + if len(m.EntityEvents) > 0 { + for _, e := range m.EntityEvents { + l = e.Size() + n += 1 + l + sovEntities(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovEntities(uint64(l)) + } + return n +} + +func (m *EntityEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 1 + sovEntities(uint64(m.TimeUnixNano)) + } + l = len(m.EntityType) + if l > 0 { + n += 1 + l + sovEntities(uint64(l)) + } + if len(m.Id) > 0 { + for _, e := range m.Id { + l = e.Size() + n += 1 + l + sovEntities(uint64(l)) + } + } + if m.Data != nil { + n += m.Data.Size() + } + return n +} + +func (m *EntityEvent_EntityState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EntityState != nil { + l = m.EntityState.Size() + n += 1 + l + sovEntities(uint64(l)) + } + return n +} +func (m *EntityEvent_EntityDelete) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EntityDelete != nil { + l = m.EntityDelete.Size() + n += 1 + l + sovEntities(uint64(l)) + } + return n +} +func (m *EntityState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovEntities(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovEntities(uint64(m.DroppedAttributesCount)) + } + return n +} + +func (m *EntityDelete) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovEntities(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEntities(x uint64) (n int) { + return sovEntities(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EntitiesData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EntitiesData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EntitiesData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceEntities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceEntities = append(m.ResourceEntities, &ResourceEntities{}) + if err := m.ResourceEntities[len(m.ResourceEntities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntities(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntities + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceEntities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceEntities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceEntities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeEntities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeEntities = append(m.ScopeEntities, &ScopeEntities{}) + if err := m.ScopeEntities[len(m.ScopeEntities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntities(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntities + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeEntities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeEntities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeEntities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EntityEvents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EntityEvents = append(m.EntityEvents, &EntityEvent{}) + if err := m.EntityEvents[len(m.EntityEvents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntities(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntities + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *EntityEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EntityEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EntityEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeUnixNano |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EntityType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EntityType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = append(m.Id, v11.KeyValue{}) + if err := m.Id[len(m.Id)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EntityState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EntityState{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &EntityEvent_EntityState{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EntityDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EntityDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &EntityEvent_EntityDelete{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntities(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntities + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EntityState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EntityState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EntityState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntities + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntities + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEntities(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntities + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EntityDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntities + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EntityDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EntityDelete: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEntities(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEntities + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntities(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntities + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntities + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntities + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEntities + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEntities + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEntities + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEntities = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntities = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEntities = fmt.Errorf("proto: unexpected end of group") +) diff --git a/pdata/internal/data/protogen/logs/v1/logs.pb.go b/pdata/internal/data/protogen/logs/v1/logs.pb.go index 65b11fbe286..0902eb75c6c 100644 --- a/pdata/internal/data/protogen/logs/v1/logs.pb.go +++ b/pdata/internal/data/protogen/logs/v1/logs.pb.go @@ -232,6 +232,8 @@ type ResourceLogs struct { // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_logs" field which have their own schema_url field. + // + // This field is deprecated in favour of Resource.entities.schema_url. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } diff --git a/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/pdata/internal/data/protogen/metrics/v1/metrics.pb.go index 3649bd83f81..8c9812475b9 100644 --- a/pdata/internal/data/protogen/metrics/v1/metrics.pb.go +++ b/pdata/internal/data/protogen/metrics/v1/metrics.pb.go @@ -160,6 +160,25 @@ func (DataPointFlags) EnumDescriptor() ([]byte, []int) { // storage, OR can be embedded by other protocols that transfer OTLP metrics // data but do not implement the OTLP protocol. 
// +// MetricsData +// └─── ResourceMetrics +// +// ├── Resource +// ├── SchemaURL +// └── ScopeMetrics +// ├── Scope +// ├── SchemaURL +// └── Metric +// ├── Name +// ├── Description +// ├── Unit +// └── data +// ├── Gauge +// ├── Sum +// ├── Histogram +// ├── ExponentialHistogram +// └── Summary +// // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. @@ -228,6 +247,8 @@ type ResourceMetrics struct { // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_metrics" field which have their own schema_url field. + // + // This field is deprecated in favour of Resource.entities.schema_url. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -855,6 +876,9 @@ func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit // data type. These data points cannot always be merged in a meaningful way. // While they can be useful in some applications, histogram data points are // recommended for new applications. +// Summary metrics do not have an aggregation temporality field. This is +// because the count and sum fields of a SummaryDataPoint are assumed to be +// cumulative values. type Summary struct { DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -1644,7 +1668,8 @@ func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 { } // SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. +// time-varying values of a Summary metric. The count and sum fields represent +// cumulative values. type SummaryDataPoint struct { // The set of key/value pairs that uniquely identify the timeseries from // where this point belongs. The list may be empty (may contain 0 elements). diff --git a/pdata/internal/data/protogen/resource/v1/resource.pb.go b/pdata/internal/data/protogen/resource/v1/resource.pb.go index d4d1565c764..a71639d183f 100644 --- a/pdata/internal/data/protogen/resource/v1/resource.pb.go +++ b/pdata/internal/data/protogen/resource/v1/resource.pb.go @@ -35,6 +35,8 @@ type Resource struct { // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Set of entities that participate in this Resource. + Entities []*ResourceEntityRef `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities,omitempty"` } func (m *Resource) Reset() { *m = Resource{} } @@ -84,8 +86,104 @@ func (m *Resource) GetDroppedAttributesCount() uint32 { return 0 } +func (m *Resource) GetEntities() []*ResourceEntityRef { + if m != nil { + return m.Entities + } + return nil +} + +type ResourceEntityRef struct { + // The Schema URL, if known. This is the identifier of the Schema that the entity data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // + // This schema_url applies to the data in this message and to the Resource attributes + // referenced by id_attr_keys and descr_attr_keys. 
+ // TODO: discuss if we are happy with this somewhat complicated definition of what + // the schema_url applies to. + // + // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. + SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` + // Defines the type of the entity. MUST not change during the lifetime of the entity. + // For example: "service" or "host". This field is required and MUST not be empty + // for valid entities. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Attribute Keys that identify the entity. + // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. + // These keys MUST exist in the containing Resource.attributes. + IdAttrKeys []string `protobuf:"bytes,3,rep,name=id_attr_keys,json=idAttrKeys,proto3" json:"id_attr_keys,omitempty"` + // Descriptive (non-identifying) attribute keys of the entity. + // MAY change over the lifetime of the entity. MAY be empty. + // These attribute keys are not part of entity's identity. + // These keys MUST exist in the containing Resource.attributes. + DescrAttrKeys []string `protobuf:"bytes,4,rep,name=descr_attr_keys,json=descrAttrKeys,proto3" json:"descr_attr_keys,omitempty"` +} + +func (m *ResourceEntityRef) Reset() { *m = ResourceEntityRef{} } +func (m *ResourceEntityRef) String() string { return proto.CompactTextString(m) } +func (*ResourceEntityRef) ProtoMessage() {} +func (*ResourceEntityRef) Descriptor() ([]byte, []int) { + return fileDescriptor_446f73eacf88f3f5, []int{1} +} +func (m *ResourceEntityRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceEntityRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceEntityRef.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceEntityRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceEntityRef.Merge(m, src) +} +func (m *ResourceEntityRef) XXX_Size() int { + return m.Size() +} +func (m *ResourceEntityRef) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceEntityRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceEntityRef proto.InternalMessageInfo + +func (m *ResourceEntityRef) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +func (m *ResourceEntityRef) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ResourceEntityRef) GetIdAttrKeys() []string { + if m != nil { + return m.IdAttrKeys + } + return nil +} + +func (m *ResourceEntityRef) GetDescrAttrKeys() []string { + if m != nil { + return m.DescrAttrKeys + } + return nil +} + func init() { proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") + proto.RegisterType((*ResourceEntityRef)(nil), "opentelemetry.proto.resource.v1.ResourceEntityRef") } func init() { @@ -93,26 +191,33 @@ func init() { } var fileDescriptor_446f73eacf88f3f5 = []byte{ - // 302 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, - 0x42, 0xf2, 0x28, 0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x22, 
0xe9, 0xf9, 0xe9, - 0xf9, 0x10, 0x63, 0x40, 0x2c, 0x88, 0x0a, 0x29, 0x2d, 0x6c, 0xd6, 0x24, 0xe7, 0xe7, 0xe6, 0xe6, - 0xe7, 0x81, 0x2c, 0x81, 0xb0, 0x20, 0x6a, 0x95, 0x26, 0x33, 0x72, 0x71, 0x04, 0x41, 0x4d, 0x14, - 0xf2, 0xe5, 0xe2, 0x4a, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x49, 0x2d, 0x96, 0x60, 0x54, - 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd7, 0xc3, 0xe6, 0x08, 0xa8, 0x19, 0x65, 0x86, 0x7a, 0xde, 0xa9, - 0x95, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x21, 0x19, 0x20, - 0x64, 0xc1, 0x25, 0x91, 0x52, 0x94, 0x5f, 0x50, 0x90, 0x9a, 0x12, 0x8f, 0x10, 0x8d, 0x4f, 0xce, - 0x2f, 0xcd, 0x2b, 0x91, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x0d, 0x12, 0x83, 0xca, 0x3b, 0xc2, 0xa5, - 0x9d, 0x41, 0xb2, 0x4e, 0xdb, 0x19, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, - 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x81, - 0x4b, 0x29, 0x33, 0x5f, 0x8f, 0x40, 0xb0, 0x38, 0xf1, 0xc2, 0x7c, 0x14, 0x00, 0x92, 0x0a, 0x60, - 0x8c, 0x72, 0x4b, 0x47, 0xd7, 0x94, 0x09, 0x0a, 0x91, 0x9c, 0x9c, 0xd4, 0xe4, 0x92, 0xfc, 0x22, - 0xfd, 0x82, 0x94, 0xc4, 0x92, 0x44, 0xfd, 0xcc, 0xbc, 0x92, 0xd4, 0xa2, 0xbc, 0xc4, 0x1c, 0x7d, - 0x30, 0x0f, 0x6c, 0x6a, 0x7a, 0x6a, 0x1e, 0x72, 0xfc, 0xac, 0x62, 0x92, 0xf7, 0x2f, 0x48, 0xcd, - 0x0b, 0x81, 0x9b, 0x02, 0x36, 0x5f, 0x0f, 0x66, 0x9b, 0x5e, 0x98, 0x61, 0x12, 0x1b, 0x58, 0x9f, - 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x8b, 0xcf, 0x38, 0xeb, 0x01, 0x00, 0x00, + // 410 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x8e, 0xd3, 0x30, + 0x14, 0x86, 0xe3, 0x99, 0x0a, 0x4d, 0x0c, 0x15, 0xc2, 0x42, 0x28, 0x1a, 0x89, 0x34, 0xea, 0x02, + 0x2a, 0x16, 0x8e, 0x32, 0x6c, 0xd8, 0x52, 0x04, 0x9b, 0x11, 0x30, 0xb2, 0x60, 0x16, 0x6c, 0xa2, + 0x34, 0x79, 0x04, 0x8b, 0xc4, 0x8e, 0x1c, 0xa7, 0x52, 0x6e, 0xd1, 0x73, 0x70, 0x01, 0xae, 0xd0, + 0x65, 0x97, 0xac, 0x10, 0x6a, 0x2e, 0x82, 0x62, 0xb7, 0xa1, 0x40, 0xa5, 0xee, 0x5e, 0xde, 0xff, + 0xbf, 0xcf, 0xef, 0x77, 0x8c, 0xa9, 0xac, 0x40, 0x68, 0x28, 0xa0, 0x04, 0xad, 0xda, 0xb0, 0x52, + 0x52, 0xcb, 0x50, 0x41, 0x2d, 0x1b, 0x95, 0x42, 0xb8, 0x8c, 0x86, 0x9a, 0x1a, 0x89, 0x4c, 0xfe, + 0xf2, 0xdb, 0x26, 0x1d, 0x3c, 0xcb, 0xe8, 0xf2, 0x61, 0x2e, 0x73, 0x69, 0x31, 0x7d, 0x65, 0x1d, + 0x97, 0xcf, 0x8e, 0x1d, 0x93, 0xca, 0xb2, 0x94, 0xa2, 0x3f, 0xc4, 0x56, 0xd6, 0x3b, 0xed, 0x10, + 0xbe, 0x60, 0x3b, 0x22, 0x79, 0x8b, 0x71, 0xa2, 0xb5, 0xe2, 0x8b, 0x46, 0x43, 0xed, 0xa1, 0xe0, + 0x7c, 0x76, 0xf7, 0xea, 0x29, 0x3d, 0xb6, 0xc4, 0x8e, 0xb1, 0x8c, 0xe8, 0x35, 0xb4, 0xb7, 0x49, + 0xd1, 0xc0, 0x7c, 0xb4, 0xfe, 0x39, 0x71, 0xd8, 0x01, 0x80, 0xbc, 0xc0, 0x5e, 0xa6, 0x64, 0x55, + 0x41, 0x16, 0xff, 0xe9, 0xc6, 0xa9, 0x6c, 0x84, 0xf6, 0xce, 0x02, 0x34, 0x1b, 0xb3, 0x47, 0x3b, + 0xfd, 0xe5, 0x20, 0xbf, 0xea, 0x55, 0xf2, 0x0e, 0x5f, 0x80, 0xd0, 0x5c, 0x73, 0xa8, 0xbd, 0x73, + 0xb3, 0xc6, 0x15, 0x3d, 0x71, 0x17, 0x74, 0x9f, 0xe2, 0x75, 0x3f, 0xd8, 0x32, 0xf8, 0xcc, 0x06, + 0xc6, 0x74, 0x85, 0xf0, 0x83, 0xff, 0x74, 0xf2, 0x18, 0xe3, 0x3a, 0xfd, 0x02, 0x65, 0x12, 0x37, + 0xaa, 0xf0, 0x50, 0x80, 0x66, 0x2e, 0x73, 0x6d, 0xe7, 0xa3, 0x2a, 0x08, 0xc1, 0x23, 0xdd, 0x56, + 0x60, 0x56, 0x75, 0x99, 0xa9, 0x49, 0x80, 0xef, 0x71, 0x9b, 0x26, 0xfe, 0x0a, 0xad, 0x5d, 0xce, + 0x65, 0x98, 0x9b, 0x04, 0xd7, 0xd0, 0xd6, 0xe4, 0x09, 0xbe, 0x9f, 0x41, 0x9d, 0xaa, 0x03, 0xd3, + 0xc8, 0x98, 0xc6, 0xa6, 0xbd, 0xf7, 0xcd, 0xbf, 0xa3, 0xf5, 0xd6, 0x47, 0x9b, 0xad, 0x8f, 0x7e, + 0x6d, 0x7d, 0xb4, 0xea, 0x7c, 0x67, 0xd3, 0xf9, 0xce, 0x8f, 0xce, 
0x77, 0xf0, 0x94, 0xcb, 0x53, + 0x69, 0xe7, 0xe3, 0x7d, 0x9c, 0x9b, 0x5e, 0xba, 0x41, 0x9f, 0xde, 0xe4, 0xff, 0x0e, 0xf1, 0xfe, + 0xa7, 0x17, 0x05, 0xa4, 0x5a, 0xaa, 0xb0, 0xca, 0x12, 0x9d, 0x84, 0x5c, 0x68, 0x50, 0x22, 0x29, + 0x42, 0xf3, 0x65, 0xa8, 0x39, 0x88, 0xc3, 0x27, 0xf8, 0xed, 0x6c, 0xf2, 0xbe, 0x02, 0xf1, 0x61, + 0xa0, 0x18, 0xfe, 0x70, 0xb9, 0xf4, 0x36, 0x5a, 0xdc, 0x31, 0x73, 0xcf, 0x7f, 0x07, 0x00, 0x00, + 0xff, 0xff, 0xbe, 0xbb, 0xb4, 0xae, 0xce, 0x02, 0x00, 0x00, } func (m *Resource) Marshal() (dAtA []byte, err error) { @@ -135,6 +240,20 @@ func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Entities) > 0 { + for iNdEx := len(m.Entities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResource(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if m.DroppedAttributesCount != 0 { i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount)) i-- @@ -157,6 +276,61 @@ func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceEntityRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceEntityRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceEntityRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DescrAttrKeys) > 0 { + for iNdEx := len(m.DescrAttrKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DescrAttrKeys[iNdEx]) + copy(dAtA[i:], m.DescrAttrKeys[iNdEx]) + i = encodeVarintResource(dAtA, i, uint64(len(m.DescrAttrKeys[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.IdAttrKeys) > 0 { + for iNdEx := len(m.IdAttrKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IdAttrKeys[iNdEx]) + copy(dAtA[i:], m.IdAttrKeys[iNdEx]) + i = encodeVarintResource(dAtA, i, uint64(len(m.IdAttrKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintResource(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + } + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintResource(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintResource(dAtA []byte, offset int, v uint64) int { offset -= sovResource(v) base := offset @@ -183,6 +357,41 @@ func (m *Resource) Size() (n int) { if m.DroppedAttributesCount != 0 { n += 1 + sovResource(uint64(m.DroppedAttributesCount)) } + if len(m.Entities) > 0 { + for _, e := range m.Entities { + l = e.Size() + n += 1 + l + sovResource(uint64(l)) + } + } + return n +} + +func (m *ResourceEntityRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + if len(m.IdAttrKeys) > 0 { + for _, s := range m.IdAttrKeys { + l = len(s) + n += 1 + l + sovResource(uint64(l)) + } + } + if len(m.DescrAttrKeys) > 0 { + for _, s := range m.DescrAttrKeys { + l = len(s) + n += 1 + l + sovResource(uint64(l)) + } + } return n } @@ -274,6 +483,218 @@ func (m *Resource) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entities = append(m.Entities, &ResourceEntityRef{}) + if err := m.Entities[len(m.Entities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceEntityRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceEntityRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceEntityRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdAttrKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IdAttrKeys = append(m.IdAttrKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescrAttrKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DescrAttrKeys = append(m.DescrAttrKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipResource(dAtA[iNdEx:]) diff --git a/pdata/internal/data/protogen/trace/v1/trace.pb.go b/pdata/internal/data/protogen/trace/v1/trace.pb.go index c1fcf0764e0..8238bdacb3f 100644 --- a/pdata/internal/data/protogen/trace/v1/trace.pb.go +++ b/pdata/internal/data/protogen/trace/v1/trace.pb.go @@ -239,6 +239,8 @@ type ResourceSpans struct { // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. + // + // This field is deprecated in favour of Resource.entities.schema_url. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } diff --git a/pdata/internal/generated_wrapper_resource.go b/pdata/internal/generated_wrapper_resource.go index 354b2457ba7..3ab3dd9a6d5 100644 --- a/pdata/internal/generated_wrapper_resource.go +++ b/pdata/internal/generated_wrapper_resource.go @@ -38,4 +38,5 @@ func GenerateTestResource() Resource { func FillTestResource(tv Resource) { FillTestMap(NewMap(&tv.orig.Attributes, tv.state)) tv.orig.DroppedAttributesCount = uint32(17) + FillTestResourceEntityRefSlice(NewResourceEntityRefSlice(&tv.orig.Entities, tv.state)) } diff --git a/pdata/internal/generated_wrapper_resourceentityref.go b/pdata/internal/generated_wrapper_resourceentityref.go new file mode 100644 index 00000000000..35025fc1c3d --- /dev/null +++ b/pdata/internal/generated_wrapper_resourceentityref.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package internal + +import ( + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +type ResourceEntityRef struct { + orig *otlpresource.ResourceEntityRef + state *State +} + +func GetOrigResourceEntityRef(ms ResourceEntityRef) *otlpresource.ResourceEntityRef { + return ms.orig +} + +func GetResourceEntityRefState(ms ResourceEntityRef) *State { + return ms.state +} + +func NewResourceEntityRef(orig *otlpresource.ResourceEntityRef, state *State) ResourceEntityRef { + return ResourceEntityRef{orig: orig, state: state} +} + +func GenerateTestResourceEntityRef() ResourceEntityRef { + orig := otlpresource.ResourceEntityRef{} + state := StateMutable + tv := NewResourceEntityRef(&orig, &state) + FillTestResourceEntityRef(tv) + return tv +} + +func FillTestResourceEntityRef(tv ResourceEntityRef) { + tv.orig.SchemaUrl = "https://opentelemetry.io/schemas/1.5.0" + tv.orig.Type = "host" + FillTestStringSlice(NewStringSlice(&tv.orig.IdAttrKeys, tv.state)) + FillTestStringSlice(NewStringSlice(&tv.orig.DescrAttrKeys, tv.state)) +} diff --git a/pdata/internal/generated_wrapper_resourceentityrefslice.go b/pdata/internal/generated_wrapper_resourceentityrefslice.go new file mode 100644 index 00000000000..aee0cb000ca --- /dev/null +++ b/pdata/internal/generated_wrapper_resourceentityrefslice.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +type ResourceEntityRefSlice struct { + orig *[]*otlpresource.ResourceEntityRef + state *State +} + +func GetOrigResourceEntityRefSlice(ms ResourceEntityRefSlice) *[]*otlpresource.ResourceEntityRef { + return ms.orig +} + +func GetResourceEntityRefSliceState(ms ResourceEntityRefSlice) *State { + return ms.state +} + +func NewResourceEntityRefSlice(orig *[]*otlpresource.ResourceEntityRef, state *State) ResourceEntityRefSlice { + return ResourceEntityRefSlice{orig: orig, state: state} +} + +func GenerateTestResourceEntityRefSlice() ResourceEntityRefSlice { + orig := []*otlpresource.ResourceEntityRef(nil) + state := StateMutable + es := NewResourceEntityRefSlice(&orig, &state) + FillTestResourceEntityRefSlice(es) + return es +} + +func FillTestResourceEntityRefSlice(es ResourceEntityRefSlice) { + *es.orig = make([]*otlpresource.ResourceEntityRef, 7) + for i := 0; i < 7; i++ { + (*es.orig)[i] = &otlpresource.ResourceEntityRef{} + FillTestResourceEntityRef(NewResourceEntityRef((*es.orig)[i], es.state)) + } +} diff --git a/pdata/internal/wrapper_entities.go b/pdata/internal/wrapper_entities.go new file mode 100644 index 00000000000..eabe07facdb --- /dev/null +++ b/pdata/internal/wrapper_entities.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +type Entities struct { + orig *otlpcollectorlog.ExportEntitiesServiceRequest + state *State +} + +func GetOrigEntities(ms Entities) *otlpcollectorlog.ExportEntitiesServiceRequest { + return ms.orig +} + +func GetEntitiesState(ms Entities) *State { + return ms.state +} + 
+func SetEntitiesState(ms Entities, state State) { + *ms.state = state +} + +func NewEntities(orig *otlpcollectorlog.ExportEntitiesServiceRequest, state *State) Entities { + return Entities{orig: orig, state: state} +} + +// EntitiesToProto internal helper to convert Entities to protobuf representation. +func EntitiesToProto(l Entities) otlpentities.EntitiesData { + return otlpentities.EntitiesData{ + ResourceEntities: l.orig.ResourceEntities, + } +} + +// EntitiesFromProto internal helper to convert protobuf representation to Entities. +// This function set exclusive state assuming that it's called only once per Entities. +func EntitiesFromProto(orig otlpentities.EntitiesData) Entities { + state := StateMutable + return NewEntities(&otlpcollectorlog.ExportEntitiesServiceRequest{ + ResourceEntities: orig.ResourceEntities, + }, &state) +} diff --git a/pdata/pcommon/generated_resource.go b/pdata/pcommon/generated_resource.go index 12e6cfa7f3b..0455a115664 100644 --- a/pdata/pcommon/generated_resource.go +++ b/pdata/pcommon/generated_resource.go @@ -66,9 +66,15 @@ func (ms Resource) SetDroppedAttributesCount(v uint32) { ms.getOrig().DroppedAttributesCount = v } +// Entities returns the Entities associated with this Resource. +func (ms Resource) Entities() ResourceEntityRefSlice { + return ResourceEntityRefSlice(internal.NewResourceEntityRefSlice(&ms.getOrig().Entities, internal.GetResourceState(internal.Resource(ms)))) +} + // CopyTo copies all properties from the current struct overriding the destination. func (ms Resource) CopyTo(dest Resource) { dest.getState().AssertMutable() ms.Attributes().CopyTo(dest.Attributes()) dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) + ms.Entities().CopyTo(dest.Entities()) } diff --git a/pdata/pcommon/generated_resource_test.go b/pdata/pcommon/generated_resource_test.go index 0397781aead..5094fad3d1a 100644 --- a/pdata/pcommon/generated_resource_test.go +++ b/pdata/pcommon/generated_resource_test.go @@ -54,6 +54,13 @@ func TestResource_DroppedAttributesCount(t *testing.T) { assert.Panics(t, func() { newResource(&otlpresource.Resource{}, &sharedState).SetDroppedAttributesCount(uint32(17)) }) } +func TestResource_Entities(t *testing.T) { + ms := NewResource() + assert.Equal(t, NewResourceEntityRefSlice(), ms.Entities()) + internal.FillTestResourceEntityRefSlice(internal.ResourceEntityRefSlice(ms.Entities())) + assert.Equal(t, ResourceEntityRefSlice(internal.GenerateTestResourceEntityRefSlice()), ms.Entities()) +} + func generateTestResource() Resource { return Resource(internal.GenerateTestResource()) } diff --git a/pdata/pcommon/generated_resourceentityref.go b/pdata/pcommon/generated_resourceentityref.go new file mode 100644 index 00000000000..0cffa0f4f8b --- /dev/null +++ b/pdata/pcommon/generated_resourceentityref.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceEntityRef function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type ResourceEntityRef internal.ResourceEntityRef + +func newResourceEntityRef(orig *otlpresource.ResourceEntityRef, state *internal.State) ResourceEntityRef { + return ResourceEntityRef(internal.NewResourceEntityRef(orig, state)) +} + +// NewResourceEntityRef creates a new empty ResourceEntityRef. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceEntityRef() ResourceEntityRef { + state := internal.StateMutable + return newResourceEntityRef(&otlpresource.ResourceEntityRef{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceEntityRef) MoveTo(dest ResourceEntityRef) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = otlpresource.ResourceEntityRef{} +} + +func (ms ResourceEntityRef) getOrig() *otlpresource.ResourceEntityRef { + return internal.GetOrigResourceEntityRef(internal.ResourceEntityRef(ms)) +} + +func (ms ResourceEntityRef) getState() *internal.State { + return internal.GetResourceEntityRefState(internal.ResourceEntityRef(ms)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceEntityRef. +func (ms ResourceEntityRef) SchemaUrl() string { + return ms.getOrig().SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceEntityRef. +func (ms ResourceEntityRef) SetSchemaUrl(v string) { + ms.getState().AssertMutable() + ms.getOrig().SchemaUrl = v +} + +// Type returns the type associated with this ResourceEntityRef. +func (ms ResourceEntityRef) Type() string { + return ms.getOrig().Type +} + +// SetType replaces the type associated with this ResourceEntityRef. +func (ms ResourceEntityRef) SetType(v string) { + ms.getState().AssertMutable() + ms.getOrig().Type = v +} + +// IdAttrKeys returns the IdAttrKeys associated with this ResourceEntityRef. +func (ms ResourceEntityRef) IdAttrKeys() StringSlice { + return StringSlice(internal.NewStringSlice(&ms.getOrig().IdAttrKeys, internal.GetResourceEntityRefState(internal.ResourceEntityRef(ms)))) +} + +// DescrAttrKeys returns the DescrAttrKeys associated with this ResourceEntityRef. +func (ms ResourceEntityRef) DescrAttrKeys() StringSlice { + return StringSlice(internal.NewStringSlice(&ms.getOrig().DescrAttrKeys, internal.GetResourceEntityRefState(internal.ResourceEntityRef(ms)))) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceEntityRef) CopyTo(dest ResourceEntityRef) { + dest.getState().AssertMutable() + dest.SetSchemaUrl(ms.SchemaUrl()) + dest.SetType(ms.Type()) + ms.IdAttrKeys().CopyTo(dest.IdAttrKeys()) + ms.DescrAttrKeys().CopyTo(dest.DescrAttrKeys()) +} diff --git a/pdata/pcommon/generated_resourceentityref_test.go b/pdata/pcommon/generated_resourceentityref_test.go new file mode 100644 index 00000000000..b51717eacfc --- /dev/null +++ b/pdata/pcommon/generated_resourceentityref_test.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
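A hedged usage sketch, not part of the change itself, showing how the pcommon API introduced above could be exercised end to end; the attribute keys host.name and os.type and the value my-host are purely illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	res := pcommon.NewResource()
	res.Attributes().PutStr("host.name", "my-host")
	res.Attributes().PutStr("os.type", "linux")

	// Reference the "host" entity that participates in this resource,
	// pointing at the identifying and descriptive attribute keys above.
	ref := res.Entities().AppendEmpty()
	ref.SetType("host")
	ref.SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0")
	ref.IdAttrKeys().Append("host.name")
	ref.DescrAttrKeys().Append("os.type")

	fmt.Println(res.Entities().Len()) // 1
}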
+ +package pcommon + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +func TestResourceEntityRef_MoveTo(t *testing.T) { + ms := generateTestResourceEntityRef() + dest := NewResourceEntityRef() + ms.MoveTo(dest) + assert.Equal(t, NewResourceEntityRef(), ms) + assert.Equal(t, generateTestResourceEntityRef(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.MoveTo(newResourceEntityRef(&otlpresource.ResourceEntityRef{}, &sharedState)) }) + assert.Panics(t, func() { newResourceEntityRef(&otlpresource.ResourceEntityRef{}, &sharedState).MoveTo(dest) }) +} + +func TestResourceEntityRef_CopyTo(t *testing.T) { + ms := NewResourceEntityRef() + orig := NewResourceEntityRef() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestResourceEntityRef() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newResourceEntityRef(&otlpresource.ResourceEntityRef{}, &sharedState)) }) +} + +func TestResourceEntityRef_SchemaUrl(t *testing.T) { + ms := NewResourceEntityRef() + assert.Equal(t, "", ms.SchemaUrl()) + ms.SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0") + assert.Equal(t, "https://opentelemetry.io/schemas/1.5.0", ms.SchemaUrl()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + newResourceEntityRef(&otlpresource.ResourceEntityRef{}, &sharedState).SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0") + }) +} + +func TestResourceEntityRef_Type(t *testing.T) { + ms := NewResourceEntityRef() + assert.Equal(t, "", ms.Type()) + ms.SetType("host") + assert.Equal(t, "host", ms.Type()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { newResourceEntityRef(&otlpresource.ResourceEntityRef{}, &sharedState).SetType("host") }) +} + +func TestResourceEntityRef_IdAttrKeys(t *testing.T) { + ms := NewResourceEntityRef() + assert.Equal(t, NewStringSlice(), ms.IdAttrKeys()) + internal.FillTestStringSlice(internal.StringSlice(ms.IdAttrKeys())) + assert.Equal(t, StringSlice(internal.GenerateTestStringSlice()), ms.IdAttrKeys()) +} + +func TestResourceEntityRef_DescrAttrKeys(t *testing.T) { + ms := NewResourceEntityRef() + assert.Equal(t, NewStringSlice(), ms.DescrAttrKeys()) + internal.FillTestStringSlice(internal.StringSlice(ms.DescrAttrKeys())) + assert.Equal(t, StringSlice(internal.GenerateTestStringSlice()), ms.DescrAttrKeys()) +} + +func generateTestResourceEntityRef() ResourceEntityRef { + return ResourceEntityRef(internal.GenerateTestResourceEntityRef()) +} diff --git a/pdata/pcommon/generated_resourceentityrefslice.go b/pdata/pcommon/generated_resourceentityrefslice.go new file mode 100644 index 00000000000..8d6117ed108 --- /dev/null +++ b/pdata/pcommon/generated_resourceentityrefslice.go @@ -0,0 +1,157 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// ResourceEntityRefSlice logically represents a slice of ResourceEntityRef. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. 
+// +// Must use NewResourceEntityRefSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceEntityRefSlice internal.ResourceEntityRefSlice + +func newResourceEntityRefSlice(orig *[]*otlpresource.ResourceEntityRef, state *internal.State) ResourceEntityRefSlice { + return ResourceEntityRefSlice(internal.NewResourceEntityRefSlice(orig, state)) +} + +// NewResourceEntityRefSlice creates a ResourceEntityRefSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewResourceEntityRefSlice() ResourceEntityRefSlice { + orig := []*otlpresource.ResourceEntityRef(nil) + state := internal.StateMutable + return newResourceEntityRefSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceEntityRefSlice()". +func (es ResourceEntityRefSlice) Len() int { + return len(*es.getOrig()) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceEntityRefSlice) At(i int) ResourceEntityRef { + return newResourceEntityRef((*es.getOrig())[i], es.getState()) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceEntityRefSlice can be initialized: +// +// es := NewResourceEntityRefSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceEntityRefSlice) EnsureCapacity(newCap int) { + es.getState().AssertMutable() + oldCap := cap(*es.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpresource.ResourceEntityRef, len(*es.getOrig()), newCap) + copy(newOrig, *es.getOrig()) + *es.getOrig() = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceEntityRef. +// It returns the newly added ResourceEntityRef. +func (es ResourceEntityRefSlice) AppendEmpty() ResourceEntityRef { + es.getState().AssertMutable() + *es.getOrig() = append(*es.getOrig(), &otlpresource.ResourceEntityRef{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceEntityRefSlice) MoveAndAppendTo(dest ResourceEntityRefSlice) { + es.getState().AssertMutable() + dest.getState().AssertMutable() + if *dest.getOrig() == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.getOrig() = *es.getOrig() + } else { + *dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...) + } + *es.getOrig() = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceEntityRefSlice) RemoveIf(f func(ResourceEntityRef) bool) { + es.getState().AssertMutable() + newLen := 0 + for i := 0; i < len(*es.getOrig()); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. 
+ newLen++ + continue + } + (*es.getOrig())[newLen] = (*es.getOrig())[i] + newLen++ + } + *es.getOrig() = (*es.getOrig())[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ResourceEntityRefSlice) CopyTo(dest ResourceEntityRefSlice) { + dest.getState().AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.getOrig()) + if srcLen <= destCap { + (*dest.getOrig()) = (*dest.getOrig())[:srcLen:destCap] + for i := range *es.getOrig() { + newResourceEntityRef((*es.getOrig())[i], es.getState()).CopyTo(newResourceEntityRef((*dest.getOrig())[i], dest.getState())) + } + return + } + origs := make([]otlpresource.ResourceEntityRef, srcLen) + wrappers := make([]*otlpresource.ResourceEntityRef, srcLen) + for i := range *es.getOrig() { + wrappers[i] = &origs[i] + newResourceEntityRef((*es.getOrig())[i], es.getState()).CopyTo(newResourceEntityRef(wrappers[i], dest.getState())) + } + *dest.getOrig() = wrappers +} + +// Sort sorts the ResourceEntityRef elements within ResourceEntityRefSlice given the +// provided less function so that two instances of ResourceEntityRefSlice +// can be compared. +func (es ResourceEntityRefSlice) Sort(less func(a, b ResourceEntityRef) bool) { + es.getState().AssertMutable() + sort.SliceStable(*es.getOrig(), func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} + +func (ms ResourceEntityRefSlice) getOrig() *[]*otlpresource.ResourceEntityRef { + return internal.GetOrigResourceEntityRefSlice(internal.ResourceEntityRefSlice(ms)) +} + +func (ms ResourceEntityRefSlice) getState() *internal.State { + return internal.GetResourceEntityRefSliceState(internal.ResourceEntityRefSlice(ms)) +} diff --git a/pdata/pcommon/generated_resourceentityrefslice_test.go b/pdata/pcommon/generated_resourceentityrefslice_test.go new file mode 100644 index 00000000000..1e4cbed44f7 --- /dev/null +++ b/pdata/pcommon/generated_resourceentityrefslice_test.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
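A short usage sketch of the ResourceEntityRefSlice API defined above; the entity type and attribute keys are illustrative:

	refs := pcommon.NewResourceEntityRefSlice()
	ref := refs.AppendEmpty()
	ref.SetType("host")
	ref.IdAttrKeys().Append("host.id")
	ref.DescrAttrKeys().Append("host.name")
	ref.SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0")
	// RemoveIf filters in place, e.g. keep only "host" entity refs:
	refs.RemoveIf(func(r pcommon.ResourceEntityRef) bool { return r.Type() != "host" })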
+ +package pcommon + +import ( + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +func TestResourceEntityRefSlice(t *testing.T) { + es := NewResourceEntityRefSlice() + assert.Equal(t, 0, es.Len()) + state := internal.StateMutable + es = newResourceEntityRefSlice(&[]*otlpresource.ResourceEntityRef{}, &state) + assert.Equal(t, 0, es.Len()) + + emptyVal := NewResourceEntityRef() + testVal := ResourceEntityRef(internal.GenerateTestResourceEntityRef()) + for i := 0; i < 7; i++ { + el := es.AppendEmpty() + assert.Equal(t, emptyVal, es.At(i)) + internal.FillTestResourceEntityRef(internal.ResourceEntityRef(el)) + assert.Equal(t, testVal, es.At(i)) + } + assert.Equal(t, 7, es.Len()) +} + +func TestResourceEntityRefSliceReadOnly(t *testing.T) { + sharedState := internal.StateReadOnly + es := newResourceEntityRefSlice(&[]*otlpresource.ResourceEntityRef{}, &sharedState) + assert.Equal(t, 0, es.Len()) + assert.Panics(t, func() { es.AppendEmpty() }) + assert.Panics(t, func() { es.EnsureCapacity(2) }) + es2 := NewResourceEntityRefSlice() + es.CopyTo(es2) + assert.Panics(t, func() { es2.CopyTo(es) }) + assert.Panics(t, func() { es.MoveAndAppendTo(es2) }) + assert.Panics(t, func() { es2.MoveAndAppendTo(es) }) +} + +func TestResourceEntityRefSlice_CopyTo(t *testing.T) { + dest := NewResourceEntityRefSlice() + // Test CopyTo to empty + NewResourceEntityRefSlice().CopyTo(dest) + assert.Equal(t, NewResourceEntityRefSlice(), dest) + + // Test CopyTo larger slice + generateTestResourceEntityRefSlice().CopyTo(dest) + assert.Equal(t, generateTestResourceEntityRefSlice(), dest) + + // Test CopyTo same size slice + generateTestResourceEntityRefSlice().CopyTo(dest) + assert.Equal(t, generateTestResourceEntityRefSlice(), dest) +} + +func TestResourceEntityRefSlice_EnsureCapacity(t *testing.T) { + es := generateTestResourceEntityRefSlice() + + // Test ensure smaller capacity. 
+ const ensureSmallLen = 4 + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + assert.Equal(t, es.Len(), cap(*es.getOrig())) + assert.Equal(t, generateTestResourceEntityRefSlice(), es) + + // Test ensure larger capacity + const ensureLargeLen = 9 + es.EnsureCapacity(ensureLargeLen) + assert.Less(t, generateTestResourceEntityRefSlice().Len(), ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.getOrig())) + assert.Equal(t, generateTestResourceEntityRefSlice(), es) +} + +func TestResourceEntityRefSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestResourceEntityRefSlice() + dest := NewResourceEntityRefSlice() + src := generateTestResourceEntityRefSlice() + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestResourceEntityRefSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestResourceEntityRefSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestResourceEntityRefSlice().MoveAndAppendTo(dest) + assert.Equal(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.Equal(t, expectedSlice.At(i), dest.At(i)) + assert.Equal(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestResourceEntityRefSlice_RemoveIf(t *testing.T) { + // Test RemoveIf on empty slice + emptySlice := NewResourceEntityRefSlice() + emptySlice.RemoveIf(func(el ResourceEntityRef) bool { + t.Fail() + return false + }) + + // Test RemoveIf + filtered := generateTestResourceEntityRefSlice() + pos := 0 + filtered.RemoveIf(func(el ResourceEntityRef) bool { + pos++ + return pos%3 == 0 + }) + assert.Equal(t, 5, filtered.Len()) +} + +func TestResourceEntityRefSlice_Sort(t *testing.T) { + es := generateTestResourceEntityRefSlice() + es.Sort(func(a, b ResourceEntityRef) bool { + return uintptr(unsafe.Pointer(a.getOrig())) < uintptr(unsafe.Pointer(b.getOrig())) + }) + for i := 1; i < es.Len(); i++ { + assert.Less(t, uintptr(unsafe.Pointer(es.At(i-1).getOrig())), uintptr(unsafe.Pointer(es.At(i).getOrig()))) + } + es.Sort(func(a, b ResourceEntityRef) bool { + return uintptr(unsafe.Pointer(a.getOrig())) > uintptr(unsafe.Pointer(b.getOrig())) + }) + for i := 1; i < es.Len(); i++ { + assert.Greater(t, uintptr(unsafe.Pointer(es.At(i-1).getOrig())), uintptr(unsafe.Pointer(es.At(i).getOrig()))) + } +} + +func generateTestResourceEntityRefSlice() ResourceEntityRefSlice { + return ResourceEntityRefSlice(internal.GenerateTestResourceEntityRefSlice()) +} diff --git a/pdata/pentity/encoding.go b/pdata/pentity/encoding.go new file mode 100644 index 00000000000..fca3fb35536 --- /dev/null +++ b/pdata/pentity/encoding.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pentity // import "go.opentelemetry.io/collector/pdata/pentity" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals Entities into bytes. +type Marshaler interface { + // MarshalEntities the given Entities into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalEntities(ld Entities) ([]byte, error) +} + +// Unmarshaler unmarshalls bytes into Entities. 
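No concrete codec is added in this file; Marshaler, Unmarshaler (declared next), and Sizer mirror the encoding contracts of the other pdata packages. A sketch written purely against the interfaces, with roundTrip being a hypothetical helper:

	func roundTrip(m pentity.MarshalSizer, u pentity.Unmarshaler, ed pentity.Entities) (pentity.Entities, error) {
		buf, err := m.MarshalEntities(ed)
		if err != nil {
			return pentity.NewEntities(), err
		}
		_ = m.EntitiesSize(ed) // size in bytes of the marshaled payload
		return u.UnmarshalEntities(buf)
	}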
+type Unmarshaler interface {
+	// UnmarshalEntities the given bytes into Entities.
+	// If the error is not nil, the returned Entities cannot be used.
+	UnmarshalEntities(buf []byte) (Entities, error)
+}
+
+// Sizer is an optional interface implemented by the Marshaler,
+// that calculates the size of a marshaled Entities.
+type Sizer interface {
+	// EntitiesSize returns the size in bytes of a marshaled Entities.
+	EntitiesSize(ld Entities) int
+}
diff --git a/pdata/pentity/entities.go b/pdata/pentity/entities.go
new file mode 100644
index 00000000000..7ed216b776a
--- /dev/null
+++ b/pdata/pentity/entities.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pentity // import "go.opentelemetry.io/collector/pdata/pentity"
+
+import (
+	"go.opentelemetry.io/collector/pdata/internal"
+	otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1"
+)
+
+// Entities is the top-level struct that is propagated through the entities pipeline.
+// Use NewEntities to create new instance, zero-initialized instance is not valid for use.
+type Entities internal.Entities
+
+func newEntities(orig *otlpcollectorlog.ExportEntitiesServiceRequest) Entities {
+	state := internal.StateMutable
+	return Entities(internal.NewEntities(orig, &state))
+}
+
+func (ms Entities) getOrig() *otlpcollectorlog.ExportEntitiesServiceRequest {
+	return internal.GetOrigEntities(internal.Entities(ms))
+}
+
+func (ms Entities) getState() *internal.State {
+	return internal.GetEntitiesState(internal.Entities(ms))
+}
+
+// NewEntities creates a new Entities struct.
+func NewEntities() Entities {
+	return newEntities(&otlpcollectorlog.ExportEntitiesServiceRequest{})
+}
+
+// IsReadOnly returns true if this Entities instance is read-only.
+func (ms Entities) IsReadOnly() bool {
+	return *ms.getState() == internal.StateReadOnly
+}
+
+// CopyTo copies the Entities instance overriding the destination.
+func (ms Entities) CopyTo(dest Entities) {
+	ms.ResourceEntities().CopyTo(dest.ResourceEntities())
+}
+
+// EntityCount calculates the total number of entities.
+func (ms Entities) EntityCount() int {
+	entitiesCount := 0
+	for i := 0; i < ms.ResourceEntities().Len(); i++ {
+		ses := ms.ResourceEntities().At(i).ScopeEntities()
+		for j := 0; j < ses.Len(); j++ {
+			entitiesCount += ses.At(j).EntityEvents().Len()
+		}
+	}
+	return entitiesCount
+}
+
+// ResourceEntities returns the ResourceEntitiesSlice associated with this Entities.
+func (ms Entities) ResourceEntities() ResourceEntitiesSlice {
+	return newResourceEntitiesSlice(&ms.getOrig().ResourceEntities, internal.GetEntitiesState(internal.Entities(ms)))
+}
+
+// MarkReadOnly marks the Entities as shared so that no further modifications can be done on it.
+func (ms Entities) MarkReadOnly() {
+	internal.SetEntitiesState(internal.Entities(ms), internal.StateReadOnly)
+}
diff --git a/pdata/pentity/event_type.go b/pdata/pentity/event_type.go
new file mode 100644
index 00000000000..8e5d3268ea9
--- /dev/null
+++ b/pdata/pentity/event_type.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pentity // import "go.opentelemetry.io/collector/pdata/pentity"
+
+// EventType specifies the type of entity event.
+type EventType int32
+
+const (
+	// EventTypeEmpty means that the entity event type is unset.
+	EventTypeEmpty EventType = iota
+	EventTypeEntityState
+	EventTypeEntityDelete
+)
+
+// String returns the string representation of the EventType. 
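Consumers are expected to check Type() before touching the event payload, since the typed accessors further down return zero-initialized wrappers on a mismatch. A sketch, where ev is a pentity.EntityEvent:

	switch ev.Type() {
	case pentity.EventTypeEntityState:
		attrs := ev.EntityState().Attributes()
		_ = attrs // state attributes of the entity
	case pentity.EventTypeEntityDelete:
		// the entity identified by ev.Id() no longer exists
	}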
+func (mdt EventType) String() string { + switch mdt { + case EventTypeEmpty: + return "Empty" + case EventTypeEntityState: + return "State" + case EventTypeEntityDelete: + return "Delete" + } + return "" +} diff --git a/pdata/pentity/generated_entitydelete.go b/pdata/pentity/generated_entitydelete.go new file mode 100644 index 00000000000..67a80a15bbe --- /dev/null +++ b/pdata/pentity/generated_entitydelete.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +// EntityDelete are experimental implementation of OpenTelemetry Entity Data Model. + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewEntityDelete function to create new instances. +// Important: zero-initialized instance is not valid for use. +type EntityDelete struct { + orig *otlpentities.EntityDelete + state *internal.State +} + +func newEntityDelete(orig *otlpentities.EntityDelete, state *internal.State) EntityDelete { + return EntityDelete{orig: orig, state: state} +} + +// NewEntityDelete creates a new empty EntityDelete. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewEntityDelete() EntityDelete { + state := internal.StateMutable + return newEntityDelete(&otlpentities.EntityDelete{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms EntityDelete) MoveTo(dest EntityDelete) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpentities.EntityDelete{} +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms EntityDelete) CopyTo(dest EntityDelete) { + dest.state.AssertMutable() +} diff --git a/pdata/pentity/generated_entitydelete_test.go b/pdata/pentity/generated_entitydelete_test.go new file mode 100644 index 00000000000..7fe04c748a4 --- /dev/null +++ b/pdata/pentity/generated_entitydelete_test.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
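Putting the hand-written pieces from entities.go and event_type.go together, a minimal end-to-end sketch of building a payload; names and attribute values are illustrative, and ScopeEntities is assumed to expose the same generated AppendEmpty/EntityEvents helpers that EntityCount relies on:

	ed := pentity.NewEntities()
	re := ed.ResourceEntities().AppendEmpty()
	re.Resource().Attributes().PutStr("service.name", "checkout")
	ev := re.ScopeEntities().AppendEmpty().EntityEvents().AppendEmpty()
	ev.SetEntityType("service")
	ev.Id().PutStr("service.instance.id", "instance-1")
	ev.SetEmptyEntityState().Attributes().PutStr("deployment.environment", "production")
	_ = ed.EntityCount() // 1
	ed.MarkReadOnly()    // any further mutation panics, as the read-only tests exercise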
+ +package pentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +func TestEntityDelete_MoveTo(t *testing.T) { + ms := generateTestEntityDelete() + dest := NewEntityDelete() + ms.MoveTo(dest) + assert.Equal(t, NewEntityDelete(), ms) + assert.Equal(t, generateTestEntityDelete(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.MoveTo(newEntityDelete(&otlpentities.EntityDelete{}, &sharedState)) }) + assert.Panics(t, func() { newEntityDelete(&otlpentities.EntityDelete{}, &sharedState).MoveTo(dest) }) +} + +func TestEntityDelete_CopyTo(t *testing.T) { + ms := NewEntityDelete() + orig := NewEntityDelete() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestEntityDelete() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newEntityDelete(&otlpentities.EntityDelete{}, &sharedState)) }) +} + +func generateTestEntityDelete() EntityDelete { + tv := NewEntityDelete() + fillTestEntityDelete(tv) + return tv +} + +func fillTestEntityDelete(tv EntityDelete) { +} diff --git a/pdata/pentity/generated_entityevent.go b/pdata/pentity/generated_entityevent.go new file mode 100644 index 00000000000..686239b7163 --- /dev/null +++ b/pdata/pentity/generated_entityevent.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// EntityEvent are experimental implementation of OpenTelemetry Entity Data Model. + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewEntityEvent function to create new instances. +// Important: zero-initialized instance is not valid for use. +type EntityEvent struct { + orig *otlpentities.EntityEvent + state *internal.State +} + +func newEntityEvent(orig *otlpentities.EntityEvent, state *internal.State) EntityEvent { + return EntityEvent{orig: orig, state: state} +} + +// NewEntityEvent creates a new empty EntityEvent. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewEntityEvent() EntityEvent { + state := internal.StateMutable + return newEntityEvent(&otlpentities.EntityEvent{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms EntityEvent) MoveTo(dest EntityEvent) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpentities.EntityEvent{} +} + +// Timestamp returns the timestamp associated with this EntityEvent. +func (ms EntityEvent) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this EntityEvent. 
+func (ms EntityEvent) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// EntityType returns the entitytype associated with this EntityEvent. +func (ms EntityEvent) EntityType() string { + return ms.orig.EntityType +} + +// SetEntityType replaces the entitytype associated with this EntityEvent. +func (ms EntityEvent) SetEntityType(v string) { + ms.state.AssertMutable() + ms.orig.EntityType = v +} + +// Id returns the Id associated with this EntityEvent. +func (ms EntityEvent) Id() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Id, ms.state)) +} + +// Type returns the type of the data for this EntityEvent. +// Calling this function on zero-initialized EntityEvent will cause a panic. +func (ms EntityEvent) Type() EventType { + switch ms.orig.Data.(type) { + case *otlpentities.EntityEvent_EntityState: + return EventTypeEntityState + case *otlpentities.EntityEvent_EntityDelete: + return EventTypeEntityDelete + } + return EventTypeEmpty +} + +// EntityState returns the entitystate associated with this EntityEvent. +// +// Calling this function when Type() != EventTypeEntityState returns an invalid +// zero-initialized instance of EntityState. Note that using such EntityState instance can cause panic. +// +// Calling this function on zero-initialized EntityEvent will cause a panic. +func (ms EntityEvent) EntityState() EntityState { + v, ok := ms.orig.GetData().(*otlpentities.EntityEvent_EntityState) + if !ok { + return EntityState{} + } + return newEntityState(v.EntityState, ms.state) +} + +// SetEmptyEntityState sets an empty entitystate to this EntityEvent. +// +// After this, Type() function will return EventTypeEntityState". +// +// Calling this function on zero-initialized EntityEvent will cause a panic. +func (ms EntityEvent) SetEmptyEntityState() EntityState { + ms.state.AssertMutable() + val := &otlpentities.EntityState{} + ms.orig.Data = &otlpentities.EntityEvent_EntityState{EntityState: val} + return newEntityState(val, ms.state) +} + +// EntityDelete returns the entitydelete associated with this EntityEvent. +// +// Calling this function when Type() != EventTypeEntityDelete returns an invalid +// zero-initialized instance of EntityDelete. Note that using such EntityDelete instance can cause panic. +// +// Calling this function on zero-initialized EntityEvent will cause a panic. +func (ms EntityEvent) EntityDelete() EntityDelete { + v, ok := ms.orig.GetData().(*otlpentities.EntityEvent_EntityDelete) + if !ok { + return EntityDelete{} + } + return newEntityDelete(v.EntityDelete, ms.state) +} + +// SetEmptyEntityDelete sets an empty entitydelete to this EntityEvent. +// +// After this, Type() function will return EventTypeEntityDelete". +// +// Calling this function on zero-initialized EntityEvent will cause a panic. +func (ms EntityEvent) SetEmptyEntityDelete() EntityDelete { + ms.state.AssertMutable() + val := &otlpentities.EntityDelete{} + ms.orig.Data = &otlpentities.EntityEvent_EntityDelete{EntityDelete: val} + return newEntityDelete(val, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. 
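Writing the oneof goes through the SetEmpty* helpers above; setting one variant replaces whatever was stored before. A brief sketch:

	ev := pentity.NewEntityEvent()
	st := ev.SetEmptyEntityState() // Type() now reports EventTypeEntityState
	st.Attributes().PutStr("state", "running")
	ev.SetEmptyEntityDelete() // discards the state payload; Type() now reports EventTypeEntityDelete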
+func (ms EntityEvent) CopyTo(dest EntityEvent) { + dest.state.AssertMutable() + dest.SetTimestamp(ms.Timestamp()) + dest.SetEntityType(ms.EntityType()) + ms.Id().CopyTo(dest.Id()) + switch ms.Type() { + case EventTypeEntityState: + ms.EntityState().CopyTo(dest.SetEmptyEntityState()) + case EventTypeEntityDelete: + ms.EntityDelete().CopyTo(dest.SetEmptyEntityDelete()) + } + +} diff --git a/pdata/pentity/generated_entityevent_test.go b/pdata/pentity/generated_entityevent_test.go new file mode 100644 index 00000000000..8f32217d0db --- /dev/null +++ b/pdata/pentity/generated_entityevent_test.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func TestEntityEvent_MoveTo(t *testing.T) { + ms := generateTestEntityEvent() + dest := NewEntityEvent() + ms.MoveTo(dest) + assert.Equal(t, NewEntityEvent(), ms) + assert.Equal(t, generateTestEntityEvent(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.MoveTo(newEntityEvent(&otlpentities.EntityEvent{}, &sharedState)) }) + assert.Panics(t, func() { newEntityEvent(&otlpentities.EntityEvent{}, &sharedState).MoveTo(dest) }) +} + +func TestEntityEvent_CopyTo(t *testing.T) { + ms := NewEntityEvent() + orig := NewEntityEvent() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestEntityEvent() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newEntityEvent(&otlpentities.EntityEvent{}, &sharedState)) }) +} + +func TestEntityEvent_Timestamp(t *testing.T) { + ms := NewEntityEvent() + assert.Equal(t, pcommon.Timestamp(0), ms.Timestamp()) + testValTimestamp := pcommon.Timestamp(1234567890) + ms.SetTimestamp(testValTimestamp) + assert.Equal(t, testValTimestamp, ms.Timestamp()) +} + +func TestEntityEvent_EntityType(t *testing.T) { + ms := NewEntityEvent() + assert.Equal(t, "", ms.EntityType()) + ms.SetEntityType("service") + assert.Equal(t, "service", ms.EntityType()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { newEntityEvent(&otlpentities.EntityEvent{}, &sharedState).SetEntityType("service") }) +} + +func TestEntityEvent_Id(t *testing.T) { + ms := NewEntityEvent() + assert.Equal(t, pcommon.NewMap(), ms.Id()) + internal.FillTestMap(internal.Map(ms.Id())) + assert.Equal(t, pcommon.Map(internal.GenerateTestMap()), ms.Id()) +} + +func TestEntityEvent_Type(t *testing.T) { + tv := NewEntityEvent() + assert.Equal(t, EventTypeEmpty, tv.Type()) +} + +func TestEntityEvent_EntityState(t *testing.T) { + ms := NewEntityEvent() + fillTestEntityState(ms.SetEmptyEntityState()) + assert.Equal(t, EventTypeEntityState, ms.Type()) + assert.Equal(t, generateTestEntityState(), ms.EntityState()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { newEntityEvent(&otlpentities.EntityEvent{}, &sharedState).SetEmptyEntityState() }) +} + +func TestEntityEvent_CopyTo_EntityState(t *testing.T) { + ms := NewEntityEvent() + fillTestEntityState(ms.SetEmptyEntityState()) + dest := NewEntityEvent() + ms.CopyTo(dest) + assert.Equal(t, ms, dest) + sharedState := internal.StateReadOnly + assert.Panics(t, 
func() { ms.CopyTo(newEntityEvent(&otlpentities.EntityEvent{}, &sharedState)) }) +} + +func TestEntityEvent_EntityDelete(t *testing.T) { + ms := NewEntityEvent() + fillTestEntityDelete(ms.SetEmptyEntityDelete()) + assert.Equal(t, EventTypeEntityDelete, ms.Type()) + assert.Equal(t, generateTestEntityDelete(), ms.EntityDelete()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { newEntityEvent(&otlpentities.EntityEvent{}, &sharedState).SetEmptyEntityDelete() }) +} + +func TestEntityEvent_CopyTo_EntityDelete(t *testing.T) { + ms := NewEntityEvent() + fillTestEntityDelete(ms.SetEmptyEntityDelete()) + dest := NewEntityEvent() + ms.CopyTo(dest) + assert.Equal(t, ms, dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newEntityEvent(&otlpentities.EntityEvent{}, &sharedState)) }) +} + +func generateTestEntityEvent() EntityEvent { + tv := NewEntityEvent() + fillTestEntityEvent(tv) + return tv +} + +func fillTestEntityEvent(tv EntityEvent) { + tv.orig.TimeUnixNano = 1234567890 + tv.orig.EntityType = "service" + internal.FillTestMap(internal.NewMap(&tv.orig.Id, tv.state)) + tv.orig.Data = &otlpentities.EntityEvent_EntityDelete{EntityDelete: &otlpentities.EntityDelete{}} + fillTestEntityDelete(newEntityDelete(tv.orig.GetEntityDelete(), tv.state)) +} diff --git a/pdata/pentity/generated_entityeventslice.go b/pdata/pentity/generated_entityeventslice.go new file mode 100644 index 00000000000..c2cf40960f9 --- /dev/null +++ b/pdata/pentity/generated_entityeventslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +// EntityEventSlice logically represents a slice of EntityEvent. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewEntityEventSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type EntityEventSlice struct { + orig *[]*otlpentities.EntityEvent + state *internal.State +} + +func newEntityEventSlice(orig *[]*otlpentities.EntityEvent, state *internal.State) EntityEventSlice { + return EntityEventSlice{orig: orig, state: state} +} + +// NewEntityEventSlice creates a EntityEventSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewEntityEventSlice() EntityEventSlice { + orig := []*otlpentities.EntityEvent(nil) + state := internal.StateMutable + return newEntityEventSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewEntityEventSlice()". +func (es EntityEventSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es EntityEventSlice) At(i int) EntityEvent { + return newEntityEvent((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. 
If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new EntityEventSlice can be initialized: +// +// es := NewEntityEventSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es EntityEventSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpentities.EntityEvent, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty EntityEvent. +// It returns the newly added EntityEvent. +func (es EntityEventSlice) AppendEmpty() EntityEvent { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpentities.EntityEvent{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es EntityEventSlice) MoveAndAppendTo(dest EntityEventSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es EntityEventSlice) RemoveIf(f func(EntityEvent) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es EntityEventSlice) CopyTo(dest EntityEventSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newEntityEvent((*es.orig)[i], es.state).CopyTo(newEntityEvent((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpentities.EntityEvent, srcLen) + wrappers := make([]*otlpentities.EntityEvent, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newEntityEvent((*es.orig)[i], es.state).CopyTo(newEntityEvent(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the EntityEvent elements within EntityEventSlice given the +// provided less function so that two instances of EntityEventSlice +// can be compared. +func (es EntityEventSlice) Sort(less func(a, b EntityEvent) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/pdata/pentity/generated_entityeventslice_test.go b/pdata/pentity/generated_entityeventslice_test.go new file mode 100644 index 00000000000..f0b088fc7bd --- /dev/null +++ b/pdata/pentity/generated_entityeventslice_test.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
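Sort accepts an arbitrary less function; the pointer-based ordering in the generated tests below only exercises the plumbing. A more typical caller (sketch) orders events by timestamp:

	evs := pentity.NewEntityEventSlice()
	// ... append and populate events ...
	evs.Sort(func(a, b pentity.EntityEvent) bool {
		return a.Timestamp() < b.Timestamp()
	})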
+ +package pentity + +import ( + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +func TestEntityEventSlice(t *testing.T) { + es := NewEntityEventSlice() + assert.Equal(t, 0, es.Len()) + state := internal.StateMutable + es = newEntityEventSlice(&[]*otlpentities.EntityEvent{}, &state) + assert.Equal(t, 0, es.Len()) + + emptyVal := NewEntityEvent() + testVal := generateTestEntityEvent() + for i := 0; i < 7; i++ { + el := es.AppendEmpty() + assert.Equal(t, emptyVal, es.At(i)) + fillTestEntityEvent(el) + assert.Equal(t, testVal, es.At(i)) + } + assert.Equal(t, 7, es.Len()) +} + +func TestEntityEventSliceReadOnly(t *testing.T) { + sharedState := internal.StateReadOnly + es := newEntityEventSlice(&[]*otlpentities.EntityEvent{}, &sharedState) + assert.Equal(t, 0, es.Len()) + assert.Panics(t, func() { es.AppendEmpty() }) + assert.Panics(t, func() { es.EnsureCapacity(2) }) + es2 := NewEntityEventSlice() + es.CopyTo(es2) + assert.Panics(t, func() { es2.CopyTo(es) }) + assert.Panics(t, func() { es.MoveAndAppendTo(es2) }) + assert.Panics(t, func() { es2.MoveAndAppendTo(es) }) +} + +func TestEntityEventSlice_CopyTo(t *testing.T) { + dest := NewEntityEventSlice() + // Test CopyTo to empty + NewEntityEventSlice().CopyTo(dest) + assert.Equal(t, NewEntityEventSlice(), dest) + + // Test CopyTo larger slice + generateTestEntityEventSlice().CopyTo(dest) + assert.Equal(t, generateTestEntityEventSlice(), dest) + + // Test CopyTo same size slice + generateTestEntityEventSlice().CopyTo(dest) + assert.Equal(t, generateTestEntityEventSlice(), dest) +} + +func TestEntityEventSlice_EnsureCapacity(t *testing.T) { + es := generateTestEntityEventSlice() + + // Test ensure smaller capacity. 
+ const ensureSmallLen = 4 + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + assert.Equal(t, es.Len(), cap(*es.orig)) + assert.Equal(t, generateTestEntityEventSlice(), es) + + // Test ensure larger capacity + const ensureLargeLen = 9 + es.EnsureCapacity(ensureLargeLen) + assert.Less(t, generateTestEntityEventSlice().Len(), ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) + assert.Equal(t, generateTestEntityEventSlice(), es) +} + +func TestEntityEventSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestEntityEventSlice() + dest := NewEntityEventSlice() + src := generateTestEntityEventSlice() + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestEntityEventSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestEntityEventSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestEntityEventSlice().MoveAndAppendTo(dest) + assert.Equal(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.Equal(t, expectedSlice.At(i), dest.At(i)) + assert.Equal(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestEntityEventSlice_RemoveIf(t *testing.T) { + // Test RemoveIf on empty slice + emptySlice := NewEntityEventSlice() + emptySlice.RemoveIf(func(el EntityEvent) bool { + t.Fail() + return false + }) + + // Test RemoveIf + filtered := generateTestEntityEventSlice() + pos := 0 + filtered.RemoveIf(func(el EntityEvent) bool { + pos++ + return pos%3 == 0 + }) + assert.Equal(t, 5, filtered.Len()) +} + +func TestEntityEventSlice_Sort(t *testing.T) { + es := generateTestEntityEventSlice() + es.Sort(func(a, b EntityEvent) bool { + return uintptr(unsafe.Pointer(a.orig)) < uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.Less(t, uintptr(unsafe.Pointer(es.At(i-1).orig)), uintptr(unsafe.Pointer(es.At(i).orig))) + } + es.Sort(func(a, b EntityEvent) bool { + return uintptr(unsafe.Pointer(a.orig)) > uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.Greater(t, uintptr(unsafe.Pointer(es.At(i-1).orig)), uintptr(unsafe.Pointer(es.At(i).orig))) + } +} + +func generateTestEntityEventSlice() EntityEventSlice { + es := NewEntityEventSlice() + fillTestEntityEventSlice(es) + return es +} + +func fillTestEntityEventSlice(es EntityEventSlice) { + *es.orig = make([]*otlpentities.EntityEvent, 7) + for i := 0; i < 7; i++ { + (*es.orig)[i] = &otlpentities.EntityEvent{} + fillTestEntityEvent(newEntityEvent((*es.orig)[i], es.state)) + } +} diff --git a/pdata/pentity/generated_entitystate.go b/pdata/pentity/generated_entitystate.go new file mode 100644 index 00000000000..a4687343927 --- /dev/null +++ b/pdata/pentity/generated_entitystate.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// EntityState are experimental implementation of OpenTelemetry Entity Data Model. 
+ +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewEntityState function to create new instances. +// Important: zero-initialized instance is not valid for use. +type EntityState struct { + orig *otlpentities.EntityState + state *internal.State +} + +func newEntityState(orig *otlpentities.EntityState, state *internal.State) EntityState { + return EntityState{orig: orig, state: state} +} + +// NewEntityState creates a new empty EntityState. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewEntityState() EntityState { + state := internal.StateMutable + return newEntityState(&otlpentities.EntityState{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms EntityState) MoveTo(dest EntityState) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpentities.EntityState{} +} + +// Attributes returns the Attributes associated with this EntityState. +func (ms EntityState) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this EntityState. +func (ms EntityState) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this EntityState. +func (ms EntityState) SetDroppedAttributesCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms EntityState) CopyTo(dest EntityState) { + dest.state.AssertMutable() + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/pdata/pentity/generated_entitystate_test.go b/pdata/pentity/generated_entitystate_test.go new file mode 100644 index 00000000000..6f455dd8adb --- /dev/null +++ b/pdata/pentity/generated_entitystate_test.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
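A small sketch of the EntityState accessors covered by the tests below; the attribute key is illustrative. MoveTo transfers the payload and resets the source:

	src := pentity.NewEntityState()
	src.Attributes().PutStr("os.type", "linux")
	src.SetDroppedAttributesCount(2)
	dst := pentity.NewEntityState()
	src.MoveTo(dst) // dst now holds the data; src is reset to an empty state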
+ +package pentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func TestEntityState_MoveTo(t *testing.T) { + ms := generateTestEntityState() + dest := NewEntityState() + ms.MoveTo(dest) + assert.Equal(t, NewEntityState(), ms) + assert.Equal(t, generateTestEntityState(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.MoveTo(newEntityState(&otlpentities.EntityState{}, &sharedState)) }) + assert.Panics(t, func() { newEntityState(&otlpentities.EntityState{}, &sharedState).MoveTo(dest) }) +} + +func TestEntityState_CopyTo(t *testing.T) { + ms := NewEntityState() + orig := NewEntityState() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestEntityState() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newEntityState(&otlpentities.EntityState{}, &sharedState)) }) +} + +func TestEntityState_Attributes(t *testing.T) { + ms := NewEntityState() + assert.Equal(t, pcommon.NewMap(), ms.Attributes()) + internal.FillTestMap(internal.Map(ms.Attributes())) + assert.Equal(t, pcommon.Map(internal.GenerateTestMap()), ms.Attributes()) +} + +func TestEntityState_DroppedAttributesCount(t *testing.T) { + ms := NewEntityState() + assert.Equal(t, uint32(0), ms.DroppedAttributesCount()) + ms.SetDroppedAttributesCount(uint32(17)) + assert.Equal(t, uint32(17), ms.DroppedAttributesCount()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + newEntityState(&otlpentities.EntityState{}, &sharedState).SetDroppedAttributesCount(uint32(17)) + }) +} + +func generateTestEntityState() EntityState { + tv := NewEntityState() + fillTestEntityState(tv) + return tv +} + +func fillTestEntityState(tv EntityState) { + internal.FillTestMap(internal.NewMap(&tv.orig.Attributes, tv.state)) + tv.orig.DroppedAttributesCount = uint32(17) +} diff --git a/pdata/pentity/generated_entitystateslice.go b/pdata/pentity/generated_entitystateslice.go new file mode 100644 index 00000000000..a4df068ae84 --- /dev/null +++ b/pdata/pentity/generated_entitystateslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +// EntityStateSlice logically represents a slice of EntityState. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewEntityStateSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type EntityStateSlice struct { + orig *[]*otlpentities.EntityState + state *internal.State +} + +func newEntityStateSlice(orig *[]*otlpentities.EntityState, state *internal.State) EntityStateSlice { + return EntityStateSlice{orig: orig, state: state} +} + +// NewEntityStateSlice creates a EntityStateSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. 
+func NewEntityStateSlice() EntityStateSlice { + orig := []*otlpentities.EntityState(nil) + state := internal.StateMutable + return newEntityStateSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewEntityStateSlice()". +func (es EntityStateSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es EntityStateSlice) At(i int) EntityState { + return newEntityState((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new EntityStateSlice can be initialized: +// +// es := NewEntityStateSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es EntityStateSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpentities.EntityState, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty EntityState. +// It returns the newly added EntityState. +func (es EntityStateSlice) AppendEmpty() EntityState { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpentities.EntityState{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es EntityStateSlice) MoveAndAppendTo(dest EntityStateSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es EntityStateSlice) RemoveIf(f func(EntityState) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
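The two transfer operations differ in ownership: CopyTo (below) leaves the source intact, while MoveAndAppendTo empties it. A sketch with illustrative contents:

	src := pentity.NewEntityStateSlice()
	src.AppendEmpty().Attributes().PutStr("k", "v")
	dst := pentity.NewEntityStateSlice()
	src.CopyTo(dst)          // dst receives an independent copy; src is unchanged
	src.MoveAndAppendTo(dst) // src's elements are appended to dst; src becomes empty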
+func (es EntityStateSlice) CopyTo(dest EntityStateSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newEntityState((*es.orig)[i], es.state).CopyTo(newEntityState((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpentities.EntityState, srcLen) + wrappers := make([]*otlpentities.EntityState, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newEntityState((*es.orig)[i], es.state).CopyTo(newEntityState(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the EntityState elements within EntityStateSlice given the +// provided less function so that two instances of EntityStateSlice +// can be compared. +func (es EntityStateSlice) Sort(less func(a, b EntityState) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/pdata/pentity/generated_entitystateslice_test.go b/pdata/pentity/generated_entitystateslice_test.go new file mode 100644 index 00000000000..cfa145b5b98 --- /dev/null +++ b/pdata/pentity/generated_entitystateslice_test.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +func TestEntityStateSlice(t *testing.T) { + es := NewEntityStateSlice() + assert.Equal(t, 0, es.Len()) + state := internal.StateMutable + es = newEntityStateSlice(&[]*otlpentities.EntityState{}, &state) + assert.Equal(t, 0, es.Len()) + + emptyVal := NewEntityState() + testVal := generateTestEntityState() + for i := 0; i < 7; i++ { + el := es.AppendEmpty() + assert.Equal(t, emptyVal, es.At(i)) + fillTestEntityState(el) + assert.Equal(t, testVal, es.At(i)) + } + assert.Equal(t, 7, es.Len()) +} + +func TestEntityStateSliceReadOnly(t *testing.T) { + sharedState := internal.StateReadOnly + es := newEntityStateSlice(&[]*otlpentities.EntityState{}, &sharedState) + assert.Equal(t, 0, es.Len()) + assert.Panics(t, func() { es.AppendEmpty() }) + assert.Panics(t, func() { es.EnsureCapacity(2) }) + es2 := NewEntityStateSlice() + es.CopyTo(es2) + assert.Panics(t, func() { es2.CopyTo(es) }) + assert.Panics(t, func() { es.MoveAndAppendTo(es2) }) + assert.Panics(t, func() { es2.MoveAndAppendTo(es) }) +} + +func TestEntityStateSlice_CopyTo(t *testing.T) { + dest := NewEntityStateSlice() + // Test CopyTo to empty + NewEntityStateSlice().CopyTo(dest) + assert.Equal(t, NewEntityStateSlice(), dest) + + // Test CopyTo larger slice + generateTestEntityStateSlice().CopyTo(dest) + assert.Equal(t, generateTestEntityStateSlice(), dest) + + // Test CopyTo same size slice + generateTestEntityStateSlice().CopyTo(dest) + assert.Equal(t, generateTestEntityStateSlice(), dest) +} + +func TestEntityStateSlice_EnsureCapacity(t *testing.T) { + es := generateTestEntityStateSlice() + + // Test ensure smaller capacity. 
+ const ensureSmallLen = 4 + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + assert.Equal(t, es.Len(), cap(*es.orig)) + assert.Equal(t, generateTestEntityStateSlice(), es) + + // Test ensure larger capacity + const ensureLargeLen = 9 + es.EnsureCapacity(ensureLargeLen) + assert.Less(t, generateTestEntityStateSlice().Len(), ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) + assert.Equal(t, generateTestEntityStateSlice(), es) +} + +func TestEntityStateSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestEntityStateSlice() + dest := NewEntityStateSlice() + src := generateTestEntityStateSlice() + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestEntityStateSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestEntityStateSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestEntityStateSlice().MoveAndAppendTo(dest) + assert.Equal(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.Equal(t, expectedSlice.At(i), dest.At(i)) + assert.Equal(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestEntityStateSlice_RemoveIf(t *testing.T) { + // Test RemoveIf on empty slice + emptySlice := NewEntityStateSlice() + emptySlice.RemoveIf(func(el EntityState) bool { + t.Fail() + return false + }) + + // Test RemoveIf + filtered := generateTestEntityStateSlice() + pos := 0 + filtered.RemoveIf(func(el EntityState) bool { + pos++ + return pos%3 == 0 + }) + assert.Equal(t, 5, filtered.Len()) +} + +func TestEntityStateSlice_Sort(t *testing.T) { + es := generateTestEntityStateSlice() + es.Sort(func(a, b EntityState) bool { + return uintptr(unsafe.Pointer(a.orig)) < uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.True(t, uintptr(unsafe.Pointer(es.At(i-1).orig)) < uintptr(unsafe.Pointer(es.At(i).orig))) + } + es.Sort(func(a, b EntityState) bool { + return uintptr(unsafe.Pointer(a.orig)) > uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.True(t, uintptr(unsafe.Pointer(es.At(i-1).orig)) > uintptr(unsafe.Pointer(es.At(i).orig))) + } +} + +func generateTestEntityStateSlice() EntityStateSlice { + es := NewEntityStateSlice() + fillTestEntityStateSlice(es) + return es +} + +func fillTestEntityStateSlice(es EntityStateSlice) { + *es.orig = make([]*otlpentities.EntityState, 7) + for i := 0; i < 7; i++ { + (*es.orig)[i] = &otlpentities.EntityState{} + fillTestEntityState(newEntityState((*es.orig)[i], es.state)) + } +} diff --git a/pdata/pentity/generated_resourceentities.go b/pdata/pentity/generated_resourceentities.go new file mode 100644 index 00000000000..fb89b59e5af --- /dev/null +++ b/pdata/pentity/generated_resourceentities.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceEntities is a collection of entities from a Resource. 
+// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceEntities function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceEntities struct { + orig *otlpentities.ResourceEntities + state *internal.State +} + +func newResourceEntities(orig *otlpentities.ResourceEntities, state *internal.State) ResourceEntities { + return ResourceEntities{orig: orig, state: state} +} + +// NewResourceEntities creates a new empty ResourceEntities. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceEntities() ResourceEntities { + state := internal.StateMutable + return newResourceEntities(&otlpentities.ResourceEntities{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceEntities) MoveTo(dest ResourceEntities) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpentities.ResourceEntities{} +} + +// Resource returns the resource associated with this ResourceEntities. +func (ms ResourceEntities) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceEntities. +func (ms ResourceEntities) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceEntities. +func (ms ResourceEntities) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// ScopeEntities returns the ScopeEntities associated with this ResourceEntities. +func (ms ResourceEntities) ScopeEntities() ScopeEntitiesSlice { + return newScopeEntitiesSlice(&ms.orig.ScopeEntities, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceEntities) CopyTo(dest ResourceEntities) { + dest.state.AssertMutable() + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeEntities().CopyTo(dest.ScopeEntities()) +} diff --git a/pdata/pentity/generated_resourceentities_test.go b/pdata/pentity/generated_resourceentities_test.go new file mode 100644 index 00000000000..7b74f5d8a3b --- /dev/null +++ b/pdata/pentity/generated_resourceentities_test.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package pentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func TestResourceEntities_MoveTo(t *testing.T) { + ms := generateTestResourceEntities() + dest := NewResourceEntities() + ms.MoveTo(dest) + assert.Equal(t, NewResourceEntities(), ms) + assert.Equal(t, generateTestResourceEntities(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.MoveTo(newResourceEntities(&otlpentities.ResourceEntities{}, &sharedState)) }) + assert.Panics(t, func() { newResourceEntities(&otlpentities.ResourceEntities{}, &sharedState).MoveTo(dest) }) +} + +func TestResourceEntities_CopyTo(t *testing.T) { + ms := NewResourceEntities() + orig := NewResourceEntities() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestResourceEntities() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newResourceEntities(&otlpentities.ResourceEntities{}, &sharedState)) }) +} + +func TestResourceEntities_Resource(t *testing.T) { + ms := NewResourceEntities() + internal.FillTestResource(internal.Resource(ms.Resource())) + assert.Equal(t, pcommon.Resource(internal.GenerateTestResource()), ms.Resource()) +} + +func TestResourceEntities_SchemaUrl(t *testing.T) { + ms := NewResourceEntities() + assert.Equal(t, "", ms.SchemaUrl()) + ms.SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0") + assert.Equal(t, "https://opentelemetry.io/schemas/1.5.0", ms.SchemaUrl()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + newResourceEntities(&otlpentities.ResourceEntities{}, &sharedState).SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0") + }) +} + +func TestResourceEntities_ScopeEntities(t *testing.T) { + ms := NewResourceEntities() + assert.Equal(t, NewScopeEntitiesSlice(), ms.ScopeEntities()) + fillTestScopeEntitiesSlice(ms.ScopeEntities()) + assert.Equal(t, generateTestScopeEntitiesSlice(), ms.ScopeEntities()) +} + +func generateTestResourceEntities() ResourceEntities { + tv := NewResourceEntities() + fillTestResourceEntities(tv) + return tv +} + +func fillTestResourceEntities(tv ResourceEntities) { + internal.FillTestResource(internal.NewResource(&tv.orig.Resource, tv.state)) + tv.orig.SchemaUrl = "https://opentelemetry.io/schemas/1.5.0" + fillTestScopeEntitiesSlice(newScopeEntitiesSlice(&tv.orig.ScopeEntities, tv.state)) +} diff --git a/pdata/pentity/generated_resourceentitiesslice.go b/pdata/pentity/generated_resourceentitiesslice.go new file mode 100644 index 00000000000..bc730c54944 --- /dev/null +++ b/pdata/pentity/generated_resourceentitiesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +// ResourceEntitiesSlice logically represents a slice of ResourceEntities. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceEntitiesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
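From a consumer's perspective, the slice is walked the same way as the EntityCount loop in entities.go. A sketch, where ed is a pentity.Entities and ScopeEntities is assumed to follow the same generated Len/At pattern:

	res := ed.ResourceEntities()
	for i := 0; i < res.Len(); i++ {
		scopes := res.At(i).ScopeEntities()
		for j := 0; j < scopes.Len(); j++ {
			events := scopes.At(j).EntityEvents()
			for k := 0; k < events.Len(); k++ {
				_ = events.At(k).EntityType()
			}
		}
	}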
+type ResourceEntitiesSlice struct { + orig *[]*otlpentities.ResourceEntities + state *internal.State +} + +func newResourceEntitiesSlice(orig *[]*otlpentities.ResourceEntities, state *internal.State) ResourceEntitiesSlice { + return ResourceEntitiesSlice{orig: orig, state: state} +} + +// NewResourceEntitiesSlice creates a ResourceEntitiesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewResourceEntitiesSlice() ResourceEntitiesSlice { + orig := []*otlpentities.ResourceEntities(nil) + state := internal.StateMutable + return newResourceEntitiesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceEntitiesSlice()". +func (es ResourceEntitiesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceEntitiesSlice) At(i int) ResourceEntities { + return newResourceEntities((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceEntitiesSlice can be initialized: +// +// es := NewResourceEntitiesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceEntitiesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpentities.ResourceEntities, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceEntities. +// It returns the newly added ResourceEntities. +func (es ResourceEntitiesSlice) AppendEmpty() ResourceEntities { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpentities.ResourceEntities{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceEntitiesSlice) MoveAndAppendTo(dest ResourceEntitiesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceEntitiesSlice) RemoveIf(f func(ResourceEntities) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es ResourceEntitiesSlice) CopyTo(dest ResourceEntitiesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceEntities((*es.orig)[i], es.state).CopyTo(newResourceEntities((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpentities.ResourceEntities, srcLen) + wrappers := make([]*otlpentities.ResourceEntities, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceEntities((*es.orig)[i], es.state).CopyTo(newResourceEntities(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceEntities elements within ResourceEntitiesSlice given the +// provided less function so that two instances of ResourceEntitiesSlice +// can be compared. +func (es ResourceEntitiesSlice) Sort(less func(a, b ResourceEntities) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/pdata/pentity/generated_resourceentitiesslice_test.go b/pdata/pentity/generated_resourceentitiesslice_test.go new file mode 100644 index 00000000000..2b3575ab27f --- /dev/null +++ b/pdata/pentity/generated_resourceentitiesslice_test.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +func TestResourceEntitiesSlice(t *testing.T) { + es := NewResourceEntitiesSlice() + assert.Equal(t, 0, es.Len()) + state := internal.StateMutable + es = newResourceEntitiesSlice(&[]*otlpentities.ResourceEntities{}, &state) + assert.Equal(t, 0, es.Len()) + + emptyVal := NewResourceEntities() + testVal := generateTestResourceEntities() + for i := 0; i < 7; i++ { + el := es.AppendEmpty() + assert.Equal(t, emptyVal, es.At(i)) + fillTestResourceEntities(el) + assert.Equal(t, testVal, es.At(i)) + } + assert.Equal(t, 7, es.Len()) +} + +func TestResourceEntitiesSliceReadOnly(t *testing.T) { + sharedState := internal.StateReadOnly + es := newResourceEntitiesSlice(&[]*otlpentities.ResourceEntities{}, &sharedState) + assert.Equal(t, 0, es.Len()) + assert.Panics(t, func() { es.AppendEmpty() }) + assert.Panics(t, func() { es.EnsureCapacity(2) }) + es2 := NewResourceEntitiesSlice() + es.CopyTo(es2) + assert.Panics(t, func() { es2.CopyTo(es) }) + assert.Panics(t, func() { es.MoveAndAppendTo(es2) }) + assert.Panics(t, func() { es2.MoveAndAppendTo(es) }) +} + +func TestResourceEntitiesSlice_CopyTo(t *testing.T) { + dest := NewResourceEntitiesSlice() + // Test CopyTo to empty + NewResourceEntitiesSlice().CopyTo(dest) + assert.Equal(t, NewResourceEntitiesSlice(), dest) + + // Test CopyTo larger slice + generateTestResourceEntitiesSlice().CopyTo(dest) + assert.Equal(t, generateTestResourceEntitiesSlice(), dest) + + // Test CopyTo same size slice + generateTestResourceEntitiesSlice().CopyTo(dest) + assert.Equal(t, generateTestResourceEntitiesSlice(), dest) +} + +func TestResourceEntitiesSlice_EnsureCapacity(t *testing.T) { + es := generateTestResourceEntitiesSlice() + + // Test ensure smaller capacity. 
+ const ensureSmallLen = 4 + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + assert.Equal(t, es.Len(), cap(*es.orig)) + assert.Equal(t, generateTestResourceEntitiesSlice(), es) + + // Test ensure larger capacity + const ensureLargeLen = 9 + es.EnsureCapacity(ensureLargeLen) + assert.Less(t, generateTestResourceEntitiesSlice().Len(), ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) + assert.Equal(t, generateTestResourceEntitiesSlice(), es) +} + +func TestResourceEntitiesSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestResourceEntitiesSlice() + dest := NewResourceEntitiesSlice() + src := generateTestResourceEntitiesSlice() + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestResourceEntitiesSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestResourceEntitiesSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestResourceEntitiesSlice().MoveAndAppendTo(dest) + assert.Equal(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.Equal(t, expectedSlice.At(i), dest.At(i)) + assert.Equal(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestResourceEntitiesSlice_RemoveIf(t *testing.T) { + // Test RemoveIf on empty slice + emptySlice := NewResourceEntitiesSlice() + emptySlice.RemoveIf(func(el ResourceEntities) bool { + t.Fail() + return false + }) + + // Test RemoveIf + filtered := generateTestResourceEntitiesSlice() + pos := 0 + filtered.RemoveIf(func(el ResourceEntities) bool { + pos++ + return pos%3 == 0 + }) + assert.Equal(t, 5, filtered.Len()) +} + +func TestResourceEntitiesSlice_Sort(t *testing.T) { + es := generateTestResourceEntitiesSlice() + es.Sort(func(a, b ResourceEntities) bool { + return uintptr(unsafe.Pointer(a.orig)) < uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.Less(t, uintptr(unsafe.Pointer(es.At(i-1).orig)), uintptr(unsafe.Pointer(es.At(i).orig))) + } + es.Sort(func(a, b ResourceEntities) bool { + return uintptr(unsafe.Pointer(a.orig)) > uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.Greater(t, uintptr(unsafe.Pointer(es.At(i-1).orig)), uintptr(unsafe.Pointer(es.At(i).orig))) + } +} + +func generateTestResourceEntitiesSlice() ResourceEntitiesSlice { + es := NewResourceEntitiesSlice() + fillTestResourceEntitiesSlice(es) + return es +} + +func fillTestResourceEntitiesSlice(es ResourceEntitiesSlice) { + *es.orig = make([]*otlpentities.ResourceEntities, 7) + for i := 0; i < 7; i++ { + (*es.orig)[i] = &otlpentities.ResourceEntities{} + fillTestResourceEntities(newResourceEntities((*es.orig)[i], es.state)) + } +} diff --git a/pdata/pentity/generated_scopeentities.go b/pdata/pentity/generated_scopeentities.go new file mode 100644 index 00000000000..9f6f9376ea7 --- /dev/null +++ b/pdata/pentity/generated_scopeentities.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package pentity + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeEntities is a collection of entities from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeEntities function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeEntities struct { + orig *otlpentities.ScopeEntities + state *internal.State +} + +func newScopeEntities(orig *otlpentities.ScopeEntities, state *internal.State) ScopeEntities { + return ScopeEntities{orig: orig, state: state} +} + +// NewScopeEntities creates a new empty ScopeEntities. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeEntities() ScopeEntities { + state := internal.StateMutable + return newScopeEntities(&otlpentities.ScopeEntities{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeEntities) MoveTo(dest ScopeEntities) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpentities.ScopeEntities{} +} + +// Scope returns the scope associated with this ScopeEntities. +func (ms ScopeEntities) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeEntities. +func (ms ScopeEntities) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeEntities. +func (ms ScopeEntities) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// EntityEvents returns the EntityEvents associated with this ScopeEntities. +func (ms ScopeEntities) EntityEvents() EntityEventSlice { + return newEntityEventSlice(&ms.orig.EntityEvents, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeEntities) CopyTo(dest ScopeEntities) { + dest.state.AssertMutable() + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.EntityEvents().CopyTo(dest.EntityEvents()) +} diff --git a/pdata/pentity/generated_scopeentities_test.go b/pdata/pentity/generated_scopeentities_test.go new file mode 100644 index 00000000000..2e03b67f8a4 --- /dev/null +++ b/pdata/pentity/generated_scopeentities_test.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package pentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func TestScopeEntities_MoveTo(t *testing.T) { + ms := generateTestScopeEntities() + dest := NewScopeEntities() + ms.MoveTo(dest) + assert.Equal(t, NewScopeEntities(), ms) + assert.Equal(t, generateTestScopeEntities(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.MoveTo(newScopeEntities(&otlpentities.ScopeEntities{}, &sharedState)) }) + assert.Panics(t, func() { newScopeEntities(&otlpentities.ScopeEntities{}, &sharedState).MoveTo(dest) }) +} + +func TestScopeEntities_CopyTo(t *testing.T) { + ms := NewScopeEntities() + orig := NewScopeEntities() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestScopeEntities() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { ms.CopyTo(newScopeEntities(&otlpentities.ScopeEntities{}, &sharedState)) }) +} + +func TestScopeEntities_Scope(t *testing.T) { + ms := NewScopeEntities() + internal.FillTestInstrumentationScope(internal.InstrumentationScope(ms.Scope())) + assert.Equal(t, pcommon.InstrumentationScope(internal.GenerateTestInstrumentationScope()), ms.Scope()) +} + +func TestScopeEntities_SchemaUrl(t *testing.T) { + ms := NewScopeEntities() + assert.Equal(t, "", ms.SchemaUrl()) + ms.SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0") + assert.Equal(t, "https://opentelemetry.io/schemas/1.5.0", ms.SchemaUrl()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + newScopeEntities(&otlpentities.ScopeEntities{}, &sharedState).SetSchemaUrl("https://opentelemetry.io/schemas/1.5.0") + }) +} + +func TestScopeEntities_EntityEvents(t *testing.T) { + ms := NewScopeEntities() + assert.Equal(t, NewEntityEventSlice(), ms.EntityEvents()) + fillTestEntityEventSlice(ms.EntityEvents()) + assert.Equal(t, generateTestEntityEventSlice(), ms.EntityEvents()) +} + +func generateTestScopeEntities() ScopeEntities { + tv := NewScopeEntities() + fillTestScopeEntities(tv) + return tv +} + +func fillTestScopeEntities(tv ScopeEntities) { + internal.FillTestInstrumentationScope(internal.NewInstrumentationScope(&tv.orig.Scope, tv.state)) + tv.orig.SchemaUrl = "https://opentelemetry.io/schemas/1.5.0" + fillTestEntityEventSlice(newEntityEventSlice(&tv.orig.EntityEvents, tv.state)) +} diff --git a/pdata/pentity/generated_scopeentitiesslice.go b/pdata/pentity/generated_scopeentitiesslice.go new file mode 100644 index 00000000000..685120203e0 --- /dev/null +++ b/pdata/pentity/generated_scopeentitiesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +// ScopeEntitiesSlice logically represents a slice of ScopeEntities. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeEntitiesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type ScopeEntitiesSlice struct { + orig *[]*otlpentities.ScopeEntities + state *internal.State +} + +func newScopeEntitiesSlice(orig *[]*otlpentities.ScopeEntities, state *internal.State) ScopeEntitiesSlice { + return ScopeEntitiesSlice{orig: orig, state: state} +} + +// NewScopeEntitiesSlice creates a ScopeEntitiesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeEntitiesSlice() ScopeEntitiesSlice { + orig := []*otlpentities.ScopeEntities(nil) + state := internal.StateMutable + return newScopeEntitiesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeEntitiesSlice()". +func (es ScopeEntitiesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ScopeEntitiesSlice) At(i int) ScopeEntities { + return newScopeEntities((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeEntitiesSlice can be initialized: +// +// es := NewScopeEntitiesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ScopeEntitiesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpentities.ScopeEntities, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeEntities. +// It returns the newly added ScopeEntities. +func (es ScopeEntitiesSlice) AppendEmpty() ScopeEntities { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpentities.ScopeEntities{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeEntitiesSlice) MoveAndAppendTo(dest ScopeEntitiesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeEntitiesSlice) RemoveIf(f func(ScopeEntities) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es ScopeEntitiesSlice) CopyTo(dest ScopeEntitiesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeEntities((*es.orig)[i], es.state).CopyTo(newScopeEntities((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpentities.ScopeEntities, srcLen) + wrappers := make([]*otlpentities.ScopeEntities, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeEntities((*es.orig)[i], es.state).CopyTo(newScopeEntities(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeEntities elements within ScopeEntitiesSlice given the +// provided less function so that two instances of ScopeEntitiesSlice +// can be compared. +func (es ScopeEntitiesSlice) Sort(less func(a, b ScopeEntities) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/pdata/pentity/generated_scopeentitiesslice_test.go b/pdata/pentity/generated_scopeentitiesslice_test.go new file mode 100644 index 00000000000..34284bea52a --- /dev/null +++ b/pdata/pentity/generated_scopeentitiesslice_test.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentity + +import ( + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +func TestScopeEntitiesSlice(t *testing.T) { + es := NewScopeEntitiesSlice() + assert.Equal(t, 0, es.Len()) + state := internal.StateMutable + es = newScopeEntitiesSlice(&[]*otlpentities.ScopeEntities{}, &state) + assert.Equal(t, 0, es.Len()) + + emptyVal := NewScopeEntities() + testVal := generateTestScopeEntities() + for i := 0; i < 7; i++ { + el := es.AppendEmpty() + assert.Equal(t, emptyVal, es.At(i)) + fillTestScopeEntities(el) + assert.Equal(t, testVal, es.At(i)) + } + assert.Equal(t, 7, es.Len()) +} + +func TestScopeEntitiesSliceReadOnly(t *testing.T) { + sharedState := internal.StateReadOnly + es := newScopeEntitiesSlice(&[]*otlpentities.ScopeEntities{}, &sharedState) + assert.Equal(t, 0, es.Len()) + assert.Panics(t, func() { es.AppendEmpty() }) + assert.Panics(t, func() { es.EnsureCapacity(2) }) + es2 := NewScopeEntitiesSlice() + es.CopyTo(es2) + assert.Panics(t, func() { es2.CopyTo(es) }) + assert.Panics(t, func() { es.MoveAndAppendTo(es2) }) + assert.Panics(t, func() { es2.MoveAndAppendTo(es) }) +} + +func TestScopeEntitiesSlice_CopyTo(t *testing.T) { + dest := NewScopeEntitiesSlice() + // Test CopyTo to empty + NewScopeEntitiesSlice().CopyTo(dest) + assert.Equal(t, NewScopeEntitiesSlice(), dest) + + // Test CopyTo larger slice + generateTestScopeEntitiesSlice().CopyTo(dest) + assert.Equal(t, generateTestScopeEntitiesSlice(), dest) + + // Test CopyTo same size slice + generateTestScopeEntitiesSlice().CopyTo(dest) + assert.Equal(t, generateTestScopeEntitiesSlice(), dest) +} + +func TestScopeEntitiesSlice_EnsureCapacity(t *testing.T) { + es := generateTestScopeEntitiesSlice() + + // Test ensure smaller capacity. 
+ const ensureSmallLen = 4 + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + assert.Equal(t, es.Len(), cap(*es.orig)) + assert.Equal(t, generateTestScopeEntitiesSlice(), es) + + // Test ensure larger capacity + const ensureLargeLen = 9 + es.EnsureCapacity(ensureLargeLen) + assert.Less(t, generateTestScopeEntitiesSlice().Len(), ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) + assert.Equal(t, generateTestScopeEntitiesSlice(), es) +} + +func TestScopeEntitiesSlice_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTestScopeEntitiesSlice() + dest := NewScopeEntitiesSlice() + src := generateTestScopeEntitiesSlice() + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestScopeEntitiesSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.Equal(t, generateTestScopeEntitiesSlice(), dest) + assert.Equal(t, 0, src.Len()) + assert.Equal(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTestScopeEntitiesSlice().MoveAndAppendTo(dest) + assert.Equal(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.Equal(t, expectedSlice.At(i), dest.At(i)) + assert.Equal(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func TestScopeEntitiesSlice_RemoveIf(t *testing.T) { + // Test RemoveIf on empty slice + emptySlice := NewScopeEntitiesSlice() + emptySlice.RemoveIf(func(el ScopeEntities) bool { + t.Fail() + return false + }) + + // Test RemoveIf + filtered := generateTestScopeEntitiesSlice() + pos := 0 + filtered.RemoveIf(func(el ScopeEntities) bool { + pos++ + return pos%3 == 0 + }) + assert.Equal(t, 5, filtered.Len()) +} + +func TestScopeEntitiesSlice_Sort(t *testing.T) { + es := generateTestScopeEntitiesSlice() + es.Sort(func(a, b ScopeEntities) bool { + return uintptr(unsafe.Pointer(a.orig)) < uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.Less(t, uintptr(unsafe.Pointer(es.At(i-1).orig)), uintptr(unsafe.Pointer(es.At(i).orig))) + } + es.Sort(func(a, b ScopeEntities) bool { + return uintptr(unsafe.Pointer(a.orig)) > uintptr(unsafe.Pointer(b.orig)) + }) + for i := 1; i < es.Len(); i++ { + assert.Greater(t, uintptr(unsafe.Pointer(es.At(i-1).orig)), uintptr(unsafe.Pointer(es.At(i).orig))) + } +} + +func generateTestScopeEntitiesSlice() ScopeEntitiesSlice { + es := NewScopeEntitiesSlice() + fillTestScopeEntitiesSlice(es) + return es +} + +func fillTestScopeEntitiesSlice(es ScopeEntitiesSlice) { + *es.orig = make([]*otlpentities.ScopeEntities, 7) + for i := 0; i < 7; i++ { + (*es.orig)[i] = &otlpentities.ScopeEntities{} + fillTestScopeEntities(newScopeEntities((*es.orig)[i], es.state)) + } +} diff --git a/pdata/pentity/json.go b/pdata/pentity/json.go new file mode 100644 index 00000000000..49b96a4f547 --- /dev/null +++ b/pdata/pentity/json.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pentity // import "go.opentelemetry.io/collector/pdata/pentity" + +import ( + "bytes" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/json" +) + +// JSONMarshaler marshals pdata.Entities to JSON bytes using the OTLP/JSON format. +type JSONMarshaler struct{} + +// MarshalEntities to the OTLP/JSON format. 
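+//
+// A round-trip sketch (assuming an existing Entities value "ed"):
+//
+//	payload, err := (&JSONMarshaler{}).MarshalEntities(ed)
+//	if err != nil {
+//		// handle the marshaling error
+//	}
+//	ed2, err := (&JSONUnmarshaler{}).UnmarshalEntities(payload)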
+func (*JSONMarshaler) MarshalEntities(ld Entities) ([]byte, error) {
+	buf := bytes.Buffer{}
+	pb := internal.EntitiesToProto(internal.Entities(ld))
+	err := json.Marshal(&buf, &pb)
+	return buf.Bytes(), err
+}
+
+var _ Unmarshaler = (*JSONUnmarshaler)(nil)
+
+// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Entities.
+type JSONUnmarshaler struct{}
+
+// UnmarshalEntities from OTLP/JSON format into pdata.Entities.
+func (*JSONUnmarshaler) UnmarshalEntities(buf []byte) (Entities, error) {
+	iter := jsoniter.ConfigFastest.BorrowIterator(buf)
+	defer jsoniter.ConfigFastest.ReturnIterator(iter)
+	ld := NewEntities()
+	ld.unmarshalJsoniter(iter)
+	if iter.Error != nil {
+		return Entities{}, iter.Error
+	}
+	return ld, nil
+}
+
+func (ms Entities) unmarshalJsoniter(iter *jsoniter.Iterator) {
+	iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
+		switch f {
+		case "resource_entities", "resourceEntities":
+			iter.ReadArrayCB(func(*jsoniter.Iterator) bool {
+				ms.ResourceEntities().AppendEmpty().unmarshalJsoniter(iter)
+				return true
+			})
+		default:
+			iter.Skip()
+		}
+		return true
+	})
+}
+
+func (ms ResourceEntities) unmarshalJsoniter(iter *jsoniter.Iterator) {
+	iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
+		switch f {
+		case "resource":
+			json.ReadResource(iter, &ms.orig.Resource)
+		case "scope_entities", "scopeEntities":
+			iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
+				ms.ScopeEntities().AppendEmpty().unmarshalJsoniter(iter)
+				return true
+			})
+		case "schemaUrl", "schema_url":
+			ms.orig.SchemaUrl = iter.ReadString()
+		default:
+			iter.Skip()
+		}
+		return true
+	})
+}
+
+func (ms ScopeEntities) unmarshalJsoniter(iter *jsoniter.Iterator) {
+	iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
+		switch f {
+		case "scope":
+			json.ReadScope(iter, &ms.orig.Scope)
+		case "entity_events", "entityEvents":
+			iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
+				ms.EntityEvents().AppendEmpty().unmarshalJsoniter(iter)
+				return true
+			})
+		case "schemaUrl", "schema_url":
+			ms.orig.SchemaUrl = iter.ReadString()
+		default:
+			iter.Skip()
+		}
+		return true
+	})
+}
+
+func (ms EntityEvent) unmarshalJsoniter(iter *jsoniter.Iterator) {
+	iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
+		switch f {
+		case "timeUnixNano", "time_unix_nano":
+			ms.orig.TimeUnixNano = json.ReadUint64(iter)
+		case "entityType", "entity_type":
+			ms.orig.EntityType = iter.ReadString()
+		case "id":
+			iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
+				ms.orig.Id = append(ms.orig.Id, json.ReadAttribute(iter))
+				return true
+			})
+		// TODO: Add support for other fields.
+ default: + iter.Skip() + } + return true + }) +} diff --git a/pdata/pentity/pb.go b/pdata/pentity/pb.go new file mode 100644 index 00000000000..4da4b5a85f0 --- /dev/null +++ b/pdata/pentity/pb.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pentity // import "go.opentelemetry.io/collector/pdata/pentity" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpentities "go.opentelemetry.io/collector/pdata/internal/data/protogen/entities/v1" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalEntities(ed Entities) ([]byte, error) { + pb := internal.EntitiesToProto(internal.Entities(ed)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) EntitiesSize(ed Entities) int { + pb := internal.EntitiesToProto(internal.Entities(ed)) + return pb.Size() +} + +var _ Unmarshaler = (*ProtoUnmarshaler)(nil) + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalEntities(buf []byte) (Entities, error) { + pb := otlpentities.EntitiesData{} + err := pb.Unmarshal(buf) + return Entities(internal.EntitiesFromProto(pb)), err +} diff --git a/pdata/pentity/pentityotlp/generated_exportpartialsuccess.go b/pdata/pentity/pentityotlp/generated_exportpartialsuccess.go new file mode 100644 index 00000000000..c367b1fab78 --- /dev/null +++ b/pdata/pentity/pentityotlp/generated_exportpartialsuccess.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentityotlp + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorentity "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExportPartialSuccess function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExportPartialSuccess struct { + orig *otlpcollectorentity.ExportEntitiesPartialSuccess + state *internal.State +} + +func newExportPartialSuccess(orig *otlpcollectorentity.ExportEntitiesPartialSuccess, state *internal.State) ExportPartialSuccess { + return ExportPartialSuccess{orig: orig, state: state} +} + +// NewExportPartialSuccess creates a new empty ExportPartialSuccess. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewExportPartialSuccess() ExportPartialSuccess { + state := internal.StateMutable + return newExportPartialSuccess(&otlpcollectorentity.ExportEntitiesPartialSuccess{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpcollectorentity.ExportEntitiesPartialSuccess{} +} + +// RejectedEntities returns the rejectedentities associated with this ExportPartialSuccess. 
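+//
+// A server-side sketch (illustrative values) that reports dropped entities on a response:
+//
+//	resp := NewExportResponse()
+//	resp.PartialSuccess().SetRejectedEntities(3)
+//	resp.PartialSuccess().SetErrorMessage("resource limit exceeded")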
+func (ms ExportPartialSuccess) RejectedEntities() int64 { + return ms.orig.RejectedEntities +} + +// SetRejectedEntities replaces the rejectedentities associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetRejectedEntities(v int64) { + ms.state.AssertMutable() + ms.orig.RejectedEntities = v +} + +// ErrorMessage returns the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) ErrorMessage() string { + return ms.orig.ErrorMessage +} + +// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetErrorMessage(v string) { + ms.state.AssertMutable() + ms.orig.ErrorMessage = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { + dest.state.AssertMutable() + dest.SetRejectedEntities(ms.RejectedEntities()) + dest.SetErrorMessage(ms.ErrorMessage()) +} diff --git a/pdata/pentity/pentityotlp/generated_exportpartialsuccess_test.go b/pdata/pentity/pentityotlp/generated_exportpartialsuccess_test.go new file mode 100644 index 00000000000..cbd0d5df52b --- /dev/null +++ b/pdata/pentity/pentityotlp/generated_exportpartialsuccess_test.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pentityotlp + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorentity "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1" +) + +func TestExportPartialSuccess_MoveTo(t *testing.T) { + ms := generateTestExportPartialSuccess() + dest := NewExportPartialSuccess() + ms.MoveTo(dest) + assert.Equal(t, NewExportPartialSuccess(), ms) + assert.Equal(t, generateTestExportPartialSuccess(), dest) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + ms.MoveTo(newExportPartialSuccess(&otlpcollectorentity.ExportEntitiesPartialSuccess{}, &sharedState)) + }) + assert.Panics(t, func() { + newExportPartialSuccess(&otlpcollectorentity.ExportEntitiesPartialSuccess{}, &sharedState).MoveTo(dest) + }) +} + +func TestExportPartialSuccess_CopyTo(t *testing.T) { + ms := NewExportPartialSuccess() + orig := NewExportPartialSuccess() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + orig = generateTestExportPartialSuccess() + orig.CopyTo(ms) + assert.Equal(t, orig, ms) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + ms.CopyTo(newExportPartialSuccess(&otlpcollectorentity.ExportEntitiesPartialSuccess{}, &sharedState)) + }) +} + +func TestExportPartialSuccess_RejectedEntities(t *testing.T) { + ms := NewExportPartialSuccess() + assert.Equal(t, int64(0), ms.RejectedEntities()) + ms.SetRejectedEntities(int64(13)) + assert.Equal(t, int64(13), ms.RejectedEntities()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + newExportPartialSuccess(&otlpcollectorentity.ExportEntitiesPartialSuccess{}, &sharedState).SetRejectedEntities(int64(13)) + }) +} + +func TestExportPartialSuccess_ErrorMessage(t *testing.T) { + ms := NewExportPartialSuccess() + assert.Equal(t, "", ms.ErrorMessage()) + ms.SetErrorMessage("error message") + assert.Equal(t, "error message", ms.ErrorMessage()) + sharedState := internal.StateReadOnly + assert.Panics(t, func() { + 
newExportPartialSuccess(&otlpcollectorentity.ExportEntitiesPartialSuccess{}, &sharedState).SetErrorMessage("error message") + }) +} + +func generateTestExportPartialSuccess() ExportPartialSuccess { + tv := NewExportPartialSuccess() + fillTestExportPartialSuccess(tv) + return tv +} + +func fillTestExportPartialSuccess(tv ExportPartialSuccess) { + tv.orig.RejectedEntities = int64(13) + tv.orig.ErrorMessage = "error message" +} diff --git a/pdata/pentity/pentityotlp/grpc.go b/pdata/pentity/pentityotlp/grpc.go new file mode 100644 index 00000000000..cc39f363467 --- /dev/null +++ b/pdata/pentity/pentityotlp/grpc.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pentityotlp // import "go.opentelemetry.io/collector/pdata/pentity/pentityotlp" + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorentity "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1" +) + +// GRPCClient is the client API for OTLP-GRPC Entities service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCClient interface { + // Export pentity.Entities to the server. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) + + // unexported disallow implementation of the GRPCClient. + unexported() +} + +// NewGRPCClient returns a new GRPCClient connected using the given connection. +func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { + return &grpcClient{rawClient: otlpcollectorentity.NewEntitiesServiceClient(cc)} +} + +type grpcClient struct { + rawClient otlpcollectorentity.EntitiesServiceClient +} + +func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { + rsp, err := c.rawClient.Export(ctx, request.orig, opts...) + if err != nil { + return ExportResponse{}, err + } + state := internal.StateMutable + return ExportResponse{orig: rsp, state: &state}, err +} + +func (c *grpcClient) unexported() {} + +// GRPCServer is the server API for OTLP gRPC EntitiesService service. +// Implementations MUST embed UnimplementedGRPCServer. +type GRPCServer interface { + // Export is called every time a new request is received. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, ExportRequest) (ExportResponse, error) + + // unexported disallow implementation of the GRPCServer. + unexported() +} + +var _ GRPCServer = (*UnimplementedGRPCServer)(nil) + +// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. +type UnimplementedGRPCServer struct{} + +func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { + return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func (*UnimplementedGRPCServer) unexported() {} + +// RegisterGRPCServer registers the Server to the grpc.Server. 
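+//
+// A minimal wiring sketch (the server type and names are illustrative):
+//
+//	type entitiesServer struct {
+//		pentityotlp.UnimplementedGRPCServer
+//	}
+//
+//	// Override Export on entitiesServer to actually handle incoming requests.
+//	s := grpc.NewServer()
+//	pentityotlp.RegisterGRPCServer(s, &entitiesServer{})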
+func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { + otlpcollectorentity.RegisterEntitiesServiceServer(s, &rawEntitiesServer{srv: srv}) +} + +type rawEntitiesServer struct { + srv GRPCServer +} + +func (s rawEntitiesServer) Export(ctx context.Context, request *otlpcollectorentity.ExportEntitiesServiceRequest) ( + *otlpcollectorentity.ExportEntitiesServiceResponse, error) { + state := internal.StateMutable + rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) + return rsp.orig, err +} diff --git a/pdata/pentity/pentityotlp/request.go b/pdata/pentity/pentityotlp/request.go new file mode 100644 index 00000000000..1cf664546eb --- /dev/null +++ b/pdata/pentity/pentityotlp/request.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pentityotlp // import "go.opentelemetry.io/collector/pdata/pentity/pentityotlp" + +import ( + "bytes" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorentity "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/pentity" +) + +var jsonUnmarshaler = &pentity.JSONUnmarshaler{} + +// ExportRequest represents the request for gRPC/HTTP client/server. +// It's a wrapper for pentity.Entities data. +type ExportRequest struct { + orig *otlpcollectorentity.ExportEntitiesServiceRequest + state *internal.State +} + +// NewExportRequest returns an empty ExportRequest. +func NewExportRequest() ExportRequest { + state := internal.StateMutable + return ExportRequest{ + orig: &otlpcollectorentity.ExportEntitiesServiceRequest{}, + state: &state, + } +} + +// NewExportRequestFromEntities returns a ExportRequest from pentity.Entities. +// Because ExportRequest is a wrapper for pentity.Entities, +// any changes to the provided Entities struct will be reflected in the ExportRequest and vice versa. +func NewExportRequestFromEntities(ld pentity.Entities) ExportRequest { + return ExportRequest{ + orig: internal.GetOrigEntities(internal.Entities(ld)), + state: internal.GetEntitiesState(internal.Entities(ld)), + } +} + +// MarshalProto marshals ExportRequest into proto bytes. +func (ms ExportRequest) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportRequest from proto bytes. +func (ms ExportRequest) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportRequest into JSON bytes. +func (ms ExportRequest) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportRequest from JSON bytes. 
+func (ms ExportRequest) UnmarshalJSON(data []byte) error { + ld, err := jsonUnmarshaler.UnmarshalEntities(data) + if err != nil { + return err + } + *ms.orig = *internal.GetOrigEntities(internal.Entities(ld)) + return nil +} + +func (ms ExportRequest) Entities() pentity.Entities { + return pentity.Entities(internal.NewEntities(ms.orig, ms.state)) +} diff --git a/pdata/pentity/pentityotlp/response.go b/pdata/pentity/pentityotlp/response.go new file mode 100644 index 00000000000..cc02ed9e040 --- /dev/null +++ b/pdata/pentity/pentityotlp/response.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pentityotlp // import "go.opentelemetry.io/collector/pdata/pentity/pentityotlp" + +import ( + "bytes" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorentity "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/entities/v1" + "go.opentelemetry.io/collector/pdata/internal/json" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportResponse struct { + orig *otlpcollectorentity.ExportEntitiesServiceResponse + state *internal.State +} + +// NewExportResponse returns an empty ExportResponse. +func NewExportResponse() ExportResponse { + state := internal.StateMutable + return ExportResponse{ + orig: &otlpcollectorentity.ExportEntitiesServiceResponse{}, + state: &state, + } +} + +// MarshalProto marshals ExportResponse into proto bytes. +func (ms ExportResponse) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportResponse from proto bytes. +func (ms ExportResponse) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportResponse into JSON bytes. +func (ms ExportResponse) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportResponse from JSON bytes. +func (ms ExportResponse) UnmarshalJSON(data []byte) error { + iter := jsoniter.ConfigFastest.BorrowIterator(data) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ms.unmarshalJsoniter(iter) + return iter.Error +} + +// PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse. 
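+//
+// A typical client-side check (sketch; "client" and "req" are illustrative):
+//
+//	resp, err := client.Export(ctx, req)
+//	if err == nil && resp.PartialSuccess().RejectedEntities() > 0 {
+//		// some entities were not accepted; inspect resp.PartialSuccess().ErrorMessage()
+//	}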
+func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
+	return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
+}
+
+func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) {
+	iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
+		switch f {
+		case "partial_success", "partialSuccess":
+			ms.PartialSuccess().unmarshalJsoniter(iter)
+		default:
+			iter.Skip()
+		}
+		return true
+	})
+}
+
+func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) {
+	iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool {
+		switch f {
+		case "rejected_entities", "rejectedEntities":
+			ms.orig.RejectedEntities = json.ReadInt64(iter)
+		case "error_message", "errorMessage":
+			ms.orig.ErrorMessage = iter.ReadString()
+		default:
+			iter.Skip()
+		}
+		return true
+	})
+}
diff --git a/pipeline/signal.go b/pipeline/signal.go
index 77376c999ad..1a8230109a5 100644
--- a/pipeline/signal.go
+++ b/pipeline/signal.go
@@ -16,7 +16,8 @@ type Signal = globalsignal.Signal
 var ErrSignalNotSupported = errors.New("telemetry type is not supported")
 
 var (
-	SignalTraces  = globalsignal.MustNewSignal("traces")
-	SignalMetrics = globalsignal.MustNewSignal("metrics")
-	SignalLogs    = globalsignal.MustNewSignal("logs")
+	SignalTraces   = globalsignal.MustNewSignal("traces")
+	SignalMetrics  = globalsignal.MustNewSignal("metrics")
+	SignalLogs     = globalsignal.MustNewSignal("logs")
+	SignalEntities = globalsignal.MustNewSignal("entities")
 )
diff --git a/processor/internal/entities.go b/processor/internal/entities.go
new file mode 100644
index 00000000000..8932a68fb8e
--- /dev/null
+++ b/processor/internal/entities.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/processor/internal"
+
+import (
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+// Entities is a processor that can consume entities.
+type Entities interface { + component.Component + consumer.Entities +} diff --git a/processor/memorylimiterprocessor/README.md b/processor/memorylimiterprocessor/README.md index d5707909f40..c004f9aafa4 100644 --- a/processor/memorylimiterprocessor/README.md +++ b/processor/memorylimiterprocessor/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: traces, metrics, logs | +| Stability | [beta]: traces, metrics, logs, entities | | Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fmemorylimiter%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fmemorylimiter) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fmemorylimiter%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fmemorylimiter) | diff --git a/processor/memorylimiterprocessor/factory.go b/processor/memorylimiterprocessor/factory.go index 03e18aeedc6..6bc21c60a67 100644 --- a/processor/memorylimiterprocessor/factory.go +++ b/processor/memorylimiterprocessor/factory.go @@ -35,7 +35,8 @@ func NewFactory() processor.Factory { createDefaultConfig, processor.WithTraces(f.createTraces, metadata.TracesStability), processor.WithMetrics(f.createMetrics, metadata.MetricsStability), - processor.WithLogs(f.createLogs, metadata.LogsStability)) + processor.WithLogs(f.createLogs, metadata.LogsStability), + processor.WithEntities(f.createEntities, metadata.EntitiesStability)) } // CreateDefaultConfig creates the default configuration for processor. Notice @@ -95,6 +96,23 @@ func (f *factory) createLogs( processorhelper.WithShutdown(memLimiter.shutdown)) } +func (f *factory) createEntities( + ctx context.Context, + set processor.Settings, + cfg component.Config, + nextConsumer consumer.Entities, +) (processor.Entities, error) { + memLimiter, err := f.getMemoryLimiter(set, cfg) + if err != nil { + return nil, err + } + return processorhelper.NewEntities(ctx, set, cfg, nextConsumer, + memLimiter.processEntities, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithStart(memLimiter.start), + processorhelper.WithShutdown(memLimiter.shutdown)) +} + // getMemoryLimiter checks if we have a cached memoryLimiter with a specific config, // otherwise initialize and add one to the store. 
func (f *factory) getMemoryLimiter(set processor.Settings, cfg component.Config) (*memoryLimiterProcessor, error) { diff --git a/processor/memorylimiterprocessor/internal/metadata/generated_status.go b/processor/memorylimiterprocessor/internal/metadata/generated_status.go index 0e841608278..d5986ccd36a 100644 --- a/processor/memorylimiterprocessor/internal/metadata/generated_status.go +++ b/processor/memorylimiterprocessor/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - TracesStability = component.StabilityLevelBeta - MetricsStability = component.StabilityLevelBeta - LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelBeta + MetricsStability = component.StabilityLevelBeta + LogsStability = component.StabilityLevelBeta + EntitiesStability = component.StabilityLevelBeta ) diff --git a/processor/memorylimiterprocessor/memorylimiter.go b/processor/memorylimiterprocessor/memorylimiter.go index 45da42e0b32..c8827ad576f 100644 --- a/processor/memorylimiterprocessor/memorylimiter.go +++ b/processor/memorylimiterprocessor/memorylimiter.go @@ -8,6 +8,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/internal/memorylimiter" + "go.opentelemetry.io/collector/pdata/pentity" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" @@ -100,3 +101,21 @@ func (p *memoryLimiterProcessor) processLogs(ctx context.Context, ld plog.Logs) p.obsrep.accepted(ctx, numRecords, pipeline.SignalLogs) return ld, nil } + +func (p *memoryLimiterProcessor) processEntities(ctx context.Context, ld pentity.Entities) (pentity.Entities, error) { + numRecords := ld.EntityCount() + if p.memlimiter.MustRefuse() { + // TODO: actually to be 100% sure that this is "refused" and not "dropped" + // it is necessary to check the pipeline to see if this is directly connected + // to a receiver (ie.: a receiver is on the call stack). For now it + // assumes that the pipeline is properly configured and a receiver is on the + // callstack. + p.obsrep.refused(ctx, numRecords, pipeline.SignalEntities) + return ld, memorylimiter.ErrDataRefused + } + + // Even if the next consumer returns error record the data as accepted by + // this processor. + p.obsrep.accepted(ctx, numRecords, pipeline.SignalEntities) + return ld, nil +} diff --git a/processor/memorylimiterprocessor/metadata.yaml b/processor/memorylimiterprocessor/metadata.yaml index d0fb4dfc6eb..f10460dddbd 100644 --- a/processor/memorylimiterprocessor/metadata.yaml +++ b/processor/memorylimiterprocessor/metadata.yaml @@ -4,7 +4,7 @@ github_project: open-telemetry/opentelemetry-collector status: class: processor stability: - beta: [traces, metrics, logs] + beta: [traces, metrics, logs, entities] distributions: [core, contrib, k8s] tests: diff --git a/processor/processor.go b/processor/processor.go index 8ecd4d497c6..374467e9986 100644 --- a/processor/processor.go +++ b/processor/processor.go @@ -30,6 +30,12 @@ type Logs interface { consumer.Logs } +// Entities is a processor that can consume entities. +type Entities interface { + component.Component + consumer.Entities +} + // Settings is passed to Create* functions in Factory. type Settings struct { // ID returns the ID of the component that will be created. @@ -75,6 +81,15 @@ type Factory interface { // LogsStability gets the stability level of the Logs processor. 
LogsStability() component.StabilityLevel + // CreateEntities creates an Entities processor based on this config. + // If the processor type does not support entities, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateEntities(ctx context.Context, set Settings, cfg component.Config, next consumer.Entities) (Entities, error) + + // EntitiesStability gets the stability level of the Entities processor. + EntitiesStability() component.StabilityLevel + unexportedFactoryFunc() } @@ -102,6 +117,8 @@ type factory struct { metricsStabilityLevel component.StabilityLevel CreateLogsFunc logsStabilityLevel component.StabilityLevel + CreateEntitiesFunc + entitiesStabilityLevel component.StabilityLevel } func (f *factory) Type() component.Type { @@ -122,6 +139,10 @@ func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } +func (f factory) EntitiesStability() component.StabilityLevel { + return f.entitiesStabilityLevel +} + // CreateTracesFunc is the equivalent of Factory.CreateTraces(). type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) @@ -155,6 +176,17 @@ func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg compon return f(ctx, set, cfg, next) } +// CreateEntitiesFunc is the equivalent of Factory.CreateEntities(). +type CreateEntitiesFunc func(context.Context, Settings, component.Config, consumer.Entities) (Entities, error) + +// CreateEntities implements Factory.CreateEntities. +func (f CreateEntitiesFunc) CreateEntities(ctx context.Context, set Settings, cfg component.Config, next consumer.Entities) (Entities, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + // WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level. func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { @@ -179,6 +211,14 @@ func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOpt }) } +// WithEntities overrides the default "error not supported" implementation for CreateEntities and the default "undefined" stability level. +func WithEntities(createEntities CreateEntitiesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factory) { + o.entitiesStabilityLevel = sl + o.CreateEntitiesFunc = createEntities + }) +} + // NewFactory returns a Factory. 
func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { f := &factory{ diff --git a/processor/processor_test.go b/processor/processor_test.go index 79fd2cb6aa6..49ba7b472aa 100644 --- a/processor/processor_test.go +++ b/processor/processor_test.go @@ -28,6 +28,8 @@ func TestNewFactory(t *testing.T) { _, err = factory.CreateMetrics(context.Background(), Settings{}, &defaultCfg, consumertest.NewNop()) require.Error(t, err) _, err = factory.CreateLogs(context.Background(), Settings{}, &defaultCfg, consumertest.NewNop()) + require.Error(t, err) + _, err = factory.CreateEntities(context.Background(), Settings{}, &defaultCfg, consumertest.NewNop()) assert.Error(t, err) } diff --git a/processor/processorhelper/entities.go b/processor/processorhelper/entities.go new file mode 100644 index 00000000000..34675d5ef1a --- /dev/null +++ b/processor/processorhelper/entities.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package processorhelper // import "go.opentelemetry.io/collector/processor/processorhelper" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pentity" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/processor" +) + +// ProcessEntitiesFunc is a helper function that processes the incoming data and returns the data to be sent to the next +// component. +// If error is returned then returned data are ignored. It MUST not call the next component. +type ProcessEntitiesFunc func(context.Context, pentity.Entities) (pentity.Entities, error) + +type entities struct { + component.StartFunc + component.ShutdownFunc + consumer.Entities +} + +// NewEntities creates a processor.Entities that ensure context propagation and the right tags are set. +func NewEntities( + _ context.Context, + set processor.Settings, + _ component.Config, + nextConsumer consumer.Entities, + logsFunc ProcessEntitiesFunc, + options ...Option, +) (processor.Entities, error) { + if logsFunc == nil { + return nil, errors.New("nil logsFunc") + } + + obs, err := newObsReport(set, pipeline.SignalEntities) + if err != nil { + return nil, err + } + + eventOptions := spanAttributes(set.ID) + bs := fromOptions(options) + logsConsumer, err := consumer.NewEntities(func(ctx context.Context, ld pentity.Entities) error { + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) + recordsIn := ld.EntityCount() + + var errFunc error + ld, errFunc = logsFunc(ctx, ld) + span.AddEvent("End processing.", eventOptions) + if errFunc != nil { + obs.recordInOut(ctx, recordsIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { + return nil + } + return errFunc + } + recordsOut := ld.EntityCount() + obs.recordInOut(ctx, recordsIn, recordsOut) + return nextConsumer.ConsumeEntities(ctx, ld) + }, bs.consumerOptions...) 
+	if err != nil {
+		return nil, err
+	}
+
+	return &entities{
+		StartFunc:    bs.StartFunc,
+		ShutdownFunc: bs.ShutdownFunc,
+		Entities:     logsConsumer,
+	}, nil
+}
diff --git a/processor/processorprofiles/processor.go b/processor/processorprofiles/processor.go
index 7532e98eda3..16dec8948f1 100644
--- a/processor/processorprofiles/processor.go
+++ b/processor/processorprofiles/processor.go
@@ -103,6 +103,13 @@ func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel
 	})
 }
 
+// WithEntities overrides the default "error not supported" implementation for CreateEntities and the default "undefined" stability level.
+func WithEntities(createEntities processor.CreateEntitiesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, processor.WithEntities(createEntities, sl))
+	})
+}
+
 // NewFactory returns a Factory.
 func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory {
 	opts := factoryOpts{factory: &factory{}}
diff --git a/processor/processortest/nop_processor.go b/processor/processortest/nop_processor.go
index 9a5937e60e5..a84770b43ac 100644
--- a/processor/processortest/nop_processor.go
+++ b/processor/processortest/nop_processor.go
@@ -37,6 +37,7 @@ func NewNopFactory() processor.Factory {
 		processorprofiles.WithMetrics(createMetrics, component.StabilityLevelStable),
 		processorprofiles.WithLogs(createLogs, component.StabilityLevelStable),
 		processorprofiles.WithProfiles(createProfiles, component.StabilityLevelAlpha),
+		processorprofiles.WithEntities(createEntities, component.StabilityLevelStable),
 	)
 }
 
@@ -56,6 +57,10 @@ func createProfiles(context.Context, processor.Settings, component.Config, consu
 	return nopInstance, nil
 }
 
+func createEntities(context.Context, processor.Settings, component.Config, consumer.Entities) (processor.Entities, error) {
+	return nopInstance, nil
+}
+
 type nopConfig struct{}
 
 var nopInstance = &nop{
diff --git a/receiver/internal/obsmetrics.go b/receiver/internal/obsmetrics.go
index 7a4ba33de32..27ba3f58759 100644
--- a/receiver/internal/obsmetrics.go
+++ b/receiver/internal/obsmetrics.go
@@ -31,6 +31,12 @@ const (
 	// Collector.
 	RefusedLogRecordsKey = "refused_log_records"
 
+	// AcceptedEntitiesKey used to identify entities accepted by the Collector.
+	AcceptedEntitiesKey = "accepted_entities"
+	// RefusedEntitiesKey used to identify entities refused (ie.: not ingested) by the
+	// Collector.
+	RefusedEntitiesKey = "refused_entities"
+
 	// ScraperKey used to identify scrapers in metrics and traces.
ScraperKey = "scraper" @@ -46,5 +52,6 @@ const ( ReceiverPrefix = ReceiverKey + SpanNameSep ReceiveTraceDataOperationSuffix = SpanNameSep + "TraceDataReceived" ReceiverMetricsOperationSuffix = SpanNameSep + "MetricsReceived" + ReceiverEntitiesOperationSuffix = SpanNameSep + "EntitiesReceived" ReceiverLogsOperationSuffix = SpanNameSep + "LogsReceived" ) diff --git a/receiver/otlpreceiver/README.md b/receiver/otlpreceiver/README.md index a7848092f00..465ee1eeedf 100644 --- a/receiver/otlpreceiver/README.md +++ b/receiver/otlpreceiver/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [development]: profiles | +| Stability | [development]: profiles, entities | | | [beta]: logs | | | [stable]: traces, metrics | | Distributions | [core], [contrib], [k8s] | diff --git a/receiver/otlpreceiver/encoder.go b/receiver/otlpreceiver/encoder.go index 6462aec745d..2d01108cd43 100644 --- a/receiver/otlpreceiver/encoder.go +++ b/receiver/otlpreceiver/encoder.go @@ -10,6 +10,7 @@ import ( "github.com/gogo/protobuf/proto" spb "google.golang.org/genproto/googleapis/rpc/status" + "go.opentelemetry.io/collector/pdata/pentity/pentityotlp" "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" @@ -32,11 +33,13 @@ type encoder interface { unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) + unmarshalEntitiesRequest(buf []byte) (pentityotlp.ExportRequest, error) marshalTracesResponse(ptraceotlp.ExportResponse) ([]byte, error) marshalMetricsResponse(pmetricotlp.ExportResponse) ([]byte, error) marshalLogsResponse(plogotlp.ExportResponse) ([]byte, error) marshalProfilesResponse(pprofileotlp.ExportResponse) ([]byte, error) + marshalEntitiesResponse(pentityotlp.ExportResponse) ([]byte, error) marshalStatus(rsp *spb.Status) ([]byte, error) @@ -69,6 +72,12 @@ func (protoEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportReq return req, err } +func (protoEncoder) unmarshalEntitiesRequest(buf []byte) (pentityotlp.ExportRequest, error) { + req := pentityotlp.NewExportRequest() + err := req.UnmarshalProto(buf) + return req, err +} + func (protoEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) { return resp.MarshalProto() } @@ -85,6 +94,10 @@ func (protoEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([ return resp.MarshalProto() } +func (protoEncoder) marshalEntitiesResponse(resp pentityotlp.ExportResponse) ([]byte, error) { + return resp.MarshalProto() +} + func (protoEncoder) marshalStatus(resp *spb.Status) ([]byte, error) { return proto.Marshal(resp) } @@ -119,6 +132,12 @@ func (jsonEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequ return req, err } +func (jsonEncoder) unmarshalEntitiesRequest(buf []byte) (pentityotlp.ExportRequest, error) { + req := pentityotlp.NewExportRequest() + err := req.UnmarshalJSON(buf) + return req, err +} + func (jsonEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) { return resp.MarshalJSON() } @@ -135,6 +154,10 @@ func (jsonEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([] return resp.MarshalJSON() } +func (jsonEncoder) marshalEntitiesResponse(resp pentityotlp.ExportResponse) ([]byte, error) { + return resp.MarshalJSON() +} + func (jsonEncoder) 
marshalStatus(resp *spb.Status) ([]byte, error) {
	buf := new(bytes.Buffer)
	err := jsonPbMarshaler.Marshal(buf, resp)
diff --git a/receiver/otlpreceiver/factory.go b/receiver/otlpreceiver/factory.go
index 9ffc0d987f8..4d805fdca03 100644
--- a/receiver/otlpreceiver/factory.go
+++ b/receiver/otlpreceiver/factory.go
@@ -34,6 +34,7 @@ func NewFactory() receiver.Factory {
 		receiverprofiles.WithMetrics(createMetrics, metadata.MetricsStability),
 		receiverprofiles.WithLogs(createLog, metadata.LogsStability),
 		receiverprofiles.WithProfiles(createProfiles, metadata.ProfilesStability),
+		receiverprofiles.WithEntities(createEntities, metadata.EntitiesStability),
 	)
 }
 
@@ -149,6 +150,28 @@ func createProfiles(
 	return r, nil
 }
 
+// createEntities creates an entities receiver based on the provided config.
+func createEntities(
+	_ context.Context,
+	set receiver.Settings,
+	cfg component.Config,
+	nextConsumer consumer.Entities,
+) (receiver.Entities, error) {
+	oCfg := cfg.(*Config)
+	r, err := receivers.LoadOrStore(
+		oCfg,
+		func() (*otlpReceiver, error) {
+			return newOtlpReceiver(oCfg, &set)
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	r.Unwrap().registerEntitiesConsumer(nextConsumer)
+	return r, nil
+}
+
 // This is the map of already created OTLP receivers for particular configurations.
 // We maintain this map because the receiver.Factory is asked trace and metric receivers separately
 // when it gets CreateTraces() and CreateMetrics() but they must not
diff --git a/receiver/otlpreceiver/internal/entities/otlp.go b/receiver/otlpreceiver/internal/entities/otlp.go
new file mode 100644
index 00000000000..57a0d069c78
--- /dev/null
+++ b/receiver/otlpreceiver/internal/entities/otlp.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package entities // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/entities"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/pdata/pentity/pentityotlp"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
+	"go.opentelemetry.io/collector/receiver/receiverhelper"
+)
+
+const dataFormatProtobuf = "protobuf"
+
+// Receiver is the type used to handle entities from OpenTelemetry exporters.
+type Receiver struct {
+	pentityotlp.UnimplementedGRPCServer
+	nextConsumer consumer.Entities
+	obsreport    *receiverhelper.ObsReport
+}
+
+// New creates a new Receiver reference.
+func New(nextConsumer consumer.Entities, obsreport *receiverhelper.ObsReport) *Receiver {
+	return &Receiver{
+		nextConsumer: nextConsumer,
+		obsreport:    obsreport,
+	}
+}
+
+// Export implements the service Export entities func.
+func (r *Receiver) Export(ctx context.Context, req pentityotlp.ExportRequest) (pentityotlp.ExportResponse, error) { + ld := req.Entities() + numSpans := ld.EntityCount() + if numSpans == 0 { + return pentityotlp.NewExportResponse(), nil + } + + ctx = r.obsreport.StartEntitiesOp(ctx) + err := r.nextConsumer.ConsumeEntities(ctx, ld) + r.obsreport.EndEntitiesOp(ctx, dataFormatProtobuf, numSpans, err) + + // Use appropriate status codes for permanent/non-permanent errors + // If we return the error straightaway, then the grpc implementation will set status code to Unknown + // Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345 + // So, convert the error to appropriate grpc status and return the error + // NonPermanent errors will be converted to codes.Unavailable (equivalent to HTTP 503) + // Permanent errors will be converted to codes.InvalidArgument (equivalent to HTTP 400) + if err != nil { + return pentityotlp.NewExportResponse(), errors.GetStatusFromError(err) + } + + return pentityotlp.NewExportResponse(), nil +} diff --git a/receiver/otlpreceiver/internal/metadata/generated_status.go b/receiver/otlpreceiver/internal/metadata/generated_status.go index 5fe9771f5dd..1d162e4d8f2 100644 --- a/receiver/otlpreceiver/internal/metadata/generated_status.go +++ b/receiver/otlpreceiver/internal/metadata/generated_status.go @@ -13,6 +13,7 @@ var ( const ( ProfilesStability = component.StabilityLevelDevelopment + EntitiesStability = component.StabilityLevelDevelopment LogsStability = component.StabilityLevelBeta TracesStability = component.StabilityLevelStable MetricsStability = component.StabilityLevelStable diff --git a/receiver/otlpreceiver/metadata.yaml b/receiver/otlpreceiver/metadata.yaml index c65af6e6a5a..4df83b43a35 100644 --- a/receiver/otlpreceiver/metadata.yaml +++ b/receiver/otlpreceiver/metadata.yaml @@ -6,5 +6,5 @@ status: stability: stable: [traces, metrics] beta: [logs] - development: [profiles] + development: [profiles, entities] distributions: [core, contrib, k8s] diff --git a/receiver/otlpreceiver/otlp.go b/receiver/otlpreceiver/otlp.go index a4c5f08c7a9..e54082a7dd3 100644 --- a/receiver/otlpreceiver/otlp.go +++ b/receiver/otlpreceiver/otlp.go @@ -18,11 +18,13 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumerprofiles" + "go.opentelemetry.io/collector/pdata/pentity/pentityotlp" "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/entities" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles" @@ -40,6 +42,7 @@ type otlpReceiver struct { nextMetrics consumer.Metrics nextLogs consumer.Logs nextProfiles consumerprofiles.Profiles + nextEntities consumer.Entities shutdownWG sync.WaitGroup obsrepGRPC *receiverhelper.ObsReport @@ -58,6 +61,7 @@ func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) nextMetrics: nil, nextLogs: nil, nextProfiles: nil, + nextEntities: nil, settings: set, } @@ -109,6 +113,10 @@ func (r *otlpReceiver) startGRPCServer(host component.Host) error { 
pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles))
 	}
 
+	if r.nextEntities != nil {
+		pentityotlp.RegisterGRPCServer(r.serverGRPC, entities.New(r.nextEntities, r.obsrepGRPC))
+	}
+
 	r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", r.cfg.GRPC.NetAddr.Endpoint))
 	var gln net.Listener
 	if gln, err = r.cfg.GRPC.NetAddr.Listen(context.Background()); err != nil {
@@ -161,6 +169,13 @@ func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host)
 		})
 	}
 
+	if r.nextEntities != nil {
+		httpEntitiesReceiver := entities.New(r.nextEntities, r.obsrepHTTP)
+		httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) {
+			handleEntities(resp, req, httpEntitiesReceiver)
+		})
+	}
+
 	var err error
 	if r.serverHTTP, err = r.cfg.HTTP.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil {
 		return err
@@ -230,3 +245,7 @@ func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) {
 func (r *otlpReceiver) registerProfilesConsumer(tc consumerprofiles.Profiles) {
 	r.nextProfiles = tc
 }
+
+func (r *otlpReceiver) registerEntitiesConsumer(ec consumer.Entities) {
+	r.nextEntities = ec
+}
diff --git a/receiver/otlpreceiver/otlphttp.go b/receiver/otlpreceiver/otlphttp.go
index 4c9c8231fe7..5497a91f31d 100644
--- a/receiver/otlpreceiver/otlphttp.go
+++ b/receiver/otlpreceiver/otlphttp.go
@@ -13,6 +13,7 @@ import (
 	"google.golang.org/grpc/status"
 
 	"go.opentelemetry.io/collector/internal/httphelper"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/entities"
 	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
 	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
 	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
@@ -149,6 +150,37 @@ func handleProfiles(resp http.ResponseWriter, req *http.Request, profilesReceive
 	writeResponse(resp, enc.contentType(), http.StatusOK, msg)
 }
 
+func handleEntities(resp http.ResponseWriter, req *http.Request, entitiesReceiver *entities.Receiver) {
+	enc, ok := readContentType(resp, req)
+	if !ok {
+		return
+	}
+
+	body, ok := readAndCloseBody(resp, req, enc)
+	if !ok {
+		return
+	}
+
+	otlpReq, err := enc.unmarshalEntitiesRequest(body)
+	if err != nil {
+		writeError(resp, enc, err, http.StatusBadRequest)
+		return
+	}
+
+	otlpResp, err := entitiesReceiver.Export(req.Context(), otlpReq)
+	if err != nil {
+		writeError(resp, enc, err, http.StatusInternalServerError)
+		return
+	}
+
+	msg, err := enc.marshalEntitiesResponse(otlpResp)
+	if err != nil {
+		writeError(resp, enc, err, http.StatusInternalServerError)
+		return
+	}
+	writeResponse(resp, enc.contentType(), http.StatusOK, msg)
+}
+
 func readContentType(resp http.ResponseWriter, req *http.Request) (encoder, bool) {
 	if req.Method != http.MethodPost {
 		handleUnmatchedMethod(resp)
diff --git a/receiver/receiver.go b/receiver/receiver.go
index dd7242d5f77..c73758de41e 100644
--- a/receiver/receiver.go
+++ b/receiver/receiver.go
@@ -39,7 +39,16 @@ type Logs interface {
 	component.Component
 }
 
-// Settings configures receiver creators.
+// Entities receiver receives entities.
+// Its purpose is to translate data from any format to the collector's internal entities data format.
+// EntitiesReceiver feeds a consumer.Entities with data.
+//
+// For example, it could be a receiver that reads entity information from an external source and converts it into pentity.Entities.
+type Entities interface {
+	component.Component
+}
+
+// Settings configures Receiver creators.
type Settings struct {
 	// ID returns the ID of the component that will be created.
 	ID component.ID
@@ -84,6 +93,14 @@ type Factory interface {
 	// LogsStability gets the stability level of the Logs receiver.
 	LogsStability() component.StabilityLevel
 
+	// CreateEntities creates an Entities receiver based on this config.
+	// If the receiver type does not support the data type or if the config is not valid,
+	// an error will be returned instead.
+	CreateEntities(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Entities) (Entities, error)
+
+	// EntitiesStability gets the stability level of the Entities receiver.
+	EntitiesStability() component.StabilityLevel
+
 	unexportedFactoryFunc()
 }
 
@@ -133,6 +150,22 @@ func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg compon
 	return f(ctx, set, cfg, next)
 }
 
+// CreateEntitiesFunc is the equivalent of Factory.CreateEntities().
+type CreateEntitiesFunc func(context.Context, Settings, component.Config, consumer.Entities) (Entities, error)
+
+// CreateEntities implements Factory.CreateEntities().
+func (f CreateEntitiesFunc) CreateEntities(
+	ctx context.Context,
+	set Settings,
+	cfg component.Config,
+	nextConsumer consumer.Entities,
+) (Entities, error) {
+	if f == nil {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	return f(ctx, set, cfg, nextConsumer)
+}
+
 type factory struct {
 	cfgType component.Type
 	component.CreateDefaultConfigFunc
@@ -142,6 +175,8 @@ type factory struct {
 	metricsStabilityLevel component.StabilityLevel
 	CreateLogsFunc
 	logsStabilityLevel component.StabilityLevel
+	CreateEntitiesFunc
+	entitiesStabilityLevel component.StabilityLevel
 }
 
 func (f *factory) Type() component.Type {
@@ -162,7 +197,11 @@ func (f *factory) LogsStability() component.StabilityLevel {
 	return f.logsStabilityLevel
 }
 
-// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level.
+func (f *factory) EntitiesStability() component.StabilityLevel {
+	return f.entitiesStabilityLevel
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
 func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption {
 	return factoryOptionFunc(func(o *factory) {
 		o.tracesStabilityLevel = sl
@@ -186,6 +225,14 @@ func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOpt
 	})
 }
 
+// WithEntities overrides the default "error not supported" implementation for CreateEntities and the default "undefined" stability level.
+func WithEntities(createEntities CreateEntitiesFunc, sl component.StabilityLevel) FactoryOption {
	return factoryOptionFunc(func(o *factory) {
+		o.entitiesStabilityLevel = sl
+		o.CreateEntitiesFunc = createEntities
+	})
+}
+
 // NewFactory returns a Factory.
 func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory {
 	f := &factory{
diff --git a/receiver/receiverhelper/obsreport.go b/receiver/receiverhelper/obsreport.go
index 1be60915288..3a22c40a263 100644
--- a/receiver/receiverhelper/obsreport.go
+++ b/receiver/receiverhelper/obsreport.go
@@ -122,6 +122,24 @@ func (rec *ObsReport) EndMetricsOp(
 	rec.endOp(receiverCtx, format, numReceivedPoints, err, pipeline.SignalMetrics)
 }
 
+// StartEntitiesOp is called when a request is received from a client.
+// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. +func (rec *ObsReport) StartEntitiesOp(operationCtx context.Context) context.Context { + return rec.startOp(operationCtx, internal.ReceiverEntitiesOperationSuffix) +} + +// EndEntitiesOp completes the receive operation that was started with +// StartEntitiesOp. +func (rec *ObsReport) EndEntitiesOp( + receiverCtx context.Context, + format string, + numReceivedLogRecords int, + err error, +) { + rec.endOp(receiverCtx, format, numReceivedLogRecords, err, pipeline.SignalEntities) +} + // startOp creates the span used to trace the operation. Returning // the updated context with the created span. func (rec *ObsReport) startOp(receiverCtx context.Context, operationSuffix string) context.Context { diff --git a/receiver/receiverprofiles/profiles.go b/receiver/receiverprofiles/profiles.go index 858a9f44120..140428027de 100644 --- a/receiver/receiverprofiles/profiles.go +++ b/receiver/receiverprofiles/profiles.go @@ -105,6 +105,14 @@ func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel }) } +// WithEntities overrides the default "error not supported" implementation for Factory. +// CreateEntities and the default "undefined" stability level. +func WithEntities(createEntities receiver.CreateEntitiesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithEntities(createEntities, sl)) + }) +} + // NewFactory returns a Factory. func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { opts := factoryOpts{factory: &factory{}} diff --git a/receiver/receivertest/nop_receiver.go b/receiver/receivertest/nop_receiver.go index 973c36dffae..3ec5170c77f 100644 --- a/receiver/receivertest/nop_receiver.go +++ b/receiver/receivertest/nop_receiver.go @@ -37,6 +37,7 @@ func NewNopFactory() receiver.Factory { receiverprofiles.WithMetrics(createMetrics, component.StabilityLevelStable), receiverprofiles.WithLogs(createLogs, component.StabilityLevelStable), receiverprofiles.WithProfiles(createProfiles, component.StabilityLevelAlpha), + receiverprofiles.WithEntities(createEntities, component.StabilityLevelAlpha), ) } @@ -77,6 +78,10 @@ func createProfiles(context.Context, receiver.Settings, component.Config, consum return nopInstance, nil } +func createEntities(context.Context, receiver.Settings, component.Config, consumer.Entities) (receiver.Entities, error) { + return nopInstance, nil +} + var nopInstance = &nopReceiver{} // nopReceiver acts as a receiver for testing purposes. diff --git a/service/internal/builders/connector.go b/service/internal/builders/connector.go index d93a7345f81..e50fd833b07 100644 --- a/service/internal/builders/connector.go +++ b/service/internal/builders/connector.go @@ -113,6 +113,26 @@ func (b *ConnectorBuilder) CreateTracesToProfiles(ctx context.Context, set conne return f.CreateTracesToProfiles(ctx, set, cfg, next) } +// CreateTracesToEntities creates a Traces connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateTracesToEntities(ctx context.Context, set connector.Settings, + next consumer.Entities) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToEntitiesStability()) + return f.CreateTracesToEntities(ctx, set, cfg, next) +} + // CreateMetricsToTraces creates a Metrics connector based on the settings and config. func (b *ConnectorBuilder) CreateMetricsToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Metrics, error) { if next == nil { @@ -194,6 +214,26 @@ func (b *ConnectorBuilder) CreateMetricsToProfiles(ctx context.Context, set conn return f.CreateMetricsToProfiles(ctx, set, cfg, next) } +// CreateMetricsToEntities creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToEntities(ctx context.Context, set connector.Settings, + next consumer.Entities) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToEntitiesStability()) + return f.CreateMetricsToEntities(ctx, set, cfg, next) +} + // CreateLogsToTraces creates a Logs connector based on the settings and config. func (b *ConnectorBuilder) CreateLogsToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Logs, error) { if next == nil { @@ -275,6 +315,26 @@ func (b *ConnectorBuilder) CreateLogsToProfiles(ctx context.Context, set connect return f.CreateLogsToProfiles(ctx, set, cfg, next) } +// CreateLogsToEntities creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToEntities(ctx context.Context, set connector.Settings, + next consumer.Entities) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToEntitiesStability()) + return f.CreateLogsToEntities(ctx, set, cfg, next) +} + // CreateProfilesToTraces creates a Profiles connector based on the settings and config. func (b *ConnectorBuilder) CreateProfilesToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connectorprofiles.Profiles, error) { if next == nil { @@ -371,6 +431,85 @@ func (b *ConnectorBuilder) CreateProfilesToProfiles(ctx context.Context, set con return f.CreateProfilesToProfiles(ctx, set, cfg, next) } +// CreateEntitiesToTraces creates a Profiles connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateEntitiesToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Entities, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.EntitiesToTracesStability()) + return f.CreateEntitiesToTraces(ctx, set, cfg, next) +} + +// CreateEntitiesToMetrics creates a Entities connector based on the settings and config. +func (b *ConnectorBuilder) CreateEntitiesToMetrics(ctx context.Context, set connector.Settings, + next consumer.Metrics) (connector.Entities, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.EntitiesToMetricsStability()) + return f.CreateEntitiesToMetrics(ctx, set, cfg, next) +} + +// CreateEntitiesToLogs creates a Entities connector based on the settings and config. +func (b *ConnectorBuilder) CreateEntitiesToLogs(ctx context.Context, set connector.Settings, + next consumer.Logs) (connector.Entities, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.EntitiesToLogsStability()) + return f.CreateEntitiesToLogs(ctx, set, cfg, next) +} + +// CreateEntitiesToEntities creates a Entities connector based on the settings and config. +func (b *ConnectorBuilder) CreateEntitiesToEntities(ctx context.Context, set connector.Settings, + next consumer.Entities) (connector.Entities, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.EntitiesToEntitiesStability()) + return f.CreateEntitiesToEntities(ctx, set, cfg, next) +} + func (b *ConnectorBuilder) IsConfigured(componentID component.ID) bool { _, ok := b.cfgs[componentID] return ok diff --git a/service/internal/builders/exporter.go b/service/internal/builders/exporter.go index 6ab05ae8b63..7f6baab1711 100644 --- a/service/internal/builders/exporter.go +++ b/service/internal/builders/exporter.go @@ -94,6 +94,22 @@ func (b *ExporterBuilder) CreateProfiles(ctx context.Context, set exporter.Setti return f.CreateProfiles(ctx, set, cfg) } +// CreateEntities creates a Entities exporter based on the settings and config. 
+func (b *ExporterBuilder) CreateEntities(ctx context.Context, set exporter.Settings) (exporter.Entities, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.EntitiesStability()) + return f.CreateEntities(ctx, set, cfg) +} + func (b *ExporterBuilder) Factory(componentType component.Type) component.Factory { return b.factories[componentType] } diff --git a/service/internal/builders/processor.go b/service/internal/builders/processor.go index 890dd96c185..9a74638d925 100644 --- a/service/internal/builders/processor.go +++ b/service/internal/builders/processor.go @@ -108,6 +108,26 @@ func (b *ProcessorBuilder) CreateProfiles(ctx context.Context, set processor.Set return f.CreateProfiles(ctx, set, cfg, next) } +// CreateEntities creates a Entities processor based on the settings and config. +func (b *ProcessorBuilder) CreateEntities(ctx context.Context, set processor.Settings, + next consumer.Entities) (processor.Entities, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("processor %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("processor factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.EntitiesStability()) + return f.CreateEntities(ctx, set, cfg, next) +} + func (b *ProcessorBuilder) Factory(componentType component.Type) component.Factory { return b.factories[componentType] } diff --git a/service/internal/builders/receiver.go b/service/internal/builders/receiver.go index ceb78b8ef3a..dc70eeb3b74 100644 --- a/service/internal/builders/receiver.go +++ b/service/internal/builders/receiver.go @@ -110,6 +110,30 @@ func (b *ReceiverBuilder) CreateProfiles(ctx context.Context, set receiver.Setti return f.CreateProfiles(ctx, set, cfg, next) } +// CreateEntities creates a Entities receiver based on the settings and config. 
+func (b *ReceiverBuilder) CreateEntities(ctx context.Context, set receiver.Settings, next consumer.Entities) (receiver.Entities, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + recvFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + f, ok := recvFact.(receiverprofiles.Factory) + if !ok { + return nil, pipeline.ErrSignalNotSupported + } + + logStabilityLevel(set.Logger, f.EntitiesStability()) + return f.CreateEntities(ctx, set, cfg, next) +} + func (b *ReceiverBuilder) Factory(componentType component.Type) component.Factory { return b.factories[componentType] } diff --git a/service/internal/capabilityconsumer/capabilities.go b/service/internal/capabilityconsumer/capabilities.go index 14927fbfcd9..a2d63ae354a 100644 --- a/service/internal/capabilityconsumer/capabilities.go +++ b/service/internal/capabilityconsumer/capabilities.go @@ -71,3 +71,19 @@ type capProfiles struct { func (mts capProfiles) Capabilities() consumer.Capabilities { return mts.cap } + +func NewEntities(entities consumer.Entities, cap consumer.Capabilities) consumer.Entities { + if entities.Capabilities() == cap { + return entities + } + return capEntities{Entities: entities, cap: cap} +} + +type capEntities struct { + consumer.Entities + cap consumer.Capabilities +} + +func (mts capEntities) Capabilities() consumer.Capabilities { + return mts.cap +} diff --git a/service/internal/graph/capabilities.go b/service/internal/graph/capabilities.go index 8a16ae67853..0d5517dc809 100644 --- a/service/internal/graph/capabilities.go +++ b/service/internal/graph/capabilities.go @@ -26,6 +26,7 @@ type capabilitiesNode struct { consumer.ConsumeMetricsFunc consumer.ConsumeLogsFunc consumerprofiles.ConsumeProfilesFunc + consumer.ConsumeEntitiesFunc } func newCapabilitiesNode(pipelineID pipeline.ID) *capabilitiesNode { diff --git a/service/internal/graph/connector.go b/service/internal/graph/connector.go index 76e69df77d1..aee42afd964 100644 --- a/service/internal/graph/connector.go +++ b/service/internal/graph/connector.go @@ -63,6 +63,8 @@ func (n *connectorNode) buildComponent( return n.buildLogs(ctx, set, builder, nexts) case pipelineprofiles.SignalProfiles: return n.buildProfiles(ctx, set, builder, nexts) + case pipeline.SignalEntities: + return n.buildEntities(ctx, set, builder, nexts) } return nil } @@ -207,6 +209,41 @@ func (n *connectorNode) buildProfiles( return err } +func (n *connectorNode) buildEntities( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Entities, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Entities) + } + next := connector.NewEntitiesRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalEntities: + var conn connector.Entities + conn, err = builder.CreateEntitiesToEntities(ctx, set, next) + if err != nil { + return err + } + n.Component = componentEntities{ + Component: conn, + Entities: capabilityconsumer.NewEntities(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToEntities(ctx, set, next) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToEntities(ctx, set, 
next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToEntities(ctx, set, next) + } + return err +} + // When connecting pipelines of the same data type, the connector must // inherit the capabilities of pipelines in which it is acting as a receiver. // Since the incoming and outgoing data types are the same, we must also consider diff --git a/service/internal/graph/consumer.go b/service/internal/graph/consumer.go index 6bc6b96ca02..7bd523b369a 100644 --- a/service/internal/graph/consumer.go +++ b/service/internal/graph/consumer.go @@ -37,3 +37,8 @@ type componentProfiles struct { component.Component consumerprofiles.Profiles } + +type componentEntities struct { + component.Component + consumer.Entities +} diff --git a/service/internal/graph/exporter.go b/service/internal/graph/exporter.go index 948da2f759a..77b5d85c7a0 100644 --- a/service/internal/graph/exporter.go +++ b/service/internal/graph/exporter.go @@ -58,6 +58,8 @@ func (n *exporterNode) buildComponent( n.Component, err = builder.CreateLogs(ctx, set) case pipelineprofiles.SignalProfiles: n.Component, err = builder.CreateProfiles(ctx, set) + case pipeline.SignalEntities: + n.Component, err = builder.CreateEntities(ctx, set) default: return fmt.Errorf("error creating exporter %q for data type %q is not supported", set.ID, n.pipelineType) } diff --git a/service/internal/graph/graph.go b/service/internal/graph/graph.go index 26754e8fc34..8d08be8cab3 100644 --- a/service/internal/graph/graph.go +++ b/service/internal/graph/graph.go @@ -322,6 +322,10 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { cc := capabilityconsumer.NewProfiles(next.(consumerprofiles.Profiles), capability) n.baseConsumer = cc n.ConsumeProfilesFunc = cc.ConsumeProfiles + case pipeline.SignalEntities: + cc := capabilityconsumer.NewEntities(next.(consumer.Entities), capability) + n.baseConsumer = cc + n.ConsumeEntitiesFunc = cc.ConsumeEntities } case *fanOutNode: nexts := g.nextConsumers(n.ID()) @@ -350,6 +354,12 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { consumers = append(consumers, next.(consumerprofiles.Profiles)) } n.baseConsumer = fanoutconsumer.NewProfiles(consumers) + case pipeline.SignalEntities: + consumers := make([]consumer.Entities, 0, len(nexts)) + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Entities)) + } + n.baseConsumer = fanoutconsumer.NewEntities(consumers) } } if err != nil { @@ -485,6 +495,7 @@ func (g *Graph) GetExporters() map[pipeline.Signal]map[component.ID]component.Co exportersMap[pipeline.SignalMetrics] = make(map[component.ID]component.Component) exportersMap[pipeline.SignalLogs] = make(map[component.ID]component.Component) exportersMap[pipelineprofiles.SignalProfiles] = make(map[component.ID]component.Component) + exportersMap[pipeline.SignalEntities] = make(map[component.ID]component.Component) for _, pg := range g.pipelines { for _, expNode := range pg.exporters { @@ -599,6 +610,21 @@ func connectorStability(f connector.Factory, expType, recType pipeline.Signal) c case pipelineprofiles.SignalProfiles: return fprof.ProfilesToProfilesStability() } + case pipeline.SignalEntities: + fprof, ok := f.(connector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + switch recType { + case pipeline.SignalTraces: + return fprof.EntitiesToTracesStability() + case pipeline.SignalMetrics: + return fprof.EntitiesToMetricsStability() + case pipeline.SignalLogs: + return fprof.EntitiesToLogsStability() + case 
pipeline.SignalEntities: + return fprof.EntitiesToEntitiesStability() + } } return component.StabilityLevelUndefined } diff --git a/service/internal/graph/processor.go b/service/internal/graph/processor.go index 9167152011c..76f406581bf 100644 --- a/service/internal/graph/processor.go +++ b/service/internal/graph/processor.go @@ -60,6 +60,8 @@ func (n *processorNode) buildComponent(ctx context.Context, n.Component, err = builder.CreateLogs(ctx, set, next.(consumer.Logs)) case pipelineprofiles.SignalProfiles: n.Component, err = builder.CreateProfiles(ctx, set, next.(consumerprofiles.Profiles)) + case pipeline.SignalEntities: + n.Component, err = builder.CreateEntities(ctx, set, next.(consumer.Entities)) default: return fmt.Errorf("error creating processor %q in pipeline %q, data type %q is not supported", set.ID, n.pipelineID.String(), n.pipelineID.Signal()) } diff --git a/service/internal/graph/receiver.go b/service/internal/graph/receiver.go index d5de95ca3b2..3089f370030 100644 --- a/service/internal/graph/receiver.go +++ b/service/internal/graph/receiver.go @@ -71,6 +71,12 @@ func (n *receiverNode) buildComponent(ctx context.Context, consumers = append(consumers, next.(consumerprofiles.Profiles)) } n.Component, err = builder.CreateProfiles(ctx, set, fanoutconsumer.NewProfiles(consumers)) + case pipeline.SignalEntities: + var consumers []consumer.Entities + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Entities)) + } + n.Component, err = builder.CreateEntities(ctx, set, fanoutconsumer.NewEntities(consumers)) default: return fmt.Errorf("error creating receiver %q for data type %q is not supported", set.ID, n.pipelineType) } diff --git a/service/pipelines/config.go b/service/pipelines/config.go index 76a444127a6..08bc4c3b015 100644 --- a/service/pipelines/config.go +++ b/service/pipelines/config.go @@ -40,7 +40,7 @@ func (cfg Config) Validate() error { // only configured components. 
for pipelineID, p := range cfg { switch pipelineID.Signal() { - case pipeline.SignalTraces, pipeline.SignalMetrics, pipeline.SignalLogs: + case pipeline.SignalTraces, pipeline.SignalMetrics, pipeline.SignalLogs, pipeline.SignalEntities: // Continue case pipelineprofiles.SignalProfiles: if !serviceProfileSupportGate.IsEnabled() { diff --git a/service/service_test.go b/service/service_test.go index d1663a8212e..a6832799a6a 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -235,7 +235,7 @@ func TestServiceGetExporters(t *testing.T) { assert.True(t, ok) assert.NotNil(t, v) - assert.Len(t, expMap, 4) + assert.Len(t, expMap, 5) assert.Len(t, expMap[pipeline.SignalTraces], 1) assert.Contains(t, expMap[pipeline.SignalTraces], component.NewID(nopType)) assert.Len(t, expMap[pipeline.SignalMetrics], 1) @@ -244,6 +244,8 @@ func TestServiceGetExporters(t *testing.T) { assert.Contains(t, expMap[pipeline.SignalLogs], component.NewID(nopType)) assert.Len(t, expMap[pipelineprofiles.SignalProfiles], 1) assert.Contains(t, expMap[pipelineprofiles.SignalProfiles], component.NewID(nopType)) + assert.Len(t, expMap[pipeline.SignalEntities], 1) + assert.Contains(t, expMap[pipeline.SignalEntities], component.NewID(nopType)) } // TestServiceTelemetryCleanupOnError tests that if newService errors due to an invalid config telemetry is cleaned up @@ -617,6 +619,11 @@ func newNopConfig() Config { Processors: []component.ID{component.NewID(nopType)}, Exporters: []component.ID{component.NewID(nopType)}, }, + pipeline.NewID(pipeline.SignalEntities): { + Receivers: []component.ID{component.NewID(nopType)}, + Processors: []component.ID{component.NewID(nopType)}, + Exporters: []component.ID{component.NewID(nopType)}, + }, }) }
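
For reviewers who want to exercise the new API surface end to end, here is a minimal sketch of an entities-capable processor wired through the options this patch introduces (`processor.WithEntities` plus `processorhelper.NewEntities`). It is illustrative only and not part of the patch: the package name and component type below are hypothetical, and the sketch assumes the `pdata/pentity` and `consumer.Entities` packages added by this change are available.

```go
package passthroughentities // hypothetical example package, not part of this patch

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/pentity"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// Config is an empty configuration for this example processor.
type Config struct{}

// NewFactory registers entities support the same way traces/metrics/logs
// support is registered, using the WithEntities option added in this patch.
func NewFactory() processor.Factory {
	return processor.NewFactory(
		component.MustNewType("passthroughentities"), // hypothetical component type
		func() component.Config { return &Config{} },
		processor.WithEntities(createEntities, component.StabilityLevelDevelopment),
	)
}

func createEntities(ctx context.Context, set processor.Settings, cfg component.Config, next consumer.Entities) (processor.Entities, error) {
	// processorhelper.NewEntities (added in this patch) wraps the processing
	// function with obsreport accounting and span events, mirroring the
	// existing logs/metrics/traces helpers.
	return processorhelper.NewEntities(ctx, set, cfg, next,
		func(_ context.Context, ed pentity.Entities) (pentity.Entities, error) {
			// Pass entities through unchanged.
			return ed, nil
		},
	)
}
```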