Release v0.60.0 (#2028)

* Add back -compat=X.X to the make tidy command (-compat=1.18)

* Fix lint error for the deprecated metric.SetDataType() method; fix lint errors for other deprecated methods (insert -> put)

* Release v0.60.0

* patch

* patch

* preserve evaluated attribute types

Co-authored-by: Ryan Fitzpatrick <[email protected]>
jvoravong and Ryan Fitzpatrick authored Sep 23, 2022
1 parent 070dbe7 commit fe3f233
Showing 24 changed files with 610 additions and 571 deletions.
11 changes: 11 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,17 @@

## Unreleased

+## v0.60.0
+
+This Splunk OpenTelemetry Collector release includes changes from the [opentelemetry-collector v0.60.0](https://github.com/open-telemetry/opentelemetry-collector/releases/tag/v0.60.0) and the [opentelemetry-collector-contrib v0.60.0](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.60.0) releases.
+
+### 💡 Enhancements 💡
+
+- Update auto instrumentation java agent to [v1.16.0](https://github.com/signalfx/splunk-otel-java/releases/tag/v1.16.0)
+- Replace usage of Map.Insert* and Map.Update* with Map.Upsert (#1957)
+- Refactor main flags as settings.Settings (#1952)
+- Support installing with ansible and skipping restart of services (#1930)
+
## v0.59.1

### 💡 Enhancements 💡
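Most of the Go churn in this commit is the same mechanical rename: in pdata v0.60.0, the pcommon.Map setters Insert* (set only if absent), Update* (set only if present), and Upsert* (always set) are consolidated into a single Put* family that always sets the key. A minimal sketch of the post-rename API, assuming pdata v0.60.0 (the keys and values are made up):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	attrs := pcommon.NewMap()

	// One verb instead of three: PutString/PutInt/PutBool set the key
	// whether or not it already exists.
	attrs.PutString("service.name", "databricks")
	attrs.PutInt("job.id", 42)
	attrs.PutBool("paused", false)

	fmt.Println(attrs.AsRaw()) // map[job.id:42 paused:false service.name:databricks]
}
```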
6 changes: 3 additions & 3 deletions Makefile
@@ -161,9 +161,9 @@ misspell-correction:

.PHONY: tidy
tidy:
-	go mod tidy
-	cd tests && go mod tidy
-	cd internal/tools && go mod tidy
+	go mod tidy -compat=1.18
+	cd tests && go mod tidy -compat=1.18
+	cd internal/tools && go mod tidy -compat=1.18

.PHONY: fmt
fmt: addlicense misspell-correction
212 changes: 109 additions & 103 deletions go.mod

Large diffs are not rendered by default.

553 changes: 281 additions & 272 deletions go.sum

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion internal/exporter/httpsinkexporter/httpsink_exporter.go
@@ -17,7 +17,7 @@ package httpsinkexporter

import (
	"context"
-	"net/http"
+	http "net/http"
	"sync"

	"github.com/gogo/protobuf/jsonpb"
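The only change in this file is an explicit name on the net/http import. Because the alias matches the package's own name, the change is behavior-neutral; a minimal illustration of Go's named-import form:

```go
package main

// `http "net/http"` is equivalent to `"net/http"`; the package
// name is simply spelled out rather than inferred.
import (
	http "net/http"
)

func main() {
	_ = http.StatusOK // callers are unaffected
}
```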
2 changes: 1 addition & 1 deletion internal/extension/smartagentextension/config.go
@@ -25,7 +25,7 @@ import (
"gopkg.in/yaml.v2"
)

var _ config.Unmarshallable = (*Config)(nil)
var _ confmap.Unmarshaler = (*Config)(nil)

type Config struct {
config.ExtensionSettings `mapstructure:",squash"`
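The blank-identifier assignment above is Go's compile-time interface assertion; the diff retargets it from the removed config.Unmarshallable to confmap.Unmarshaler, the v0.60.0 interface for components that customize config decoding. A minimal sketch of the pattern, assuming collector v0.60.0's confmap API (this Config and its bundleDir field are placeholders, not the extension's real ones):

```go
package main

import (
	"go.opentelemetry.io/collector/confmap"
)

// Placeholder config; the real smartagentextension.Config embeds
// config.ExtensionSettings plus Smart Agent-specific fields.
type Config struct {
	BundleDir string `mapstructure:"bundleDir"`
}

// Fails to compile if *Config stops implementing confmap.Unmarshaler.
var _ confmap.Unmarshaler = (*Config)(nil)

// Unmarshal lets the component control how its config section is decoded.
func (cfg *Config) Unmarshal(conf *confmap.Conf) error {
	return conf.Unmarshal(cfg)
}

func main() {
	conf := confmap.NewFromStringMap(map[string]interface{}{"bundleDir": "/opt/bundle"})
	cfg := &Config{}
	_ = cfg.Unmarshal(conf) // cfg.BundleDir == "/opt/bundle"
}
```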
8 changes: 4 additions & 4 deletions internal/receiver/databricksreceiver/metrics_provider.go
@@ -49,14 +49,14 @@ func (p metricsProvider) addJobStatusMetrics(ms pmetric.MetricSlice) ([]int, err
		jobPt := jobPts.AppendEmpty()
		pauseStatus := pauseStatusToInt(j.Settings.Schedule.PauseStatus)
		jobPt.SetIntVal(pauseStatus)
-		jobPt.Attributes().UpsertInt(metadata.A.JobID, int64(j.JobID))
+		jobPt.Attributes().PutInt(metadata.A.JobID, int64(j.JobID))
		for _, task := range j.Settings.Tasks {
			taskPt := taskPts.AppendEmpty()
			taskPt.SetIntVal(pauseStatus)
			taskAttrs := taskPt.Attributes()
-			taskAttrs.UpsertInt(metadata.A.JobID, int64(j.JobID))
-			taskAttrs.UpsertString(metadata.A.TaskID, task.TaskKey)
-			taskAttrs.UpsertString(metadata.A.TaskType, taskType(task))
+			taskAttrs.PutInt(metadata.A.JobID, int64(j.JobID))
+			taskAttrs.PutString(metadata.A.TaskID, task.TaskKey)
+			taskAttrs.PutString(metadata.A.TaskType, taskType(task))
		}
	}
	return jobIDs, nil
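The renamed Put* calls sit inside the usual pdata construction flow, and the commit message also mentions replacing the deprecated metric.SetDataType(); its v0.60.0 replacement is the SetEmpty&lt;Type&gt;() family. A hedged sketch assuming pdata v0.60.0 (the metric name and attribute keys are illustrative, not the receiver's real ones):

```go
package main

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	ms := pmetric.NewMetricSlice()

	m := ms.AppendEmpty()
	m.SetName("databricks.jobs.schedule.status") // illustrative name
	// SetEmptyGauge replaces the deprecated SetDataType(pmetric.MetricDataTypeGauge).
	pts := m.SetEmptyGauge().DataPoints()

	pt := pts.AppendEmpty()
	pt.SetIntVal(1)
	pt.Attributes().PutInt("job_id", 42)
	pt.Attributes().PutString("task_type", "NotebookTask")
}
```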
6 changes: 3 additions & 3 deletions internal/receiver/databricksreceiver/run_metrics_provider.go
@@ -66,13 +66,13 @@ func (p runMetricsProvider) addSingleJobRunMetrics(
		}
		jobPt := jobPts.AppendEmpty()
		jobPt.SetIntVal(int64(run.ExecutionDuration))
-		jobPt.Attributes().UpsertInt(metadata.Attributes.JobID, int64(jobID))
+		jobPt.Attributes().PutInt(metadata.Attributes.JobID, int64(jobID))
		for _, task := range run.Tasks {
			taskPt := taskPts.AppendEmpty()
			taskPt.SetIntVal(int64(task.ExecutionDuration))
			taskAttrs := taskPt.Attributes()
-			taskAttrs.UpsertInt(metadata.Attributes.JobID, int64(jobID))
-			taskAttrs.UpsertString(metadata.Attributes.TaskID, task.TaskKey)
+			taskAttrs.PutInt(metadata.Attributes.JobID, int64(jobID))
+			taskAttrs.PutString(metadata.Attributes.TaskID, task.TaskKey)
		}
	}
	return nil
2 changes: 1 addition & 1 deletion internal/receiver/databricksreceiver/scraper.go
@@ -36,7 +36,7 @@ func (s scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
	out := pmetric.NewMetrics()
	rms := out.ResourceMetrics()
	rm := rms.AppendEmpty()
-	rm.Resource().Attributes().UpsertString(metadata.A.DatabricksInstanceName, s.instanceName)
+	rm.Resource().Attributes().PutString(metadata.A.DatabricksInstanceName, s.instanceName)
	ilms := rm.ScopeMetrics()
	ilm := ilms.AppendEmpty()
	ms := ilm.Metrics()
24 changes: 12 additions & 12 deletions internal/receiver/discoveryreceiver/endpoint_tracker.go
@@ -130,9 +130,9 @@ func endpointToPLogs(observerID config.ComponentID, eventType string, endpoints
	pLogs = plog.NewLogs()
	rlog := pLogs.ResourceLogs().AppendEmpty()
	rAttrs := rlog.Resource().Attributes()
-	rAttrs.UpsertString(eventTypeAttr, eventType)
-	rAttrs.UpsertString(observerNameAttr, observerID.Name())
-	rAttrs.UpsertString(observerTypeAttr, string(observerID.Type()))
+	rAttrs.PutString(eventTypeAttr, eventType)
+	rAttrs.PutString(observerNameAttr, observerID.Name())
+	rAttrs.PutString(observerTypeAttr, string(observerID.Type()))
	sl := rlog.ScopeLogs().AppendEmpty()
	for _, endpoint := range endpoints {
		logRecord := sl.LogRecords().AppendEmpty()
@@ -149,12 +149,12 @@ func endpointToPLogs(observerID config.ComponentID, eventType string, endpoints
				// this must be the first mutation of attrs since it's destructive
				envAttrs.CopyTo(attrs)
			}
-			attrs.UpsertString("type", string(endpoint.Details.Type()))
+			attrs.PutString("type", string(endpoint.Details.Type()))
		} else {
			logRecord.Body().SetStringVal(fmt.Sprintf("%s endpoint %s", eventType, endpoint.ID))
		}
-		attrs.UpsertString("endpoint", endpoint.Target)
-		attrs.UpsertString("id", string(endpoint.ID))
+		attrs.PutString("endpoint", endpoint.Target)
+		attrs.PutString("id", string(endpoint.ID))

		// sorted log record attributes for determinism
		attrs.Sort()
@@ -170,9 +170,9 @@ func endpointEnvToAttrs(endpointType observer.EndpointType, endpointEnv observer
		// should result in a ValueMap
		case shouldEmbedMap(endpointType, k):
			if asMap, ok := v.(map[string]string); ok {
-				mapVal := attrs.UpsertEmptyMap(k)
+				mapVal := attrs.PutEmptyMap(k)
				for item, itemVal := range asMap {
-					mapVal.UpsertString(item, itemVal)
+					mapVal.PutString(item, itemVal)
				}
				mapVal.Sort()
			} else {
@@ -186,18 +186,18 @@ func endpointEnvToAttrs(endpointType observer.EndpointType, endpointEnv observer
				if e != nil {
					return attrs, fmt.Errorf("failed parsing %v pod attributes ", endpointType)
				}
-				podAttrs.CopyTo(attrs.UpsertEmptyMap(k))
+				podAttrs.CopyTo(attrs.PutEmptyMap(k))
			} else {
				return attrs, fmt.Errorf("failed parsing %v pod env %#v", endpointType, v)
			}
		default:
			switch vVal := v.(type) {
			case uint16:
-				attrs.UpsertInt(k, int64(vVal))
+				attrs.PutInt(k, int64(vVal))
			case bool:
-				attrs.UpsertBool(k, vVal)
+				attrs.PutBool(k, vVal)
			default:
-				attrs.UpsertString(k, fmt.Sprintf("%v", v))
+				attrs.PutString(k, fmt.Sprintf("%v", v))
			}
		}
	}
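endpointToPLogs assembles discovery events through the standard plog hierarchy (ResourceLogs → ScopeLogs → LogRecords). A distilled, self-contained sketch of that flow under the same pdata v0.60.0 API (the attribute keys and values stand in for the eventTypeAttr/endpoint constants used above):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	logs := plog.NewLogs()
	rl := logs.ResourceLogs().AppendEmpty()

	// Resource-level attributes identify the event and its observer.
	rl.Resource().Attributes().PutString("discovery.event.type", "added") // placeholder key

	// One log record per discovered endpoint.
	lr := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.Body().SetStringVal("added endpoint my-endpoint-id")
	attrs := lr.Attributes()
	attrs.PutString("endpoint", "localhost:5432")
	attrs.PutString("id", "my-endpoint-id")
	attrs.Sort() // sorted for determinism, as in the diff

	fmt.Println(logs.LogRecordCount()) // 1
}
```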